Diffstat (limited to 'vendor/github.com/docker')
-rw-r--r--  vendor/github.com/docker/docker/AUTHORS | 62
-rw-r--r--  vendor/github.com/docker/docker/api/swagger.yaml | 1982
-rw-r--r--  vendor/github.com/docker/docker/api/types/client.go | 4
-rw-r--r--  vendor/github.com/docker/docker/api/types/configs.go | 2
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/container_top.go | 4
-rw-r--r--  vendor/github.com/docker/docker/api/types/container/host_config.go | 3
-rw-r--r--  vendor/github.com/docker/docker/api/types/events/events.go | 2
-rw-r--r--  vendor/github.com/docker/docker/api/types/network/network.go | 3
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/container.go | 17
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/service.go | 61
-rw-r--r--  vendor/github.com/docker/docker/api/types/swarm/task.go | 18
-rw-r--r--  vendor/github.com/docker/docker/api/types/types.go | 48
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume.go | 5
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volume_create.go | 5
-rw-r--r--  vendor/github.com/docker/docker/api/types/volume/volume_list.go | 3
-rw-r--r--  vendor/github.com/docker/docker/client/client_unix.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/container_create.go | 13
-rw-r--r--  vendor/github.com/docker/docker/client/container_stats.go | 16
-rw-r--r--  vendor/github.com/docker/docker/client/errors.go | 8
-rw-r--r--  vendor/github.com/docker/docker/client/interface.go | 4
-rw-r--r--  vendor/github.com/docker/docker/client/ping.go | 2
-rw-r--r--  vendor/github.com/docker/docker/client/request.go | 3
-rw-r--r--  vendor/github.com/docker/docker/client/service_create.go | 76
-rw-r--r--  vendor/github.com/docker/docker/client/service_update.go | 41
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive.go | 38
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_linux.go | 19
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/archive_unix.go | 4
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go | 85
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go | 34
-rw-r--r--  vendor/github.com/docker/docker/pkg/idtools/utils_unix.go | 5
-rw-r--r--  vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags.go | 137
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go | 49
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_linux.go | 87
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go | 31
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mount.go | 159
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go | 59
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_linux.go | 73
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo.go | 40
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go | 54
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go | 143
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go | 71
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/unmount_unix.go | 22
-rw-r--r--  vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go | 7
-rw-r--r--  vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/filesys_windows.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_unix.go | 12
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/init_windows.go | 11
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_unix.go | 8
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go | 28
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/lcow_windows.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/path_windows.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/process_unix.go | 20
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/process_windows.go | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/rm.go | 10
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_bsd.go (renamed from vendor/github.com/docker/docker/pkg/system/stat_freebsd.go) | 2
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/stat_solaris.go | 13
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_unix.go | 6
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/syscall_windows.go | 82
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/xattrs_linux.go | 17
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/ascii.go | 66
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/proxy.go | 78
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/tc.go | 20
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term.go | 124
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/term_windows.go | 221
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_bsd.go | 42
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/termios_linux.go | 39
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go | 263
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go | 64
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/console.go | 35
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/windows/windows.go | 34
-rw-r--r--  vendor/github.com/docker/docker/pkg/term/winsize.go | 20
77 files changed, 2017 insertions, 2839 deletions
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index ad166ba8d..dffacff11 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -45,6 +45,7 @@ AJ Bowen <aj@soulshake.net>
Ajey Charantimath <ajey.charantimath@gmail.com>
ajneu <ajneu@users.noreply.github.com>
Akash Gupta <akagup@microsoft.com>
+Akhil Mohan <akhil.mohan@mayadata.io>
Akihiro Matsushima <amatsusbit@gmail.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akim Demaille <akim.demaille@docker.com>
@@ -52,10 +53,12 @@ Akira Koyasu <mail@akirakoyasu.net>
Akshay Karle <akshay.a.karle@gmail.com>
Al Tobey <al@ooyala.com>
alambike <alambike@gmail.com>
+Alan Hoyle <alan@alanhoyle.com>
Alan Scherger <flyinprogrammer@gmail.com>
Alan Thompson <cloojure@gmail.com>
Albert Callarisa <shark234@gmail.com>
Albert Zhang <zhgwenming@gmail.com>
+Albin Kerouanton <albin@akerouanton.name>
Alejandro González Hevia <alejandrgh11@gmail.com>
Aleksa Sarai <asarai@suse.de>
Aleksandrs Fadins <aleks@s-ko.net>
@@ -109,6 +112,7 @@ Amy Lindburg <amy.lindburg@docker.com>
Anand Patil <anand.prabhakar.patil@gmail.com>
AnandkumarPatel <anandkumarpatel@gmail.com>
Anatoly Borodin <anatoly.borodin@gmail.com>
+Anca Iordache <anca.iordache@docker.com>
Anchal Agrawal <aagrawa4@illinois.edu>
Anda Xu <anda.xu@docker.com>
Anders Janmyr <anders@janmyr.com>
@@ -215,10 +219,12 @@ Benjamin Atkin <ben@benatkin.com>
Benjamin Baker <Benjamin.baker@utexas.edu>
Benjamin Boudreau <boudreau.benjamin@gmail.com>
Benjamin Yolken <yolken@stripe.com>
+Benny Ng <benny.tpng@gmail.com>
Benoit Chesneau <bchesneau@gmail.com>
Bernerd Schaefer <bj.schaefer@gmail.com>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bert Goethals <bert@bertg.be>
+Bertrand Roussel <broussel@sierrawireless.com>
Bevisy Zhang <binbin36520@gmail.com>
Bharath Thiruveedula <bharath_ves@hotmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
@@ -231,6 +237,7 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Blake Geno <blakegeno@gmail.com>
Boaz Shuster <ripcurld.github@gmail.com>
bobby abbott <ttobbaybbob@gmail.com>
+Boqin Qin <bobbqqin@gmail.com>
Boris Pruessmann <boris@pruessmann.org>
Boshi Lian <farmer1992@gmail.com>
Bouke Haarsma <bouke@webatoom.nl>
@@ -334,7 +341,7 @@ Chris Gibson <chris@chrisg.io>
Chris Khoo <chris.khoo@gmail.com>
Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
Chris McKinnel <chrismckinnel@gmail.com>
-Chris Price <chris.price@docker.com>
+Chris Price <cprice@mirantis.com>
Chris Seto <chriskseto@gmail.com>
Chris Snow <chsnow123@gmail.com>
Chris St. Pierre <chris.a.st.pierre@gmail.com>
@@ -361,7 +368,7 @@ Christopher Currie <codemonkey+github@gmail.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Christopher Latham <sudosurootdev@gmail.com>
Christopher Rigor <crigor@gmail.com>
-Christy Perez <christy@linux.vnet.ibm.com>
+Christy Norman <christy@linux.vnet.ibm.com>
Chun Chen <ramichen@tencent.com>
Ciro S. Costa <ciro.costa@usp.br>
Clayton Coleman <ccoleman@redhat.com>
@@ -381,8 +388,10 @@ Corey Farrell <git@cfware.com>
Cory Forsyth <cory.forsyth@gmail.com>
cressie176 <github@stephen-cresswell.net>
CrimsonGlory <CrimsonGlory@users.noreply.github.com>
+Cristian Ariza <dev@cristianrz.com>
Cristian Staretu <cristian.staretu@gmail.com>
cristiano balducci <cristiano.balducci@gmail.com>
+Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
CUI Wei <ghostplant@qq.com>
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
@@ -409,12 +418,14 @@ Dan Williams <me@deedubs.com>
Dani Hodovic <dani.hodovic@gmail.com>
Dani Louca <dani.louca@docker.com>
Daniel Antlinger <d.antlinger@gmx.at>
+Daniel Black <daniel@linux.ibm.com>
Daniel Dao <dqminh@cloudflare.com>
Daniel Exner <dex@dragonslave.de>
Daniel Farrell <dfarrell@redhat.com>
Daniel Garcia <daniel@danielgarcia.info>
Daniel Gasienica <daniel@gasienica.ch>
Daniel Grunwell <mwgrunny@gmail.com>
+Daniel Helfand <helfand.4@gmail.com>
Daniel Hiltgen <daniel.hiltgen@docker.com>
Daniel J Walsh <dwalsh@redhat.com>
Daniel Menet <membership@sontags.ch>
@@ -496,6 +507,7 @@ Derek McGowan <derek@mcgstyle.net>
Deric Crago <deric.crago@gmail.com>
Deshi Xiao <dxiao@redhat.com>
devmeyster <arthurfbi@yahoo.com>
+Devon Estes <devon.estes@klarna.com>
Devvyn Murphy <devvyn@devvyn.com>
Dharmit Shah <shahdharmit@gmail.com>
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
@@ -545,7 +557,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
Dr Nic Williams <drnicwilliams@gmail.com>
dragon788 <dragon788@users.noreply.github.com>
Dražen Lučanin <kermit666@gmail.com>
-Drew Erny <drew.erny@docker.com>
+Drew Erny <derny@mirantis.com>
Drew Hubl <drew.hubl@gmail.com>
Dustin Sallings <dustin@spy.net>
Ed Costello <epc@epcostello.com>
@@ -607,6 +619,7 @@ Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Evelyn Xu <evelynhsu21@gmail.com>
Everett Toews <everett.toews@rackspace.com>
+Evgeniy Makhrov <e.makhrov@corp.badoo.com>
Evgeny Shmarnev <shmarnev@gmail.com>
Evgeny Vereshchagin <evvers@ya.ru>
Ewa Czechowska <ewa@ai-traders.com>
@@ -653,6 +666,7 @@ Florian <FWirtz@users.noreply.github.com>
Florian Klein <florian.klein@free.fr>
Florian Maier <marsmensch@users.noreply.github.com>
Florian Noeding <noeding@adobe.com>
+Florian Schmaus <flo@geekplace.eu>
Florian Weingarten <flo@hackvalue.de>
Florin Asavoaie <florin.asavoaie@gmail.com>
Florin Patan <florinpatan@gmail.com>
@@ -689,7 +703,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
Garrett Barboza <garrett@garrettbarboza.com>
Gary Schaetz <gary@schaetzkc.com>
Gaurav <gaurav.gosec@gmail.com>
-gautam, prasanna <prasannagautam@gmail.com>
+Gaurav Singh <gaurav1086@gmail.com>
Gaël PORTAY <gael.portay@savoirfairelinux.com>
Genki Takiuchi <genki@s21g.com>
GennadySpb <lipenkov@gmail.com>
@@ -720,7 +734,7 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
Gosuke Miyashita <gosukenator@gmail.com>
Gou Rao <gou@portworx.com>
Govinda Fichtner <govinda.fichtner@googlemail.com>
-Grant Millar <grant@cylo.io>
+Grant Millar <rid@cylo.io>
Grant Reaber <grant.reaber@gmail.com>
Graydon Hoare <graydon@pobox.com>
Greg Fausak <greg@tacodata.com>
@@ -743,6 +757,7 @@ Haichao Yang <yang.haichao@zte.com.cn>
haikuoliu <haikuo@amazon.com>
Hakan Özler <hakan.ozler@kodcu.com>
Hamish Hutchings <moredhel@aoeu.me>
+Hannes Ljungberg <hannes@5monkeys.se>
Hans Kristian Flaatten <hans@starefossen.com>
Hans Rødtang <hansrodtang@gmail.com>
Hao Shu Wei <haosw@cn.ibm.com>
@@ -769,6 +784,8 @@ Hollie Teal <hollie@docker.com>
Hong Xu <hong@topbug.net>
Hongbin Lu <hongbin034@gmail.com>
Hongxu Jia <hongxu.jia@windriver.com>
+Honza Pokorny <me@honza.ca>
+Hsing-Hui Hsu <hsinghui@amazon.com>
hsinko <21551195@zju.edu.cn>
Hu Keping <hukeping@huawei.com>
Hu Tao <hutao@cn.fujitsu.com>
@@ -809,6 +826,7 @@ Ingo Gottwald <in.gottwald@gmail.com>
Innovimax <innovimax@gmail.com>
Isaac Dupree <antispam@idupree.com>
Isabel Jimenez <contact.isabeljimenez@gmail.com>
+Isaiah Grace <irgkenya4@gmail.com>
Isao Jonas <isao.jonas@gmail.com>
Iskander Sharipov <quasilyte@gmail.com>
Ivan Babrou <ibobrik@gmail.com>
@@ -824,6 +842,7 @@ Jacob Edelman <edelman.jd@gmail.com>
Jacob Tomlinson <jacob@tom.linson.uk>
Jacob Vallejo <jakeev@amazon.com>
Jacob Wen <jian.w.wen@oracle.com>
+Jaime Cepeda <jcepedavillamayor@gmail.com>
Jaivish Kothari <janonymous.codevulture@gmail.com>
Jake Champlin <jake.champlin.27@gmail.com>
Jake Moshenko <jake@devtable.com>
@@ -838,12 +857,13 @@ James Kyburz <james.kyburz@gmail.com>
James Kyle <james@jameskyle.org>
James Lal <james@lightsofapollo.com>
James Mills <prologic@shortcircuit.net.au>
-James Nesbitt <james.nesbitt@wunderkraut.com>
+James Nesbitt <jnesbitt@mirantis.com>
James Nugent <james@jen20.com>
James Turnbull <james@lovedthanlost.net>
James Watkins-Harvey <jwatkins@progi-media.com>
Jamie Hannaford <jamie@limetree.org>
Jamshid Afshar <jafshar@yahoo.com>
+Jan Chren <dev.rindeal@gmail.com>
Jan Keromnes <janx@linux.com>
Jan Koprowski <jan.koprowski@gmail.com>
Jan Pazdziora <jpazdziora@redhat.com>
@@ -858,6 +878,7 @@ Jared Hocutt <jaredh@netapp.com>
Jaroslaw Zabiello <hipertracker@gmail.com>
jaseg <jaseg@jaseg.net>
Jasmine Hegman <jasmine@jhegman.com>
+Jason A. Donenfeld <Jason@zx2c4.com>
Jason Divock <jdivock@gmail.com>
Jason Giedymin <jasong@apache.org>
Jason Green <Jason.Green@AverInformatics.Com>
@@ -905,7 +926,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
Jeroen Jacobs <github@jeroenj.be>
Jesse Dearing <jesse.dearing@gmail.com>
Jesse Dubay <jesse@thefortytwo.net>
-Jessica Frazelle <acidburn@microsoft.com>
+Jessica Frazelle <jess@oxide.computer>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jhon Honce <jhonce@redhat.com>
Ji.Zhilong <zhilongji@gmail.com>
@@ -913,6 +934,7 @@ Jian Liao <jliao@alauda.io>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jiang Jinyang <jjyruby@gmail.com>
Jie Luo <luo612@zju.edu.cn>
+Jie Ma <jienius@outlook.com>
Jihyun Hwang <jhhwang@telcoware.com>
Jilles Oldenbeuving <ojilles@gmail.com>
Jim Alateras <jima@comware.com.au>
@@ -969,6 +991,7 @@ Jon Johnson <jonjohnson@google.com>
Jon Surrell <jon.surrell@gmail.com>
Jon Wedaman <jweede@gmail.com>
Jonas Dohse <jonas@dohse.ch>
+Jonas Heinrich <Jonas@JonasHeinrich.com>
Jonas Pfenniger <jonas@pfenniger.name>
Jonathan A. Schweder <jonathanschweder@gmail.com>
Jonathan A. Sternberg <jonathansternberg@gmail.com>
@@ -1018,6 +1041,8 @@ Julien Dubois <julien.dubois@gmail.com>
Julien Kassar <github@kassisol.com>
Julien Maitrehenry <julien.maitrehenry@me.com>
Julien Pervillé <julien.perville@perfect-memory.com>
+Julien Pivotto <roidelapluie@inuits.eu>
+Julio Guerra <julio@sqreen.com>
Julio Montes <imc.coder@gmail.com>
Jun-Ru Chang <jrjang@gmail.com>
Jussi Nummelin <jussi.nummelin@gmail.com>
@@ -1191,7 +1216,6 @@ Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
Luke Marsden <me@lukemarsden.net>
Lyn <energylyn@zju.edu.cn>
Lynda O'Leary <lyndaoleary29@gmail.com>
-lzhfromutsc <lzhfromustc@gmail.com>
Lénaïc Huard <lhuard@amadeus.com>
Ma Müller <mueller-ma@users.noreply.github.com>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
@@ -1285,6 +1309,7 @@ Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
Mattias Jernberg <nostrad@gmail.com>
Mauricio Garavaglia <mauricio@medallia.com>
mauriyouth <mauriyouth@gmail.com>
+Max Harmathy <max.harmathy@web.de>
Max Shytikov <mshytikov@gmail.com>
Maxim Fedchyshyn <sevmax@gmail.com>
Maxim Ivanov <ivanov.maxim@gmail.com>
@@ -1342,6 +1367,7 @@ Miguel Morales <mimoralea@gmail.com>
Mihai Borobocea <MihaiBorob@gmail.com>
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
Mike Brown <brownwm@us.ibm.com>
+Mike Bush <mpbush@gmail.com>
Mike Casas <mkcsas0@gmail.com>
Mike Chelen <michael.chelen@gmail.com>
Mike Danese <mikedanese@google.com>
@@ -1434,6 +1460,7 @@ Nik Nyby <nikolas@gnu.org>
Nikhil Chawla <chawlanikhil24@gmail.com>
NikolaMandic <mn080202@gmail.com>
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
+Nikolay Edigaryev <edigaryev@gmail.com>
Nikolay Milovanov <nmil@itransformers.net>
Nirmal Mehta <nirmalkmehta@gmail.com>
Nishant Totla <nishanttotla@gmail.com>
@@ -1637,6 +1664,7 @@ Roland Kammerer <roland.kammerer@linbit.com>
Roland Moriz <rmoriz@users.noreply.github.com>
Roma Sokolov <sokolov.r.v@gmail.com>
Roman Dudin <katrmr@gmail.com>
+Roman Mazur <roman@balena.io>
Roman Strashkin <roman.strashkin@gmail.com>
Ron Smits <ron.smits@gmail.com>
Ron Williams <ron.a.williams@gmail.com>
@@ -1793,6 +1821,7 @@ Srini Brahmaroutu <srbrahma@us.ibm.com>
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
Staf Wagemakers <staf@wagemakers.be>
Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
+Stanislav Levin <slev@altlinux.org>
Steeve Morin <steeve.morin@gmail.com>
Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan J. Wernli <swernli@microsoft.com>
@@ -1804,7 +1833,7 @@ Stefan Weil <sw@weilnetz.de>
Stephan Spindler <shutefan@gmail.com>
Stephen Benjamin <stephen@redhat.com>
Stephen Crosby <stevecrozz@gmail.com>
-Stephen Day <stephen.day@docker.com>
+Stephen Day <stevvooe@gmail.com>
Stephen Drake <stephen@xenolith.net>
Stephen Rust <srust@blockbridge.com>
Steve Desmond <steve@vtsv.ca>
@@ -1875,6 +1904,7 @@ Tianyi Wang <capkurmagati@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tiffany Jernigan <tiffany.f.j@gmail.com>
Tiffany Low <tiffany@box.com>
+Till Wegmüller <toasterson@gmail.com>
Tim <elatllat@gmail.com>
Tim Bart <tim@fewagainstmany.com>
Tim Bosse <taim@bosboot.org>
@@ -1927,7 +1957,7 @@ Tony Miller <mcfiredrill@gmail.com>
toogley <toogley@mailbox.org>
Torstein Husebø <torstein@huseboe.net>
Tõnis Tiigi <tonistiigi@gmail.com>
-tpng <benny.tpng@gmail.com>
+Trace Andreason <tandreason@gmail.com>
tracylihui <793912329@qq.com>
Trapier Marshall <trapier.marshall@docker.com>
Travis Cline <travis.cline@gmail.com>
@@ -1950,6 +1980,7 @@ Utz Bacher <utz.bacher@de.ibm.com>
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
Vaidas Jablonskis <jablonskis@gmail.com>
vanderliang <lansheng@meili-inc.com>
+Velko Ivanov <vivanov@deeperplane.com>
Veres Lajos <vlajos@gmail.com>
Victor Algaze <valgaze@gmail.com>
Victor Coisne <victor.coisne@dotcloud.com>
@@ -1961,12 +1992,13 @@ Victor Palma <palma.victor@gmail.com>
Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Vijaya Kumar K <vijayak@caviumnetworks.com>
-Vikram bir Singh <vikrambir.singh@docker.com>
+Vikram bir Singh <vsingh@mirantis.com>
Viktor Stanchev <me@viktorstanchev.com>
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
VinayRaghavanKS <raghavan.vinay@gmail.com>
Vincent Batts <vbatts@redhat.com>
Vincent Bernat <Vincent.Bernat@exoscale.ch>
+Vincent Boulineau <vincent.boulineau@datadoghq.com>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Giersch <vincent.giersch@ovh.net>
Vincent Mayers <vincent.mayers@inbloom.org>
@@ -1997,6 +2029,8 @@ Wang Long <long.wanglong@huawei.com>
Wang Ping <present.wp@icloud.com>
Wang Xing <hzwangxing@corp.netease.com>
Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Wang Yumu <37442693@qq.com>
+wanghuaiqing <wanghuaiqing@loongson.cn>
Ward Vandewege <ward@jhvc.com>
WarheadsSE <max@warheads.net>
Wassim Dhif <wassimdhif@gmail.com>
@@ -2013,6 +2047,7 @@ Wen Cheng Ma <wenchma@cn.ibm.com>
Wendel Fleming <wfleming@usc.edu>
Wenjun Tang <tangwj2@lenovo.com>
Wenkai Yin <yinw@vmware.com>
+wenlxie <wenlxie@ebay.com>
Wentao Zhang <zhangwentao234@huawei.com>
Wenxuan Zhao <viz@linux.com>
Wenyu You <21551128@zju.edu.cn>
@@ -2030,6 +2065,8 @@ William Hubbs <w.d.hubbs@gmail.com>
William Martin <wmartin@pivotal.io>
William Riancho <wr.wllm@gmail.com>
William Thurston <thurstw@amazon.com>
+Wilson Júnior <wilsonpjunior@gmail.com>
+Wing-Kam Wong <wingkwong.code@gmail.com>
WiseTrem <shepelyov.g@gmail.com>
Wolfgang Powisch <powo@powo.priv.at>
Wonjun Kim <wonjun.kim@navercorp.com>
@@ -2039,6 +2076,7 @@ Xianglin Gao <xlgao@zju.edu.cn>
Xianlu Bird <xianlubird@gmail.com>
Xiao YongBiao <xyb4638@gmail.com>
XiaoBing Jiang <s7v7nislands@gmail.com>
+Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io>
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
@@ -2109,6 +2147,7 @@ Zhenan Ye <21551168@zju.edu.cn>
zhenghenghuo <zhenghenghuo@zju.edu.cn>
Zhenhai Gao <gaozh1988@live.com>
Zhenkun Bi <bi.zhenkun@zte.com.cn>
+zhipengzuo <zuozhipeng@baidu.com>
Zhou Hao <zhouhao@cn.fujitsu.com>
Zhoulin Xie <zhoulin.xie@daocloud.io>
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
@@ -2129,6 +2168,7 @@ Zunayed Ali <zunayed@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
Átila Camurça Alves <camurca.home@gmail.com>
尹吉峰 <jifeng.yin@gmail.com>
+屈骏 <qujun@tiduyun.com>
徐俊杰 <paco.xu@daocloud.io>
慕陶 <jihui.xjh@alibaba-inc.com>
搏通 <yufeng.pyf@alibaba-inc.com>
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 21fdc88fa..ca172599d 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -26,13 +26,19 @@ info:
x-logo:
url: "https://docs.docker.com/images/logo-docker-main.png"
description: |
- The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+ The Engine API is an HTTP API served by Docker Engine. It is the API the
+ Docker client uses to communicate with the Engine, so everything the Docker
+ client can do can be done with the API.
- Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+ is `GET /containers/json`). The notable exception is running containers,
+ which consists of several API calls.
# Errors
- The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+ The API uses standard HTTP status codes to indicate the success or failure
+ of the API call. The body of the response will be JSON in the following
+ format:
```
{
@@ -65,7 +71,11 @@ info:
# Authentication
- Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:
+ Authentication for registries is handled client side. The client has to send
+ authentication details to various endpoints that need to communicate with
+ registries, such as `POST /images/(name)/push`. These are sent as
+ `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
+ (JSON) string with the following structure:
```
{
@@ -76,9 +86,11 @@ info:
}
```
- The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required.
+ The `serveraddress` is a domain/IP without a protocol. Throughout this
+ structure, double quotes are required.
- If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials:
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
+ you can just pass this instead of credentials:
```
{
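
As a minimal standalone sketch (not part of the vendored diff, with placeholder credentials), the base64url-encoded JSON credential structure described above can be produced in Go roughly like this:

```
// Sketch: building the X-Registry-Auth header value from registry
// credentials, using the base64url encoding (RFC 4648, section 5)
// required by the API. Field values are placeholders.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

type authConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	Email         string `json:"email,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
	IdentityToken string `json:"identitytoken,omitempty"`
}

func main() {
	auth := authConfig{
		Username:      "hannibal",
		Password:      "xxxx",
		ServerAddress: "registry.example.com",
	}
	buf, err := json.Marshal(auth)
	if err != nil {
		panic(err)
	}
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(buf))
}
```

A real client would typically marshal the equivalent `AuthConfig` struct from the vendored `api/types` package rather than a hand-rolled type.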
@@ -104,7 +116,9 @@ tags:
- name: "Network"
x-displayName: "Networks"
description: |
- Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.
+ Networks are user-defined networks that containers can be attached to.
+ See the [networking documentation](https://docs.docker.com/network/)
+ for more information.
- name: "Volume"
x-displayName: "Volumes"
description: |
@@ -112,34 +126,46 @@ tags:
- name: "Exec"
x-displayName: "Exec"
description: |
- Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.
+ Run new commands inside running containers. Refer to the
+ [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/)
+ for more information.
+
+ To exec a command in a container, you first need to create an exec instance,
+ then start it. These two API endpoints are wrapped up in a single command-line
+ command, `docker exec`.
- To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
# Swarm things
- name: "Swarm"
x-displayName: "Swarm"
description: |
- Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.
+ Engines can be clustered together in a swarm. Refer to the
+ [swarm mode documentation](https://docs.docker.com/engine/swarm/)
+ for more information.
- name: "Node"
x-displayName: "Nodes"
description: |
- Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work.
+ Nodes are instances of the Engine participating in a swarm. Swarm mode
+ must be enabled for these endpoints to work.
- name: "Service"
x-displayName: "Services"
description: |
- Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.
+ Services are the definitions of tasks to run on a swarm. Swarm mode must
+ be enabled for these endpoints to work.
- name: "Task"
x-displayName: "Tasks"
description: |
- A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.
+ A task is a container running on a swarm. It is the atomic scheduling unit
+ of swarm. Swarm mode must be enabled for these endpoints to work.
- name: "Secret"
x-displayName: "Secrets"
description: |
- Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.
+ Secrets are sensitive data that can be used by services. Swarm mode must
+ be enabled for these endpoints to work.
- name: "Config"
x-displayName: "Configs"
description: |
- Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.
+ Configs are application configurations that can be used by services. Swarm
+ mode must be enabled for these endpoints to work.
# System things
- name: "Plugin"
x-displayName: "Plugins"
@@ -345,9 +371,11 @@ definitions:
RestartPolicy:
description: |
- The behavior to apply when the container exits. The default is not to restart.
+ The behavior to apply when the container exits. The default is not to
+ restart.
- An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server.
+ An ever increasing delay (double the previous delay, starting at 100ms) is
+ added before each restart to prevent flooding the server.
type: "object"
properties:
Name:
@@ -364,7 +392,8 @@ definitions:
- "on-failure"
MaximumRetryCount:
type: "integer"
- description: "If `on-failure` is used, the number of times to retry before giving up"
+ description: |
+ If `on-failure` is used, the number of times to retry before giving up.
Resources:
description: "A container's resources (cgroups config, ulimits, etc)"
@@ -372,7 +401,9 @@ definitions:
properties:
# Applicable to all platforms
CpuShares:
- description: "An integer value representing this container's relative CPU weight versus other containers."
+ description: |
+ An integer value representing this container's relative CPU weight
+ versus other containers.
type: "integer"
Memory:
description: "Memory limit in bytes."
@@ -381,7 +412,11 @@ definitions:
default: 0
# Applicable to UNIX platforms
CgroupParent:
- description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist."
+ description: |
+ Path to `cgroups` under which the container's `cgroup` is created. If
+ the path is not absolute, the path is considered to be relative to the
+ `cgroups` path of the init process. Cgroups are created if they do not
+ already exist.
type: "string"
BlkioWeight:
description: "Block IO weight (relative weight)."
@@ -390,7 +425,11 @@ definitions:
maximum: 1000
BlkioWeightDevice:
description: |
- Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`.
+ Block IO weight (relative device weight) in the form:
+
+ ```
+ [{"Path": "device_path", "Weight": weight}]
+ ```
type: "array"
items:
type: "object"
@@ -402,25 +441,41 @@ definitions:
minimum: 0
BlkioDeviceReadBps:
description: |
- Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ Limit read rate (bytes per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceWriteBps:
description: |
- Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ Limit write rate (bytes per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceReadIOps:
description: |
- Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ Limit read rate (IO per second) from a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
BlkioDeviceWriteIOps:
description: |
- Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ Limit write rate (IO per second) to a device, in the form:
+
+ ```
+ [{"Path": "device_path", "Rate": rate}]
+ ```
type: "array"
items:
$ref: "#/definitions/ThrottleDevice"
@@ -429,23 +484,31 @@ definitions:
type: "integer"
format: "int64"
CpuQuota:
- description: "Microseconds of CPU time that the container can get in a CPU period."
+ description: |
+ Microseconds of CPU time that the container can get in a CPU period.
type: "integer"
format: "int64"
CpuRealtimePeriod:
- description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks."
+ description: |
+ The length of a CPU real-time period in microseconds. Set to 0 to
+        allocate no time to real-time tasks.
type: "integer"
format: "int64"
CpuRealtimeRuntime:
- description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks."
+ description: |
+ The length of a CPU real-time runtime in microseconds. Set to 0 to
+        allocate no time to real-time tasks.
type: "integer"
format: "int64"
CpusetCpus:
- description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)"
+ description: |
+ CPUs in which to allow execution (e.g., `0-3`, `0,1`).
type: "string"
example: "0-3"
CpusetMems:
- description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems."
+ description: |
+ Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
+ effective on NUMA systems.
type: "string"
Devices:
description: "A list of devices to add to the container."
@@ -459,12 +522,19 @@ definitions:
type: "string"
example: "c 13:* rwm"
DeviceRequests:
- description: "a list of requests for devices to be sent to device drivers"
+ description: |
+ A list of requests for devices to be sent to device drivers.
type: "array"
items:
$ref: "#/definitions/DeviceRequest"
KernelMemory:
- description: "Kernel memory limit in bytes."
+ description: |
+ Kernel memory limit in bytes.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated
+ > `kmem.limit_in_bytes`.
type: "integer"
format: "int64"
example: 209715200
@@ -477,11 +547,15 @@ definitions:
type: "integer"
format: "int64"
MemorySwap:
- description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap."
+ description: |
+ Total memory limit (memory + swap). Set as `-1` to enable unlimited
+ swap.
type: "integer"
format: "int64"
MemorySwappiness:
- description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100."
+ description: |
+ Tune a container's memory swappiness behavior. Accepts an integer
+ between 0 and 100.
type: "integer"
format: "int64"
minimum: 0
@@ -494,18 +568,26 @@ definitions:
description: "Disable OOM Killer for the container."
type: "boolean"
Init:
- description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used."
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
type: "boolean"
x-nullable: true
PidsLimit:
description: |
- Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change.
+ Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
+ to not change.
type: "integer"
format: "int64"
x-nullable: true
Ulimits:
description: |
- A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"
+ A list of resource limits to set in the container. For example:
+
+ ```
+ {"Name": "nofile", "Soft": 1024, "Hard": 2048}
+ ```
type: "array"
items:
type: "object"
@@ -524,14 +606,18 @@ definitions:
description: |
The number of usable CPUs (Windows only).
- On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ On Windows Server containers, the processor resource controls are
+ mutually exclusive. The order of precedence is `CPUCount` first, then
+ `CPUShares`, and `CPUPercent` last.
type: "integer"
format: "int64"
CpuPercent:
description: |
The usable percentage of the available CPUs (Windows only).
- On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ On Windows Server containers, the processor resource controls are
+ mutually exclusive. The order of precedence is `CPUCount` first, then
+ `CPUShares`, and `CPUPercent` last.
type: "integer"
format: "int64"
IOMaximumIOps:
@@ -539,12 +625,37 @@ definitions:
type: "integer"
format: "int64"
IOMaximumBandwidth:
- description: "Maximum IO in bytes per second for the container system drive (Windows only)"
+ description: |
+ Maximum IO in bytes per second for the container system drive
+ (Windows only).
type: "integer"
format: "int64"
+ Limit:
+ description: |
+ An object describing a limit on resources which can be requested by a task.
+ type: "object"
+ properties:
+ NanoCPUs:
+ type: "integer"
+ format: "int64"
+ example: 4000000000
+ MemoryBytes:
+ type: "integer"
+ format: "int64"
+ example: 8272408576
+ Pids:
+ description: |
+ Limits the maximum number of PIDs in the container. Set `0` for unlimited.
+ type: "integer"
+ format: "int64"
+ default: 0
+ example: 100
+
ResourceObject:
- description: "An object describing the resources which can be advertised by a node and requested by a task"
+ description: |
+ An object describing the resources which can be advertised by a node and
+ requested by a task.
type: "object"
properties:
NanoCPUs:
@@ -559,7 +670,9 @@ definitions:
$ref: "#/definitions/GenericResources"
GenericResources:
- description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)"
+ description: |
+ User-defined resources can be either Integer resources (e.g, `SSD=3`) or
+ String resources (e.g, `GPU=UUID1`).
type: "array"
items:
type: "object"
@@ -606,18 +719,92 @@ definitions:
items:
type: "string"
Interval:
- description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ description: |
+ The time to wait between checks in nanoseconds. It should be 0 or at
+ least 1000000 (1 ms). 0 means inherit.
type: "integer"
Timeout:
- description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ description: |
+ The time to wait before considering the check to have hung. It should
+ be 0 or at least 1000000 (1 ms). 0 means inherit.
type: "integer"
Retries:
- description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit."
+ description: |
+ The number of consecutive failures needed to consider a container as
+ unhealthy. 0 means inherit.
type: "integer"
StartPeriod:
- description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ description: |
+ Start period for the container to initialize before starting
+ health-retries countdown in nanoseconds. It should be 0 or at least
+ 1000000 (1 ms). 0 means inherit.
type: "integer"
+ Health:
+ description: |
+ Health stores information about the container's healthcheck results.
+ type: "object"
+ properties:
+ Status:
+ description: |
+ Status is one of `none`, `starting`, `healthy` or `unhealthy`
+
+ - "none" Indicates there is no healthcheck
+ - "starting" Starting indicates that the container is not yet ready
+ - "healthy" Healthy indicates that the container is running correctly
+ - "unhealthy" Unhealthy indicates that the container has a problem
+ type: "string"
+ enum:
+ - "none"
+ - "starting"
+ - "healthy"
+ - "unhealthy"
+ example: "healthy"
+ FailingStreak:
+ description: "FailingStreak is the number of consecutive failures"
+ type: "integer"
+ example: 0
+ Log:
+ type: "array"
+ description: |
+ Log contains the last few results (oldest first)
+ items:
+ x-nullable: true
+ $ref: "#/definitions/HealthcheckResult"
+
+ HealthcheckResult:
+ description: |
+ HealthcheckResult stores information about a single run of a healthcheck probe
+ type: "object"
+ properties:
+ Start:
+ description: |
+ Date and time at which this check started in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "date-time"
+ example: "2020-01-04T10:44:24.496525531Z"
+ End:
+ description: |
+ Date and time at which this check ended in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2020-01-04T10:45:21.364524523Z"
+ ExitCode:
+ description: |
+ ExitCode meanings:
+
+ - `0` healthy
+ - `1` unhealthy
+ - `2` reserved (considered unhealthy)
+ - other values: error running probe
+ type: "integer"
+ example: 0
+ Output:
+ description: "Output from last check"
+ type: "string"
+
HostConfig:
description: "Container configuration that depends on the host we are running on"
allOf:
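
As an illustrative sketch (assuming the vendored `api/types/container` package is importable), the healthcheck fields documented above, with durations expressed in nanoseconds via `time.Duration`, map onto `container.HealthConfig`; the probe command and timings below are placeholders:

```
// Sketch: a healthcheck configuration matching the fields documented above
// (Interval, Timeout and StartPeriod are nanosecond durations; 0 means inherit).
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := &container.HealthConfig{
		// Run the probe inside the container with the default shell.
		Test:        []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz || exit 1"},
		Interval:    30 * time.Second,
		Timeout:     5 * time.Second,
		StartPeriod: 10 * time.Second,
		Retries:     3, // consecutive failures before the container turns "unhealthy"
	}
	fmt.Printf("%+v\n", hc)
}
```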
@@ -628,12 +815,44 @@ definitions:
Binds:
type: "array"
description: |
- A list of volume bindings for this container. Each volume binding is a string in one of these forms:
-
- - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
- - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
- - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
- - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
+ A list of volume bindings for this container. Each volume binding
+ is a string in one of these forms:
+
+ - `host-src:container-dest[:options]` to bind-mount a host path
+ into the container. Both `host-src`, and `container-dest` must
+ be an _absolute_ path.
+ - `volume-name:container-dest[:options]` to bind-mount a volume
+ managed by a volume driver into the container. `container-dest`
+ must be an _absolute_ path.
+
+ `options` is an optional, comma-delimited list of:
+
+ - `nocopy` disables automatic copying of data from the container
+ path to the volume. The `nocopy` flag only applies to named volumes.
+ - `[ro|rw]` mounts a volume read-only or read-write, respectively.
+ If omitted or set to `rw`, volumes are mounted read-write.
+ - `[z|Z]` applies SELinux labels to allow or deny multiple containers
+ to read and write to the same volume.
+ - `z`: a _shared_ content label is applied to the content. This
+ label indicates that multiple containers can share the volume
+ content, for both reading and writing.
+ - `Z`: a _private unshared_ label is applied to the content.
+ This label indicates that only the current container can use
+ a private volume. Labeling systems such as SELinux require
+ proper labels to be placed on volume content that is mounted
+ into a container. Without a label, the security system can
+ prevent a container's processes from using the content. By
+ default, the labels set by the host operating system are not
+ modified.
+ - `[[r]shared|[r]slave|[r]private]` specifies mount
+ [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+ This only applies to bind-mounted volumes, not internal volumes
+ or named volumes. Mount propagation requires the source mount
+ point (the location where the source directory is mounted in the
+ host operating system) to have the correct propagation properties.
+ For shared volumes, the source mount point must be set to `shared`.
+ For slave volumes, the mount must be set to either `shared` or
+ `slave`.
items:
type: "string"
ContainerIDFile:
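
A hedged example (not part of the diff) of the `Binds` grammar described above, expressed through the vendored `container.HostConfig` type; the paths, volume name, and options are placeholders:

```
// Sketch: HostConfig.Binds entries in the host-src:container-dest[:options]
// form documented above.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hostConfig := &container.HostConfig{
		Binds: []string{
			"/srv/app/config:/etc/app:ro,Z",    // host path, read-only, private SELinux label
			"appdata:/var/lib/app",             // named volume, read-write by default
			"/srv/app/cache:/cache:rw,rshared", // bind mount with rshared propagation
		},
	}
	fmt.Println(hostConfig.Binds)
}
```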
@@ -661,46 +880,50 @@ definitions:
type: "string"
NetworkMode:
type: "string"
- description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
- as a custom network's name to which this container should connect to."
+ description: |
+ Network mode to use for this container. Supported standard values
+ are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
+ other value is taken as a custom network's name to which this
+ container should connect to.
PortBindings:
$ref: "#/definitions/PortMap"
RestartPolicy:
$ref: "#/definitions/RestartPolicy"
AutoRemove:
type: "boolean"
- description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set."
+ description: |
+ Automatically remove the container when the container's process
+ exits. This has no effect if `RestartPolicy` is set.
VolumeDriver:
type: "string"
description: "Driver that this container uses to mount volumes."
VolumesFrom:
type: "array"
- description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`."
+ description: |
+ A list of volumes to inherit from another container, specified in
+ the form `<container name>[:<ro|rw>]`.
items:
type: "string"
Mounts:
- description: "Specification for mounts to be added to the container."
+ description: |
+ Specification for mounts to be added to the container.
type: "array"
items:
$ref: "#/definitions/Mount"
# Applicable to UNIX platforms
- Capabilities:
- type: "array"
- description: |
- A list of kernel capabilities to be available for container (this overrides the default set).
-
- Conflicts with options 'CapAdd' and 'CapDrop'"
- items:
- type: "string"
CapAdd:
type: "array"
- description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'"
+ description: |
+ A list of kernel capabilities to add to the container. Conflicts
+ with option 'Capabilities'.
items:
type: "string"
CapDrop:
type: "array"
- description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'"
+ description: |
+ A list of kernel capabilities to drop from the container. Conflicts
+ with option 'Capabilities'.
items:
type: "string"
CgroupnsMode:
@@ -709,13 +932,13 @@ definitions:
- "private"
- "host"
description: |
- cgroup namespace mode for the container. Possible values are:
+ cgroup namespace mode for the container. Possible values are:
- - `"private"`: the container runs in its own private cgroup namespace
- - `"host"`: use the host system's cgroup namespace
+ - `"private"`: the container runs in its own private cgroup namespace
+ - `"host"`: use the host system's cgroup namespace
- If not specified, the daemon default is used, which can either be `"private"`
- or `"host"`, depending on daemon version, kernel support and configuration.
+ If not specified, the daemon default is used, which can either be `"private"`
+ or `"host"`, depending on daemon version, kernel support and configuration.
Dns:
type: "array"
description: "A list of DNS servers for the container to use."
@@ -734,43 +957,49 @@ definitions:
ExtraHosts:
type: "array"
description: |
- A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+ A list of hostnames/IP mappings to add to the container's `/etc/hosts`
+ file. Specified in the form `["hostname:IP"]`.
items:
type: "string"
GroupAdd:
type: "array"
- description: "A list of additional groups that the container process will run as."
+ description: |
+ A list of additional groups that the container process will run as.
items:
type: "string"
IpcMode:
type: "string"
description: |
- IPC sharing mode for the container. Possible values are:
+ IPC sharing mode for the container. Possible values are:
- - `"none"`: own private IPC namespace, with /dev/shm not mounted
- - `"private"`: own private IPC namespace
- - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
- - `"container:<name|id>"`: join another (shareable) container's IPC namespace
- - `"host"`: use the host system's IPC namespace
+ - `"none"`: own private IPC namespace, with /dev/shm not mounted
+ - `"private"`: own private IPC namespace
+ - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
+ - `"container:<name|id>"`: join another (shareable) container's IPC namespace
+ - `"host"`: use the host system's IPC namespace
- If not specified, daemon default is used, which can either be `"private"`
- or `"shareable"`, depending on daemon version and configuration.
+ If not specified, daemon default is used, which can either be `"private"`
+ or `"shareable"`, depending on daemon version and configuration.
Cgroup:
type: "string"
description: "Cgroup to use for the container."
Links:
type: "array"
- description: "A list of links for the container in the form `container_name:alias`."
+ description: |
+ A list of links for the container in the form `container_name:alias`.
items:
type: "string"
OomScoreAdj:
type: "integer"
- description: "An integer value containing the score given to the container in order to tune OOM killer preferences."
+ description: |
+ An integer value containing the score given to the container in
+ order to tune OOM killer preferences.
example: 500
PidMode:
type: "string"
description: |
- Set the PID (Process) Namespace mode for the container. It can be either:
+ Set the PID (Process) Namespace mode for the container. It can be
+ either:
- `"container:<name|id>"`: joins another container's PID namespace
- `"host"`: use the host's PID namespace inside the container
@@ -783,11 +1012,13 @@ definitions:
Allocates an ephemeral host port for all of a container's
exposed ports.
- Ports are de-allocated when the container stops and allocated when the container starts.
- The allocated port might be changed when restarting the container.
+ Ports are de-allocated when the container stops and allocated when
+ the container starts. The allocated port might be changed when
+ restarting the container.
- The port is selected from the ephemeral port range that depends on the kernel.
- For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
+ The port is selected from the ephemeral port range that depends on
+ the kernel. For example, on Linux the range is defined by
+ `/proc/sys/net/ipv4/ip_local_port_range`.
ReadonlyRootfs:
type: "boolean"
description: "Mount the container's root filesystem as read only."
@@ -806,7 +1037,12 @@ definitions:
Tmpfs:
type: "object"
description: |
- A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+ A map of container directories which should be replaced by tmpfs
+ mounts, and their corresponding mount options. For example:
+
+ ```
+ { "/run": "rw,noexec,nosuid,size=65536k" }
+ ```
additionalProperties:
type: "string"
UTSMode:
@@ -814,15 +1050,23 @@ definitions:
description: "UTS namespace to use for the container."
UsernsMode:
type: "string"
- description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled."
+ description: |
+ Sets the usernamespace mode for the container when usernamespace
+ remapping option is enabled.
ShmSize:
type: "integer"
- description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB."
+ description: |
+ Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
minimum: 0
Sysctls:
type: "object"
description: |
- A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}`
+ A list of kernel parameters (sysctls) to set in the container.
+ For example:
+
+ ```
+ {"net.ipv4.ip_forward": "1"}
+ ```
additionalProperties:
type: "string"
Runtime:
@@ -831,7 +1075,8 @@ definitions:
# Applicable to Windows
ConsoleSize:
type: "array"
- description: "Initial console size, as an `[height, width]` array. (Windows only)"
+ description: |
+ Initial console size, as an `[height, width]` array. (Windows only)
minItems: 2
maxItems: 2
items:
@@ -839,19 +1084,24 @@ definitions:
minimum: 0
Isolation:
type: "string"
- description: "Isolation technology of the container. (Windows only)"
+ description: |
+ Isolation technology of the container. (Windows only)
enum:
- "default"
- "process"
- "hyperv"
MaskedPaths:
type: "array"
- description: "The list of paths to be masked inside the container (this overrides the default set of paths)"
+ description: |
+ The list of paths to be masked inside the container (this overrides
+ the default set of paths).
items:
type: "string"
ReadonlyPaths:
type: "array"
- description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)"
+ description: |
+ The list of paths to be set as read-only inside the container
+ (this overrides the default set of paths).
items:
type: "string"
@@ -892,7 +1142,8 @@ definitions:
- {}
default: {}
Tty:
- description: "Attach standard streams to a TTY, including `stdin` if it is not closed."
+ description: |
+ Attach standard streams to a TTY, including `stdin` if it is not closed.
type: "boolean"
default: false
OpenStdin:
@@ -905,12 +1156,15 @@ definitions:
default: false
Env:
description: |
- A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value.
+ A list of environment variables to set inside the container in the
+ form `["VAR=value", ...]`. A variable without `=` is removed from the
+ environment, rather than to have an empty value.
type: "array"
items:
type: "string"
Cmd:
- description: "Command to run specified as a string or an array of strings."
+ description: |
+ Command to run specified as a string or an array of strings.
type: "array"
items:
type: "string"
@@ -920,10 +1174,13 @@ definitions:
description: "Command is already escaped (Windows only)"
type: "boolean"
Image:
- description: "The name of the image to use when creating the container"
+ description: |
+        The name of the image to use when creating the container.
type: "string"
Volumes:
- description: "An object mapping mount point paths inside the container to empty objects."
+ description: |
+ An object mapping mount point paths inside the container to empty
+ objects.
type: "object"
additionalProperties:
type: "object"
@@ -937,7 +1194,9 @@ definitions:
description: |
The entry point for the container as a string or an array of strings.
- If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+ If the array consists of exactly one empty string (`[""]`) then the
+ entry point is reset to system default (i.e., the entry point used by
+ docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
type: "array"
items:
type: "string"
@@ -948,7 +1207,8 @@ definitions:
description: "MAC address of the container."
type: "string"
OnBuild:
- description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`."
+ description: |
+ `ONBUILD` metadata that were defined in the image's `Dockerfile`.
type: "array"
items:
type: "string"
@@ -958,7 +1218,8 @@ definitions:
additionalProperties:
type: "string"
StopSignal:
- description: "Signal to stop a container as a string or unsigned integer."
+ description: |
+ Signal to stop a container as a string or unsigned integer.
type: "string"
default: "SIGTERM"
StopTimeout:
@@ -966,11 +1227,48 @@ definitions:
type: "integer"
default: 10
Shell:
- description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell."
+ description: |
+ Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.
type: "array"
items:
type: "string"
+ NetworkingConfig:
+ description: |
+ NetworkingConfig represents the container's networking configuration for
+ each of its interfaces.
+ It is used for the networking configs specified in the `docker create`
+ and `docker network connect` commands.
+ type: "object"
+ properties:
+ EndpointsConfig:
+ description: |
+ A mapping of network name to endpoint configuration for that network.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ # putting an example here, instead of using the example values from
+ # /definitions/EndpointSettings, because containers/create currently
+ # does not support attaching to multiple networks, so the example request
+ # would be confusing if it showed that multiple networks can be contained
+ # in the EndpointsConfig.
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323)
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+
NetworkSettings:
description: "NetworkSettings exposes the network settings in the API"
type: "object"
@@ -1413,13 +1711,16 @@ definitions:
type: "string"
Scope:
type: "string"
- description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level."
+ description: |
+ The level at which the volume exists. Either `global` for cluster-wide,
+ or `local` for machine level.
default: "local"
x-nullable: false
enum: ["local", "global"]
Options:
type: "object"
- description: "The driver specific options used when creating the volume."
+ description: |
+ The driver specific options used when creating the volume.
additionalProperties:
type: "string"
UsageData:
@@ -1537,7 +1838,12 @@ definitions:
type: "string"
default: "default"
Config:
- description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`"
+ description: |
+ List of IPAM configuration options, specified as a map:
+
+ ```
+ {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+ ```
type: "array"
items:
type: "object"
@@ -1599,12 +1905,24 @@ definitions:
Shared:
type: "boolean"
Size:
+ description: |
+ Amount of disk space used by the build cache (in bytes).
type: "integer"
CreatedAt:
- type: "integer"
+ description: |
+ Date and time at which the build cache was created in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
LastUsedAt:
- type: "integer"
+ description: |
+ Date and time at which the build cache was last used in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
x-nullable: true
+ example: "2017-08-09T07:09:37.632105588Z"
UsageCount:
type: "integer"
@@ -1883,7 +2201,9 @@ definitions:
x-nullable: false
example: "tiborvass/sample-volume-plugin"
Enabled:
- description: "True if the plugin is running. False if the plugin is not running, only installed."
+ description: |
+ True if the plugin is running. False if the plugin is not running,
+ only installed.
type: "boolean"
x-nullable: false
example: true
@@ -2085,13 +2405,16 @@ definitions:
ObjectVersion:
description: |
- The version number of the object such as node, service, etc. This is needed to avoid conflicting writes.
- The client must send the version number along with the modified specification when updating these objects.
- This approach ensures safe concurrency and determinism in that the change on the object
- may not be applied if the version number has changed from the last read. In other words,
- if two update requests specify the same base version, only one of the requests can succeed.
- As a result, two separate update requests that happen at the same time will not
- unintentionally overwrite each other.
+ The version number of the object such as node, service, etc. This is needed
+ to avoid conflicting writes. The client must send the version number along
+ with the modified specification when updating these objects.
+
+ This approach ensures safe concurrency and determinism in that the change
+ on the object may not be applied if the version number has changed from the
+ last read. In other words, if two update requests specify the same base
+ version, only one of the requests can succeed. As a result, two separate
+ update requests that happen at the same time will not unintentionally
+ overwrite each other.
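+
+ As an illustrative (non-normative) sketch of the flow, where `my-service`
+ is a placeholder service name and 373531 a hypothetical version index:
+
+ ```
+ GET  /services/my-service                      -> "Version": { "Index": 373531 }
+ POST /services/my-service/update?version=373531    (body: the modified spec)
+ ```
+
+ If another update raised the version in the meantime, the second request
+ is rejected and the client has to re-read the object and retry.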
type: "object"
properties:
Index:
@@ -2260,17 +2583,23 @@ definitions:
Name: "vieux/sshfs:latest"
TLSInfo:
- description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate"
+ description: |
+ Information about the issuer of leaf TLS certificates and the trusted root
+ CA certificate.
type: "object"
properties:
TrustRoot:
- description: "The root CA certificate(s) that are used to validate leaf TLS certificates"
+ description: |
+ The root CA certificate(s) that are used to validate leaf TLS
+ certificates.
type: "string"
CertIssuerSubject:
- description: "The base64-url-safe-encoded raw subject bytes of the issuer"
+ description: |
+ The base64-url-safe-encoded raw subject bytes of the issuer.
type: "string"
CertIssuerPublicKey:
- description: "The base64-url-safe-encoded raw public key bytes of the issuer"
+ description: |
+ The base64-url-safe-encoded raw public key bytes of the issuer.
type: "string"
example:
TrustRoot: |
@@ -2366,7 +2695,9 @@ definitions:
x-nullable: true
properties:
TaskHistoryRetentionLimit:
- description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks."
+ description: |
+ The number of historic tasks to keep per instance or node. If
+ negative, never remove completed or failed tasks.
type: "integer"
format: "int64"
example: 10
@@ -2380,26 +2711,34 @@ definitions:
format: "uint64"
example: 10000
KeepOldSnapshots:
- description: "The number of snapshots to keep beyond the current snapshot."
+ description: |
+ The number of snapshots to keep beyond the current snapshot.
type: "integer"
format: "uint64"
LogEntriesForSlowFollowers:
- description: "The number of log entries to keep around to sync up slow followers after a snapshot is created."
+ description: |
+ The number of log entries to keep around to sync up slow followers
+ after a snapshot is created.
type: "integer"
format: "uint64"
example: 500
ElectionTick:
description: |
- The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`.
+ The number of ticks that a follower will wait for a message from
+ the leader before becoming a candidate and starting an election.
+ `ElectionTick` must be greater than `HeartbeatTick`.
- A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
type: "integer"
example: 3
HeartbeatTick:
description: |
- The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers.
+ The number of ticks between heartbeats. Every HeartbeatTick ticks,
+ the leader will send a heartbeat to the followers.
- A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
type: "integer"
example: 1
Dispatcher:
@@ -2408,7 +2747,8 @@ definitions:
x-nullable: true
properties:
HeartbeatPeriod:
- description: "The delay for an agent to send a heartbeat to the dispatcher."
+ description: |
+ The delay for an agent to send a heartbeat to the dispatcher.
type: "integer"
format: "int64"
example: 5000000000
@@ -2423,36 +2763,53 @@ definitions:
format: "int64"
example: 7776000000000000
ExternalCAs:
- description: "Configuration for forwarding signing requests to an external certificate authority."
+ description: |
+ Configuration for forwarding signing requests to an external
+ certificate authority.
type: "array"
items:
type: "object"
properties:
Protocol:
- description: "Protocol for communication with the external CA (currently only `cfssl` is supported)."
+ description: |
+ Protocol for communication with the external CA (currently
+ only `cfssl` is supported).
type: "string"
enum:
- "cfssl"
default: "cfssl"
URL:
- description: "URL where certificate signing requests should be sent."
+ description: |
+ URL where certificate signing requests should be sent.
type: "string"
Options:
- description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver."
+ description: |
+ An object with key/value pairs that are interpreted as
+ protocol-specific options for the external CA driver.
type: "object"
additionalProperties:
type: "string"
CACert:
- description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)."
+ description: |
+ The root CA certificate (in PEM format) this external CA uses
+ to issue TLS certificates (assumed to be the current swarm
+ root CA certificate if not provided).
type: "string"
SigningCACert:
- description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format."
+ description: |
+ The desired signing CA certificate for all swarm node TLS leaf
+ certificates, in PEM format.
type: "string"
SigningCAKey:
- description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format."
+ description: |
+ The desired signing CA key for all swarm node TLS leaf certificates,
+ in PEM format.
type: "string"
ForceRotate:
- description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`"
+ description: |
+ An integer whose purpose is to force swarm to generate a new
+ signing CA certificate and key, if none have been specified in
+ `SigningCACert` and `SigningCAKey`.
format: "uint64"
type: "integer"
EncryptionConfig:
@@ -2460,7 +2817,9 @@ definitions:
type: "object"
properties:
AutoLockManagers:
- description: "If set, generate a key and use it to lock data stored on the managers."
+ description: |
+ If set, generate a key and use it to lock data stored on the
+ managers.
type: "boolean"
example: false
TaskDefaults:
@@ -2526,7 +2885,8 @@ definitions:
TLSInfo:
$ref: "#/definitions/TLSInfo"
RootRotationInProgress:
- description: "Whether there is currently a root CA rotation in progress for the swarm"
+ description: |
+ Whether there is currently a root CA rotation in progress for the swarm.
type: "boolean"
example: false
DataPathPort:
@@ -2540,7 +2900,8 @@ definitions:
example: 4789
DefaultAddrPool:
description: |
- Default Address Pool specifies default subnet pools for global scope networks.
+ Default Address Pool specifies default subnet pools for global scope
+ networks.
type: "array"
items:
type: "string"
@@ -2548,7 +2909,8 @@ definitions:
example: ["10.10.0.0/16", "20.20.0.0/16"]
SubnetSize:
description: |
- SubnetSize specifies the subnet size of the networks created from the default subnet pool
+ SubnetSize specifies the subnet size of the networks created from the
+ default subnet pool.
type: "integer"
format: "uint32"
maximum: 29
@@ -2608,7 +2970,9 @@ definitions:
PluginPrivilege:
type: "array"
items:
- description: "Describes a permission accepted by the user upon installing the plugin."
+ description: |
+ Describes a permission accepted by the user upon installing the
+ plugin.
type: "object"
properties:
Name:
@@ -2650,10 +3014,13 @@ definitions:
items:
type: "string"
Hostname:
- description: "The hostname to use for the container, as a valid RFC 1123 hostname."
+ description: |
+ The hostname to use for the container, as a valid
+ [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.
type: "string"
Env:
- description: "A list of environment variables in the form `VAR=value`."
+ description: |
+ A list of environment variables in the form `VAR=value`.
type: "array"
items:
type: "string"
@@ -2665,7 +3032,8 @@ definitions:
type: "string"
Groups:
type: "array"
- description: "A list of additional groups that the container process will run as."
+ description: |
+ A list of additional groups that the container process will run as.
items:
type: "string"
Privileges:
@@ -2681,37 +3049,43 @@ definitions:
example: "0bt9dmxjvjiqermk6xrop3ekq"
description: |
Load credential spec from a Swarm Config with the given ID.
- The specified config must also be present in the Configs field with the Runtime property set.
+ The specified config must also be present in the Configs
+ field with the Runtime property set.
<p><br /></p>
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
File:
type: "string"
example: "spec.json"
description: |
- Load credential spec from this file. The file is read by the daemon, and must be present in the
- `CredentialSpecs` subdirectory in the docker data directory, which defaults to
- `C:\ProgramData\Docker\` on Windows.
+ Load credential spec from this file. The file is read by
+ the daemon, and must be present in the `CredentialSpecs`
+ subdirectory in the docker data directory, which defaults
+ to `C:\ProgramData\Docker\` on Windows.
- For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
+ For example, specifying `spec.json` loads
+ `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
<p><br /></p>
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
Registry:
type: "string"
description: |
- Load credential spec from this value in the Windows registry. The specified registry value must be
- located in:
+ Load credential spec from this value in the Windows
+ registry. The specified registry value must be located in:
`HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
<p><br /></p>
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive.
+ > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
+ > and `CredentialSpec.Config` are mutually exclusive.
SELinuxContext:
type: "object"
description: "SELinux labels of the container"
@@ -2741,7 +3115,9 @@ definitions:
description: "Mount the container's root filesystem as read only."
type: "boolean"
Mounts:
- description: "Specification for mounts to be added to containers created as part of the service."
+ description: |
+ Specification for mounts to be added to containers created as part
+ of the service.
type: "array"
items:
$ref: "#/definitions/Mount"
@@ -2749,7 +3125,9 @@ definitions:
description: "Signal to stop the container."
type: "string"
StopGracePeriod:
- description: "Amount of time to wait for the container to terminate before forcefully killing it."
+ description: |
+ Amount of time to wait for the container to terminate before
+ forcefully killing it.
type: "integer"
format: "int64"
HealthCheck:
@@ -2766,7 +3144,9 @@ definitions:
items:
type: "string"
DNSConfig:
- description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)."
+ description: |
+ Specification for DNS related configurations in resolver configuration
+ file (`resolv.conf`).
type: "object"
properties:
Nameservers:
@@ -2780,22 +3160,28 @@ definitions:
items:
type: "string"
Options:
- description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)."
+ description: |
+ A list of internal resolver variables to be modified (e.g.,
+ `debug`, `ndots:3`, etc.).
type: "array"
items:
type: "string"
Secrets:
- description: "Secrets contains references to zero or more secrets that will be exposed to the service."
+ description: |
+ Secrets contains references to zero or more secrets that will be
+ exposed to the service.
type: "array"
items:
type: "object"
properties:
File:
- description: "File represents a specific target that is backed by a file."
+ description: |
+ File represents a specific target that is backed by a file.
type: "object"
properties:
Name:
- description: "Name represents the final filename in the filesystem."
+ description: |
+ Name represents the final filename in the filesystem.
type: "string"
UID:
description: "UID represents the file UID."
@@ -2808,15 +3194,20 @@ definitions:
type: "integer"
format: "uint32"
SecretID:
- description: "SecretID represents the ID of the specific secret that we're referencing."
+ description: |
+ SecretID represents the ID of the specific secret that we're
+ referencing.
type: "string"
SecretName:
description: |
- SecretName is the name of the secret that this references, but this is just provided for
- lookup/display purposes. The secret in the reference will be identified by its ID.
+ SecretName is the name of the secret that this references,
+ but this is just provided for lookup/display purposes. The
+ secret in the reference will be identified by its ID.
type: "string"
Configs:
- description: "Configs contains references to zero or more configs that will be exposed to the service."
+ description: |
+ Configs contains references to zero or more configs that will be
+ exposed to the service.
type: "array"
items:
type: "object"
@@ -2831,7 +3222,8 @@ definitions:
type: "object"
properties:
Name:
- description: "Name represents the final filename in the filesystem."
+ description: |
+ Name represents the final filename in the filesystem.
type: "string"
UID:
description: "UID represents the file UID."
@@ -2845,29 +3237,39 @@ definitions:
format: "uint32"
Runtime:
description: |
- Runtime represents a target that is not mounted into the container but is used by the task
+ Runtime represents a target that is not mounted into the
+ container but is used by the task.
<p><br /></p>
- > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive
type: "object"
ConfigID:
- description: "ConfigID represents the ID of the specific config that we're referencing."
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
type: "string"
ConfigName:
description: |
- ConfigName is the name of the config that this references, but this is just provided for
- lookup/display purposes. The config in the reference will be identified by its ID.
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
type: "string"
Isolation:
type: "string"
- description: "Isolation technology of the containers running the service. (Windows only)"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
enum:
- "default"
- "process"
- "hyperv"
Init:
- description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used."
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
type: "boolean"
x-nullable: true
Sysctls:
@@ -2883,10 +3285,11 @@ definitions:
additionalProperties:
type: "string"
# This option is not used by Windows containers
- Capabilities:
+ CapabilityAdd:
type: "array"
description: |
- A list of kernel capabilities to be available for container (this overrides the default set).
+ A list of kernel capabilities to add to the default set
+ for the container.
items:
type: "string"
example:
@@ -2894,6 +3297,31 @@ definitions:
- "CAP_SYS_ADMIN"
- "CAP_SYS_CHROOT"
- "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
NetworkAttachmentSpec:
description: |
Read-only spec type for non-swarm containers attached to swarm overlay
@@ -2911,17 +3339,21 @@ definitions:
description: "ID of the container represented by this task"
type: "string"
Resources:
- description: "Resource requirements which apply to each individual container created as part of the service."
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
type: "object"
properties:
Limits:
description: "Define resources limits."
- $ref: "#/definitions/ResourceObject"
+ $ref: "#/definitions/Limit"
Reservation:
description: "Define resources reservation."
$ref: "#/definitions/ResourceObject"
RestartPolicy:
- description: "Specification for the restart policy which applies to containers created as part of this service."
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
type: "object"
properties:
Condition:
@@ -2936,12 +3368,16 @@ definitions:
type: "integer"
format: "int64"
MaxAttempts:
- description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)."
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
type: "integer"
format: "int64"
default: 0
Window:
- description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)."
+ description: |
+ Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
type: "integer"
format: "int64"
default: 0
@@ -2949,7 +3385,27 @@ definitions:
type: "object"
properties:
Constraints:
- description: "An array of constraints."
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
type: "array"
items:
type: "string"
@@ -2957,8 +3413,13 @@ definitions:
- "node.hostname!=node3.corp.example.com"
- "node.role!=manager"
- "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
Preferences:
- description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
type: "array"
items:
type: "object"
@@ -2967,7 +3428,8 @@ definitions:
type: "object"
properties:
SpreadDescriptor:
- description: "label descriptor, such as engine.labels.az"
+ description: |
+ label descriptor, such as `engine.labels.az`.
type: "string"
example:
- Spread:
@@ -2975,7 +3437,9 @@ definitions:
- Spread:
SpreadDescriptor: "node.labels.rack"
MaxReplicas:
- description: "Maximum number of replicas for per node (default value is 0, which is unlimited)"
+ description: |
+ Maximum number of replicas per node (default value is 0, which is
+ unlimited).
type: "integer"
format: "int64"
default: 0
@@ -2989,10 +3453,13 @@ definitions:
items:
$ref: "#/definitions/Platform"
ForceUpdate:
- description: "A counter that triggers an update even if no relevant parameters have been changed."
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
type: "integer"
Runtime:
- description: "Runtime is the type of runtime specified for the task executor."
+ description: |
+ Runtime is the type of runtime specified for the task executor.
type: "string"
Networks:
description: "Specifies which networks the service should attach to."
@@ -3000,7 +3467,10 @@ definitions:
items:
$ref: "#/definitions/NetworkAttachmentConfig"
LogDriver:
- description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
type: "object"
properties:
Name:
@@ -3086,6 +3556,12 @@ definitions:
type: "integer"
DesiredState:
$ref: "#/definitions/TaskState"
+ JobIteration:
+ description: |
+ If the Service this Task belongs to is a job-mode service, contains
+ the JobIteration of the Service this Task was created for. Absent if
+ the Task was created for a Replicated or Global Service.
+ $ref: "#/definitions/ObjectVersion"
example:
ID: "0kzzo1i0y4jz6027t0k7aezc7"
Version:
@@ -3178,12 +3654,37 @@ definitions:
format: "int64"
Global:
type: "object"
+ ReplicatedJob:
+ description: |
+ The mode used for services with a finite number of tasks that run
+ to a completed state.
+ type: "object"
+ properties:
+ MaxConcurrent:
+ description: |
+ The maximum number of replicas to run simultaneously.
+ type: "integer"
+ format: "int64"
+ default: 1
+ TotalCompletions:
+ description: |
+ The total number of replicas desired to reach the Completed
+ state. If unset, will default to the value of `MaxConcurrent`.
+ type: "integer"
+ format: "int64"
+ GlobalJob:
+ description: |
+ The mode used for services which run a task to the completed state
+ on each valid node.
+ type: "object"
UpdateConfig:
description: "Specification for the update strategy of the service."
type: "object"
properties:
Parallelism:
- description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)."
+ description: |
+ Maximum number of tasks to be updated in one iteration (0 means
+ unlimited parallelism).
type: "integer"
format: "int64"
Delay:
@@ -3191,22 +3692,32 @@ definitions:
type: "integer"
format: "int64"
FailureAction:
- description: "Action to take if an updated task fails to run, or stops running during the update."
+ description: |
+ Action to take if an updated task fails to run, or stops running
+ during the update.
type: "string"
enum:
- "continue"
- "pause"
- "rollback"
Monitor:
- description: "Amount of time to monitor each updated task for failures, in nanoseconds."
+ description: |
+ Amount of time to monitor each updated task for failures, in
+ nanoseconds.
type: "integer"
format: "int64"
MaxFailureRatio:
- description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1."
+ description: |
+ The fraction of tasks that may fail during an update before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
type: "number"
default: 0
Order:
- description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ description: |
+ The order of operations when rolling out an updated task. Either
+ the old task is shut down before the new task is started, or the
+ new task is started before the old task is shut down.
type: "string"
enum:
- "stop-first"
@@ -3216,29 +3727,42 @@ definitions:
type: "object"
properties:
Parallelism:
- description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)."
+ description: |
+ Maximum number of tasks to be rolled back in one iteration (0 means
+ unlimited parallelism).
type: "integer"
format: "int64"
Delay:
- description: "Amount of time between rollback iterations, in nanoseconds."
+ description: |
+ Amount of time between rollback iterations, in nanoseconds.
type: "integer"
format: "int64"
FailureAction:
- description: "Action to take if an rolled back task fails to run, or stops running during the rollback."
+ description: |
+ Action to take if a rolled back task fails to run, or stops
+ running during the rollback.
type: "string"
enum:
- "continue"
- "pause"
Monitor:
- description: "Amount of time to monitor each rolled back task for failures, in nanoseconds."
+ description: |
+ Amount of time to monitor each rolled back task for failures, in
+ nanoseconds.
type: "integer"
format: "int64"
MaxFailureRatio:
- description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1."
+ description: |
+ The fraction of tasks that may fail during a rollback before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
type: "number"
default: 0
Order:
- description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ description: |
+ The order of operations when rolling back a task. Either the old
+ task is shut down before the new task is started, or the new task
+ is started before the old task is shut down.
type: "string"
enum:
- "stop-first"
@@ -3301,7 +3825,9 @@ definitions:
- "dnsrr"
default: "vip"
Ports:
- description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used."
+ description: |
+ List of exposed ports that this service is accessible on from the
+ outside. Ports can only be provided if `vip` resolution mode is used.
type: "array"
items:
$ref: "#/definitions/EndpointPortConfig"
@@ -3364,7 +3890,8 @@ definitions:
type: "object"
properties:
RunningTasks:
- description: "The number of tasks for the service currently in the Running state"
+ description: |
+ The number of tasks for the service currently in the Running state.
type: "integer"
format: "uint64"
example: 7
@@ -3378,6 +3905,39 @@ definitions:
type: "integer"
format: "uint64"
example: 10
+ CompletedTasks:
+ description: |
+ The number of tasks for a job that are in the Completed state.
+ This field must be cross-referenced with the service type, as the
+ value of 0 may mean the service is not in a job mode, or it may
+ mean the job-mode service has no tasks in the Completed state yet.
+ type: "integer"
+ format: "uint64"
+ JobStatus:
+ description: |
+ The status of the service when it is in one of ReplicatedJob or
+ GlobalJob modes. Absent on Replicated and Global mode services. The
+ JobIteration is an ObjectVersion, but unlike the Service's version,
+ does not need to be sent with an update request.
+ type: "object"
+ properties:
+ JobIteration:
+ description: |
+ JobIteration is a value increased each time a Job is executed,
+ successfully or otherwise. "Executed", in this case, means the
+ job as a whole has been started, not that an individual Task has
+ been launched. A job is "Executed" when its ServiceSpec is
+ updated. JobIteration can be used to disambiguate Tasks belonging
+ to different executions of a job. Though JobIteration will
+ increase with each subsequent execution, it may not necessarily
+ increase by 1, and so JobIteration should not be used to count how
+ many times the job has been executed.
+ $ref: "#/definitions/ObjectVersion"
+ LastExecution:
+ description: |
+ The last time, as observed by the server, that this job was
+ started.
+ type: "string"
+ format: "dateTime"
example:
ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
Version:
@@ -3566,7 +4126,7 @@ definitions:
com.example.some-other-label: "some-other-value"
Data:
description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
data to store as secret.
This field is only used to _create_ a secret, and is not returned by
@@ -3574,7 +4134,9 @@ definitions:
type: "string"
example: ""
Driver:
- description: "Name of the secrets driver used to fetch the secret's value from an external secret store"
+ description: |
+ Name of the secrets driver used to fetch the secret's value from an
+ external secret store.
$ref: "#/definitions/Driver"
Templating:
description: |
@@ -3616,7 +4178,7 @@ definitions:
type: "string"
Data:
description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
config data.
type: "string"
Templating:
@@ -3643,6 +4205,168 @@ definitions:
Spec:
$ref: "#/definitions/ConfigSpec"
+ ContainerState:
+ description: |
+ ContainerState stores the container's running state. It's part of ContainerJSONBase
+ and will be returned by the "inspect" command.
+ type: "object"
+ properties:
+ Status:
+ description: |
+ String representation of the container state. Can be one of "created",
+ "running", "paused", "restarting", "removing", "exited", or "dead".
+ type: "string"
+ enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
+ example: "running"
+ Running:
+ description: |
+ Whether this container is running.
+
+ Note that a running container can be _paused_. The `Running` and `Paused`
+ booleans are not mutually exclusive:
+
+ When pausing a container (on Linux), the freezer cgroup is used to suspend
+ all processes in the container. Freezing the process requires the process to
+ be running. As a result, paused containers are both `Running` _and_ `Paused`.
+
+ Use the `Status` field instead to determine if a container's state is "running".
+ type: "boolean"
+ example: true
+ Paused:
+ description: "Whether this container is paused."
+ type: "boolean"
+ example: false
+ Restarting:
+ description: "Whether this container is restarting."
+ type: "boolean"
+ example: false
+ OOMKilled:
+ description: |
+ Whether this container has been killed because it ran out of memory.
+ type: "boolean"
+ example: false
+ Dead:
+ type: "boolean"
+ example: false
+ Pid:
+ description: "The process ID of this container"
+ type: "integer"
+ example: 1234
+ ExitCode:
+ description: "The last exit code of this container"
+ type: "integer"
+ example: 0
+ Error:
+ type: "string"
+ StartedAt:
+ description: "The time when this container was last started."
+ type: "string"
+ example: "2020-01-06T09:06:59.461876391Z"
+ FinishedAt:
+ description: "The time when this container last exited."
+ type: "string"
+ example: "2020-01-06T09:07:59.461876391Z"
+ Health:
+ x-nullable: true
+ $ref: "#/definitions/Health"
+
+ SystemVersion:
+ type: "object"
+ description: |
+ Response of Engine API: GET "/version"
+ properties:
+ Platform:
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ type: "string"
+ Components:
+ type: "array"
+ description: |
+ Information about system components
+ items:
+ type: "object"
+ x-go-name: ComponentVersion
+ required: [Name, Version]
+ properties:
+ Name:
+ description: |
+ Name of the component
+ type: "string"
+ example: "Engine"
+ Version:
+ description: |
+ Version of the component
+ type: "string"
+ x-nullable: false
+ example: "19.03.12"
+ Details:
+ description: |
+ Key/value pairs of strings with additional information about the
+ component. These values are intended for informational purposes
+ only, and their content is not defined, and not part of the API
+ specification.
+
+ These messages can be printed by the client as information to the user.
+ type: "object"
+ x-nullable: true
+ Version:
+ description: "The version of the daemon"
+ type: "string"
+ example: "19.03.12"
+ ApiVersion:
+ description: |
+ The default (and highest) API version that is supported by the daemon
+ type: "string"
+ example: "1.40"
+ MinAPIVersion:
+ description: |
+ The minimum API version that is supported by the daemon
+ type: "string"
+ example: "1.12"
+ GitCommit:
+ description: |
+ The Git commit of the source code that was used to build the daemon
+ type: "string"
+ example: "48a66213fe"
+ GoVersion:
+ description: |
+ The version of Go used to compile the daemon, and the version of the Go
+ runtime in use.
+ type: "string"
+ example: "go1.13.14"
+ Os:
+ description: |
+ The operating system that the daemon is running on ("linux" or "windows")
+ type: "string"
+ example: "linux"
+ Arch:
+ description: |
+ The architecture that the daemon is running on
+ type: "string"
+ example: "amd64"
+ KernelVersion:
+ description: |
+ The kernel version (`uname -r`) that the daemon is running on.
+
+ This field is omitted when empty.
+ type: "string"
+ example: "4.19.76-linuxkit"
+ Experimental:
+ description: |
+ Indicates if the daemon is started with experimental features enabled.
+
+ This field is omitted when empty / false.
+ type: "boolean"
+ example: true
+ BuildTime:
+ description: |
+ The date and time that the daemon was compiled.
+ type: "string"
+ example: "2020-06-22T15:49:27.000000000+00:00"
+
+
SystemInfo:
type: "object"
properties:
@@ -3717,44 +4441,6 @@ definitions:
on Windows.
type: "string"
example: "/var/lib/docker"
- SystemStatus:
- description: |
- Status information about this node (standalone Swarm API).
-
- <p><br /></p>
-
- > **Note**: The information returned in this field is only propagated
- > by the Swarm standalone API, and is empty (`null`) when using
- > built-in swarm mode.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- example:
- - ["Role", "primary"]
- - ["State", "Healthy"]
- - ["Strategy", "spread"]
- - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"]
- - ["Nodes", "2"]
- - [" swarm-agent-00", "192.168.99.102:2376"]
- - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"]
- - [" └ Status", "Healthy"]
- - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"]
- - [" └ Reserved CPUs", "0 / 1"]
- - [" └ Reserved Memory", "0 B / 1.021 GiB"]
- - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
- - [" └ UpdatedAt", "2017-08-09T10:03:46Z"]
- - [" └ ServerVersion", "17.06.0-ce"]
- - [" swarm-manager", "192.168.99.101:2376"]
- - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"]
- - [" └ Status", "Healthy"]
- - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"]
- - [" └ Reserved CPUs", "0 / 1"]
- - [" └ Reserved Memory", "0 B / 1.021 GiB"]
- - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
- - [" └ UpdatedAt", "2017-08-09T10:04:11Z"]
- - [" └ ServerVersion", "17.06.0-ce"]
Plugins:
$ref: "#/definitions/PluginsInfo"
MemoryLimit:
@@ -3766,19 +4452,30 @@ definitions:
type: "boolean"
example: true
KernelMemory:
- description: "Indicates if the host has kernel memory limit support enabled."
+ description: |
+ Indicates if the host has kernel memory limit support enabled.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is deprecated because kernel 5.4 deprecated
+ > `kmem.limit_in_bytes`.
type: "boolean"
example: true
CpuCfsPeriod:
- description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host."
+ description: |
+ Indicates if CPU CFS (Completely Fair Scheduler) period is supported by
+ the host.
type: "boolean"
example: true
CpuCfsQuota:
- description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host."
+ description: |
+ Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by
+ the host.
type: "boolean"
example: true
CPUShares:
- description: "Indicates if CPU Shares limiting is supported by the host."
+ description: |
+ Indicates if CPU Shares limiting is supported by the host.
type: "boolean"
example: true
CPUSet:
@@ -3808,7 +4505,9 @@ definitions:
type: "boolean"
example: true
Debug:
- description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled."
+ description: |
+ Indicates if the daemon is running in debug-mode / with debug-level
+ logging enabled.
type: "boolean"
example: true
NFd:
@@ -3842,6 +4541,13 @@ definitions:
enum: ["cgroupfs", "systemd", "none"]
default: "cgroupfs"
example: "cgroupfs"
+ CgroupVersion:
+ description: |
+ The version of the cgroup.
+ type: "string"
+ enum: ["1", "2"]
+ default: "1"
+ example: "1"
NEventsListener:
description: "Number of event listeners subscribed."
type: "integer"
@@ -3900,7 +4606,7 @@ definitions:
example: 4
MemTotal:
description: |
- Total amount of physical memory available on the host, in kilobytes (kB).
+ Total amount of physical memory available on the host, in bytes.
type: "integer"
format: "int64"
example: 2095882240
@@ -3988,7 +4694,7 @@ definitions:
<p><br /></p>
- > **Note**: This field is only propagated when using standalone Swarm
+ > **Deprecated**: This field is only propagated when using standalone Swarm
> mode, and overlay networking using an external k/v store. Overlay
> networks with Swarm mode enabled use the built-in raft store, and
> this field will be empty.
@@ -4002,7 +4708,7 @@ definitions:
<p><br /></p>
- > **Note**: This field is only propagated when using standalone Swarm
+ > **Deprecated**: This field is only propagated when using standalone Swarm
> mode, and overlay networking using an external k/v store. Overlay
> networks with Swarm mode enabled use the built-in raft store, and
> this field will be empty.
@@ -4107,6 +4813,25 @@ definitions:
such as number of nodes, and expiration are included.
type: "string"
example: "Community Engine"
+ DefaultAddressPools:
+ description: |
+ List of custom default address pools for local networks, which can be
+ specified in the daemon.json file or via a dockerd option.
+
+ Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+ 10.10.[0-255].0/24 address pools.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Base:
+ description: "The network address in CIDR format"
+ type: "string"
+ example: "10.10.0.0/16"
+ Size:
+ description: "The network pool size"
+ type: "integer"
+ example: 24
Warnings:
description: |
List of warnings / informational messages about missing features, or
@@ -4453,19 +5178,23 @@ definitions:
type: "string"
NetworkAttachmentConfig:
- description: "Specifies how a service should be attached to a particular network."
+ description: |
+ Specifies how a service should be attached to a particular network.
type: "object"
properties:
Target:
- description: "The target network for attachment. Must be a network name or ID."
+ description: |
+ The target network for attachment. Must be a network name or ID.
type: "string"
Aliases:
- description: "Discoverable alternate names for the service on this network."
+ description: |
+ Discoverable alternate names for the service on this network.
type: "array"
items:
type: "string"
DriverOpts:
- description: "Driver attachment options for the network target"
+ description: |
+ Driver attachment options for the network target.
type: "object"
additionalProperties:
type: "string"
@@ -4475,32 +5204,42 @@ paths:
get:
summary: "List containers"
description: |
- Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).
+ Returns a list of containers. For details on the format, see the
+ [inspect endpoint](#operation/ContainerInspect).
- Note that it uses a different, smaller representation of a container than inspecting a single container. For example,
- the list of linked containers is not propagated .
+ Note that it uses a different, smaller representation of a container
+ than inspecting a single container. For example, the list of linked
+ containers is not propagated.
operationId: "ContainerList"
produces:
- "application/json"
parameters:
- name: "all"
in: "query"
- description: "Return all containers. By default, only running containers are shown"
+ description: |
+ Return all containers. By default, only running containers are shown.
type: "boolean"
default: false
- name: "limit"
in: "query"
- description: "Return this number of most recently created containers, including non-running ones."
+ description: |
+ Return this number of most recently created containers, including
+ non-running ones.
type: "integer"
- name: "size"
in: "query"
- description: "Return the size of container as fields `SizeRw` and `SizeRootFs`."
+ description: |
+ Return the size of container as fields `SizeRw` and `SizeRootFs`.
type: "boolean"
default: false
- name: "filters"
in: "query"
description: |
- Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters:
+ Filters to process on the container list, encoded as JSON (a
+ `map[string][]string`). For example, `{"status": ["paused"]}` will
+ only return paused containers.
+
+ Available filters:
- `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
- `before`=(`<container id>` or `<container name>`)
@@ -4671,7 +5410,9 @@ paths:
parameters:
- name: "name"
in: "query"
- description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`."
+ description: |
+ Assign the specified name to the container. Must match
+ `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
type: "string"
pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
- name: "body"
@@ -4685,14 +5426,7 @@ paths:
HostConfig:
$ref: "#/definitions/HostConfig"
NetworkingConfig:
- description: "This container's networking configuration."
- type: "object"
- properties:
- EndpointsConfig:
- description: "A mapping of network name to endpoint configuration for that network."
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
+ $ref: "#/definitions/NetworkingConfig"
example:
Hostname: ""
Domainname: ""
@@ -4754,6 +5488,14 @@ paths:
- {}
BlkioDeviceWriteIOps:
- {}
+ DeviceRequests:
+ - Driver: "nvidia"
+ Count: -1
+ DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
+ Capabilities: [["gpu", "nvidia", "compute"]]
+ Options:
+ property1: "string"
+ property2: "string"
MemorySwappiness: 60
OomKillDisable: false
OomScoreAdj: 500
@@ -4885,54 +5627,10 @@ paths:
items:
type: "string"
State:
- description: "The state of the container."
- type: "object"
- properties:
- Status:
- description: |
- The status of the container. For example, `"running"` or `"exited"`.
- type: "string"
- enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
- Running:
- description: |
- Whether this container is running.
-
- Note that a running container can be _paused_. The `Running` and `Paused`
- booleans are not mutually exclusive:
-
- When pausing a container (on Linux), the freezer cgroup is used to suspend
- all processes in the container. Freezing the process requires the process to
- be running. As a result, paused containers are both `Running` _and_ `Paused`.
-
- Use the `Status` field instead to determine if a container's state is "running".
- type: "boolean"
- Paused:
- description: "Whether this container is paused."
- type: "boolean"
- Restarting:
- description: "Whether this container is restarting."
- type: "boolean"
- OOMKilled:
- description: "Whether this container has been killed because it ran out of memory."
- type: "boolean"
- Dead:
- type: "boolean"
- Pid:
- description: "The process ID of this container"
- type: "integer"
- ExitCode:
- description: "The last exit code of this container"
- type: "integer"
- Error:
- type: "string"
- StartedAt:
- description: "The time when this container was last started."
- type: "string"
- FinishedAt:
- description: "The time when this container last exited."
- type: "string"
+ x-nullable: true
+ $ref: "#/definitions/ContainerState"
Image:
- description: "The container's image"
+ description: "The container's image ID"
type: "string"
ResolvConfPath:
type: "string"
@@ -4942,9 +5640,6 @@ paths:
type: "string"
LogPath:
type: "string"
- Node:
- description: "TODO"
- type: "object"
Name:
type: "string"
RestartCount:
@@ -4970,7 +5665,9 @@ paths:
GraphDriver:
$ref: "#/definitions/GraphDriverData"
SizeRw:
- description: "The size of files that have been created or changed by this container."
+ description: |
+ The size of files that have been created or changed by this
+ container.
type: "integer"
format: "int64"
SizeRootFs:
@@ -5002,6 +5699,8 @@ paths:
Domainname: ""
Env:
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Healthcheck:
+ Test: ["CMD-SHELL", "exit 0"]
Hostname: "ba033ac44011"
Image: "ubuntu"
Labels:
@@ -5047,6 +5746,14 @@ paths:
CpuRealtimePeriod: 1000000
CpuRealtimeRuntime: 10000
Devices: []
+ DeviceRequests:
+ - Driver: "nvidia"
+ Count: -1
+ DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
+ Capabilities: [["gpu", "nvidia", "compute"]]
+ Options:
+ property1: "string"
+ property2: "string"
IpcMode: ""
LxcConf: []
Memory: 0
@@ -5113,6 +5820,14 @@ paths:
Error: ""
ExitCode: 9
FinishedAt: "2015-01-06T15:47:32.080254511Z"
+ Health:
+ Status: "healthy"
+ FailingStreak: 0
+ Log:
+ - Start: "2019-12-22T10:59:05.6385933Z"
+ End: "2019-12-22T10:59:05.8078452Z"
+ ExitCode: 0
+ Output: ""
OOMKilled: false
Dead: false
Paused: false
@@ -5155,7 +5870,9 @@ paths:
/containers/{id}/top:
get:
summary: "List processes running inside a container"
- description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows."
+ description: |
+ On Unix systems, this is done by running the `ps` command. This endpoint
+ is not supported on Windows.
operationId: "ContainerTop"
responses:
200:
@@ -5171,7 +5888,9 @@ paths:
items:
type: "string"
Processes:
- description: "Each process running in the container, where each is process is an array of values corresponding to the titles"
+ description: |
+ Each process running in the container, where each process
+ is an array of values corresponding to the titles.
type: "array"
items:
type: "array"
@@ -5236,15 +5955,16 @@ paths:
description: |
Get `stdout` and `stderr` logs from a container.
- Note: This endpoint works only for containers with the `json-file` or `journald` logging driver.
+ Note: This endpoint works only for containers with the `json-file` or
+ `journald` logging driver.
operationId: "ContainerLogs"
responses:
200:
description: |
- logs returned as a stream in response body.
- For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
- Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not
- set Content-Type.
+ logs returned as a stream in response body.
+ For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ Note that unlike the attach endpoint, the logs endpoint does not
+ upgrade the connection and does not set Content-Type.
schema:
type: "string"
format: "binary"
@@ -5297,7 +6017,9 @@ paths:
default: false
- name: "tail"
in: "query"
- description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Container"]
@@ -5403,6 +6125,22 @@ paths:
If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
nil then for compatibility with older daemons the length of the
corresponding `cpu_usage.percpu_usage` array should be used.
+
+ On a cgroup v2 host, the following fields are not set
+ * `blkio_stats`: all fields other than `io_service_bytes_recursive`
+ * `cpu_stats`: `cpu_usage.percpu_usage`
+ * `memory_stats`: `max_usage` and `failcnt`
+ Also, `memory_stats.stats` fields are incompatible with cgroup v1.
+
+ To calculate the values shown by the `stats` command of the docker cli tool,
+ the following formulas can be used:
+ * used_memory = `memory_stats.usage - memory_stats.stats.cache`
+ * available_memory = `memory_stats.limit`
+ * Memory usage % = `(used_memory / available_memory) * 100.0`
+ * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
+ * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
+ * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
+ * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
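+
+ As an illustrative worked example with hypothetical values (not taken from
+ a real response): if `cpu_delta` = 500000000, `system_cpu_delta` =
+ 1000000000 and `number_cpus` = 4, then
+ CPU usage % = (500000000 / 1000000000) * 4 * 100.0 = 200.0.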
operationId: "ContainerStats"
produces: ["application/json"]
responses:
@@ -5521,9 +6259,18 @@ paths:
type: "string"
- name: "stream"
in: "query"
- description: "Stream the output. If false, the stats will be output once and then it will disconnect."
+ description: |
+ Stream the output. If false, the stats will be output once and then
+ it will disconnect.
type: "boolean"
default: true
+ - name: "one-shot"
+ in: "query"
+ description: |
+ Only get a single stat instead of waiting for 2 cycles. Must be used
+ with `stream=false`.
+ type: "boolean"
+ default: false
tags: ["Container"]
/containers/{id}/resize:
post:
@@ -5556,11 +6303,11 @@ paths:
type: "string"
- name: "h"
in: "query"
- description: "Height of the tty session in characters"
+ description: "Height of the TTY session in characters"
type: "integer"
- name: "w"
in: "query"
- description: "Width of the tty session in characters"
+ description: "Width of the TTY session in characters"
type: "integer"
tags: ["Container"]
/containers/{id}/start:
@@ -5591,7 +6338,10 @@ paths:
type: "string"
- name: "detachKeys"
in: "query"
- description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ description: |
+ Override the key sequence for detaching a container. Format is a
+ single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
+ of: `a-z`, `@`, `^`, `[`, `,` or `_`.
type: "string"
tags: ["Container"]
/containers/{id}/stop:
@@ -5657,7 +6407,9 @@ paths:
/containers/{id}/kill:
post:
summary: "Kill a container"
- description: "Send a POSIX signal to a container, defaulting to killing to the container."
+ description: |
+ Send a POSIX signal to a container, defaulting to killing the
+ container.
operationId: "ContainerKill"
responses:
204:
@@ -5695,7 +6447,9 @@ paths:
/containers/{id}/update:
post:
summary: "Update a container"
- description: "Change various configuration options of a container without having to recreate it."
+ description: |
+ Change various configuration options of a container without having to
+ recreate it.
operationId: "ContainerUpdate"
consumes: ["application/json"]
produces: ["application/json"]
@@ -5795,7 +6549,10 @@ paths:
description: |
Use the freezer cgroup to suspend all processes in a container.
- Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
+ Traditionally, when suspending a process the `SIGSTOP` signal is used,
+ which is observable by the process being suspended. With the freezer
+ cgroup the process is unaware, and unable to capture, that it is being
+ suspended, and subsequently resumed.
operationId: "ContainerPause"
responses:
204:
@@ -5848,15 +6605,20 @@ paths:
post:
summary: "Attach to a container"
description: |
- Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.
+ Attach to a container to read its output or send it input. You can attach
+ to the same container multiple times and you can reattach to containers
+ that have been detached.
- Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything.
+ Either the `stream` or `logs` parameter must be `true` for this endpoint
+ to do anything.
- See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.
+ See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
+ for more details.
### Hijacking
- This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
+ and `stderr` on the same socket.
This is the response from the daemon for an attach request:
@@ -5867,9 +6629,11 @@ paths:
[STREAM]
```
- After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.
+ After the headers and two new lines, the TCP connection can now be used
+ for raw, bidirectional communication between the client and server.
- To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.
+ To hint potential proxies about connection hijacking, the Docker client
+ can also optionally send connection upgrade headers.
For example, the client sends this request to upgrade the connection:
@@ -5879,7 +6643,8 @@ paths:
Connection: Upgrade
```
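
As a rough illustration of this handshake, here is a minimal Go sketch that opens the daemon's Unix socket and sends the upgrade request by hand; the socket path, API version, and container name are assumptions for illustration, not values taken from this spec.

```
// Hypothetical sketch of the attach upgrade handshake over the local
// Unix socket. Socket path, API version, and container name are
// placeholders.
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Ask the daemon to hijack the connection for an attach stream.
	fmt.Fprint(conn, "POST /v1.40/containers/mycontainer/attach?stream=1&stdout=1 HTTP/1.1\r\n"+
		"Host: docker\r\n"+
		"Upgrade: tcp\r\n"+
		"Connection: Upgrade\r\n\r\n")

	// Read the status line; after the headers and blank line the
	// connection carries the raw (or multiplexed) stream.
	r := bufio.NewReader(conn)
	status, _ := r.ReadString('\n')
	fmt.Print(status)
}
```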
- The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:
+ The Docker daemon will respond with a `101 UPGRADED` response, and will
+ similarly follow with the raw stream:
```
HTTP/1.1 101 UPGRADED
@@ -5892,9 +6657,14 @@ paths:
### Stream format
- When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
+        the stream over the hijacked connection is multiplexed to separate out
+ `stdout` and `stderr`. The stream consists of a series of frames, each
+ containing a header and a payload.
- The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`).
+        The header indicates which stream the frame belongs to (`stdout` or
+        `stderr`). It also contains the size of the associated frame, encoded in
+        the last four bytes (`uint32`).
It is encoded on the first eight bytes like this:
@@ -5908,9 +6678,11 @@ paths:
- 1: `stdout`
- 2: `stderr`
- `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
+ encoded as big endian.
- Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`.
+ Following the header is the payload, which is the specified number of
+ bytes of `STREAM_TYPE`.
The simplest way to implement this protocol is the following:
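
A minimal Go sketch of such a reader, assuming the 8-byte header layout described above (stream type in the first byte, big-endian `uint32` length in the last four); the function and writer names are illustrative only.

```
// demux reads the multiplexed stream described above and copies each
// frame's payload to stdout or stderr according to the STREAM_TYPE byte.
package main

import (
	"encoding/binary"
	"io"
	"os"
)

func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		// Bytes 4-7 hold the frame size as a big-endian uint32.
		size := binary.BigEndian.Uint32(header[4:8])
		dst := stdout
		if header[0] == 2 { // 0: stdin, 1: stdout, 2: stderr
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	_ = demux(os.Stdin, os.Stdout, os.Stderr)
}
```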
@@ -5922,7 +6694,10 @@ paths:
### Stream format when using a TTY
- When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
operationId: "ContainerAttach"
produces:
@@ -5955,21 +6730,28 @@ paths:
type: "string"
- name: "detachKeys"
in: "query"
- description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ description: |
+        Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,` or `_`.
type: "string"
- name: "logs"
in: "query"
description: |
Replay previous logs from the container.
- This is useful for attaching to a container that has started and you want to output everything since the container started.
+ This is useful for attaching to a container that has started and you
+ want to output everything since the container started.
- If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+ If `stream` is also enabled, once all the previous output has been
+ returned, it will seamlessly transition into streaming current
+ output.
type: "boolean"
default: false
- name: "stream"
in: "query"
- description: "Stream attached streams from the time the request was made onwards"
+ description: |
+ Stream attached streams from the time the request was made onwards.
type: "boolean"
default: false
- name: "stdin"
@@ -6020,7 +6802,10 @@ paths:
type: "string"
- name: "detachKeys"
in: "query"
- description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ description: |
+        Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,`, or `_`.
type: "string"
- name: "logs"
in: "query"
@@ -6093,7 +6878,9 @@ paths:
type: "string"
- name: "condition"
in: "query"
- description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'."
+ description: |
+ Wait until a container state reaches the given condition, either
+ 'not-running' (default), 'next-exit', or 'removed'.
type: "string"
default: "not-running"
tags: ["Container"]
@@ -6121,7 +6908,9 @@ paths:
$ref: "#/definitions/ErrorResponse"
examples:
application/json:
- message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove"
+ message: |
+ You cannot remove a running container: c2ada9df5af8. Stop the
+ container before attempting removal or force remove
500:
description: "server error"
schema:
@@ -6151,7 +6940,10 @@ paths:
/containers/{id}/archive:
head:
summary: "Get information about files in a container"
- description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path."
+ description: |
+ A response header `X-Docker-Container-Path-Stat` is returned, containing
+        a base64-encoded JSON object with some filesystem header information
+ about the path.
operationId: "ContainerArchiveInfo"
responses:
200:
@@ -6159,7 +6951,9 @@ paths:
headers:
X-Docker-Container-Path-Stat:
type: "string"
- description: "A base64 - encoded JSON object with some filesystem header information about the path"
+ description: |
+            A base64-encoded JSON object with some filesystem header
+ information about the path
400:
description: "Bad parameter"
schema:
@@ -6168,7 +6962,10 @@ paths:
- type: "object"
properties:
message:
- description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ description: |
+ The error message. Either "must specify path parameter"
+ (path cannot be empty) or "not a directory" (path was
+ asserted to be a directory but exists as a file).
type: "string"
x-nullable: false
404:
@@ -6210,7 +7007,10 @@ paths:
- type: "object"
properties:
message:
- description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ description: |
+ The error message. Either "must specify path parameter"
+ (path cannot be empty) or "not a directory" (path was
+ asserted to be a directory but exists as a file).
type: "string"
x-nullable: false
404:
@@ -6276,16 +7076,24 @@ paths:
type: "string"
- name: "noOverwriteDirNonDir"
in: "query"
- description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa."
+ description: |
+ If `1`, `true`, or `True` then it will be an error if unpacking the
+ given content would cause an existing directory to be replaced with
+ a non-directory and vice versa.
type: "string"
- name: "copyUIDGID"
in: "query"
- description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir"
+ description: |
+        If `1`, or `true`, then it will copy UID/GID maps to the destination
+        file or directory.
type: "string"
- name: "inputStream"
in: "body"
required: true
- description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ description: |
+ The input stream must be a tar archive compressed with one of the
+ following algorithms: `identity` (no compression), `gzip`, `bzip2`,
+ or `xz`.
schema:
type: "string"
format: "binary"
@@ -6383,7 +7191,10 @@ paths:
- name: "filters"
in: "query"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the images list.
+
+ Available filters:
- `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- `dangling=true`
@@ -6599,7 +7410,11 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the list of build cache objects.
+
+ Available filters:
+
- `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
- `id=<id>`
- `parent=<id>`
@@ -6667,6 +7482,10 @@ paths:
in: "query"
description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
type: "string"
+ - name: "message"
+ in: "query"
+ description: "Set commit message for imported image."
+ type: "string"
- name: "inputImage"
in: "body"
description: "Image content if the value `-` has been specified in fromSrc query parameter"
@@ -6675,7 +7494,11 @@ paths:
required: false
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
- name: "platform"
in: "query"
@@ -6874,7 +7697,9 @@ paths:
description: |
Push an image to a registry.
- If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`.
+ If you wish to push an image on to a private registry, that image must
+ already have a tag which references the registry. For example,
+ `registry.example.com/myimage:latest`.
The push is cancelled if the HTTP connection is closed.
operationId: "ImagePush"
@@ -6903,7 +7728,11 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
required: true
tags: ["Image"]
@@ -7107,7 +7936,9 @@ paths:
/auth:
post:
summary: "Check auth configuration"
- description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password."
+ description: |
+ Validate credentials for a registry and, if available, get an identity
+ token for accessing the registry without password.
operationId: "SystemAuth"
consumes: ["application/json"]
produces: ["application/json"]
@@ -7170,63 +8001,7 @@ paths:
200:
description: "no error"
schema:
- type: "object"
- title: "SystemVersionResponse"
- properties:
- Platform:
- type: "object"
- required: [Name]
- properties:
- Name:
- type: "string"
- Components:
- type: "array"
- items:
- type: "object"
- x-go-name: ComponentVersion
- required: [Name, Version]
- properties:
- Name:
- type: "string"
- Version:
- type: "string"
- x-nullable: false
- Details:
- type: "object"
- x-nullable: true
-
- Version:
- type: "string"
- ApiVersion:
- type: "string"
- MinAPIVersion:
- type: "string"
- GitCommit:
- type: "string"
- GoVersion:
- type: "string"
- Os:
- type: "string"
- Arch:
- type: "string"
- KernelVersion:
- type: "string"
- Experimental:
- type: "boolean"
- BuildTime:
- type: "string"
- examples:
- application/json:
- Version: "17.04.0"
- Os: "linux"
- KernelVersion: "3.19.0-23-generic"
- GoVersion: "go1.7.5"
- GitCommit: "deadbee"
- Arch: "amd64"
- ApiVersion: "1.27"
- MinAPIVersion: "1.12"
- BuildTime: "2016-06-14T07:09:13.444803460+00:00"
- Experimental: true
+ $ref: "#/definitions/SystemVersion"
500:
description: "server error"
schema:
@@ -7372,13 +8147,13 @@ paths:
Various objects within Docker report events when something happens to them.
- Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`
+ Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
- Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`
+ Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
- Volumes report these events: `create`, `mount`, `unmount`, and `destroy`
+ Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
- Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`
+ Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune`
The Docker daemon reports these events: `reload`
@@ -7390,6 +8165,8 @@ paths:
Configs report these events: `create`, `update`, and `remove`
+ The Builder reports `prune` events
+
operationId: "SystemEvents"
produces:
- "application/json"
@@ -7618,11 +8395,16 @@ paths:
get:
summary: "Export several images"
description: |
- Get a tarball containing all images and metadata for several image repositories.
+ Get a tarball containing all images and metadata for several image
+ repositories.
- For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID.
+ For each value of the `names` parameter: if it is a specific name and
+ tag (e.g. `ubuntu:latest`), then only that image (and its parents) are
+ returned; if it is an image ID, similarly only that image (and its parents)
+ are returned and there would be no names referenced in the 'repositories'
+ file for this image ID.
- For details on the format, see [the export image endpoint](#operation/ImageGet).
+ For details on the format, see the [export image endpoint](#operation/ImageGet).
operationId: "ImageGetAll"
produces:
- "application/x-tar"
@@ -7650,7 +8432,7 @@ paths:
description: |
Load a set of images and tags into a repository.
- For details on the format, see [the export image endpoint](#operation/ImageGet).
+ For details on the format, see the [export image endpoint](#operation/ImageGet).
operationId: "ImageLoad"
consumes:
- "application/x-tar"
@@ -7723,12 +8505,16 @@ paths:
description: "Attach to `stderr` of the exec command."
DetachKeys:
type: "string"
- description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ description: |
+ Override the key sequence for detaching a container. Format is
+ a single character `[a-Z]` or `ctrl-<value>` where `<value>`
+ is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
Tty:
type: "boolean"
description: "Allocate a pseudo-TTY."
Env:
- description: "A list of environment variables in the form `[\"VAR=value\", ...]`."
+ description: |
+ A list of environment variables in the form `["VAR=value", ...]`.
type: "array"
items:
type: "string"
@@ -7743,10 +8529,14 @@ paths:
default: false
User:
type: "string"
- description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`."
+ description: |
+ The user, and optionally, group to run the exec process inside
+ the container. Format is one of: `user`, `user:group`, `uid`,
+ or `uid:gid`.
WorkingDir:
type: "string"
- description: "The working directory for the exec process inside the container."
+ description: |
+ The working directory for the exec process inside the container.
example:
AttachStdin: false
AttachStdout: true
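
A minimal sketch of driving this endpoint through the vendored Go client, assuming this client version exposes `ContainerExecCreate` and `ContainerExecStart`; the container name and command are placeholders.

```
// Create and start an exec instance with the Go client. The container
// name and command are placeholders; error handling is abbreviated.
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	resp, err := cli.ContainerExecCreate(ctx, "mycontainer", types.ExecConfig{
		AttachStdout: true,
		AttachStderr: true,
		Tty:          false,
		User:         "1000:1000",
		WorkingDir:   "/tmp",
		Env:          []string{"VAR=value"},
		Cmd:          []string{"ls", "-l"},
	})
	if err != nil {
		panic(err)
	}

	// Start the exec instance without attaching to its streams.
	if err := cli.ContainerExecStart(ctx, resp.ID, types.ExecStartCheck{Detach: true}); err != nil {
		panic(err)
	}
}
```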
@@ -7768,7 +8558,10 @@ paths:
/exec/{id}/start:
post:
summary: "Start an exec instance"
- description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command."
+ description: |
+ Starts a previously set up exec instance. If detach is true, this endpoint
+ returns immediately after starting the command. Otherwise, it sets up an
+ interactive session with the command.
operationId: "ExecStart"
consumes:
- "application/json"
@@ -7809,7 +8602,9 @@ paths:
/exec/{id}/resize:
post:
summary: "Resize an exec instance"
- description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance."
+ description: |
+ Resize the TTY session used by an exec instance. This endpoint only works
+ if `tty` was specified as part of creating and starting the exec instance.
operationId: "ExecResize"
responses:
201:
@@ -7929,7 +8724,8 @@ paths:
Warnings:
type: "array"
x-nullable: false
- description: "Warnings that occurred when fetching the list of volumes"
+ description: |
+ Warnings that occurred when fetching the list of volumes.
items:
type: "string"
@@ -7998,7 +8794,8 @@ paths:
title: "VolumeConfig"
properties:
Name:
- description: "The new volume's name. If not specified, Docker generates a name."
+ description: |
+ The new volume's name. If not specified, Docker generates a name.
type: "string"
x-nullable: false
Driver:
@@ -8007,7 +8804,9 @@ paths:
default: "local"
x-nullable: false
DriverOpts:
- description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific."
+ description: |
+ A mapping of driver options and values. These options are
+ passed directly to the driver and are driver specific.
type: "object"
additionalProperties:
type: "string"
@@ -8121,10 +8920,12 @@ paths:
get:
summary: "List networks"
description: |
- Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).
+ Returns a list of networks. For details on the format, see the
+ [network inspect endpoint](#operation/NetworkInspect).
- Note that it uses a different, smaller representation of a network than inspecting a single network. For example,
- the list of containers attached to the network is not propagated in API versions 1.28 and up.
+ Note that it uses a different, smaller representation of a network than
+ inspecting a single network. For example, the list of containers attached
+ to the network is not propagated in API versions 1.28 and up.
operationId: "NetworkList"
produces:
- "application/json"
@@ -8194,7 +8995,10 @@ paths:
- name: "filters"
in: "query"
description: |
- JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:
+ JSON encoded value of the filters (a `map[string][]string`) to process
+ on the networks list.
+
+ Available filters:
- `dangling=<boolean>` When set to `true` (or `1`), returns all
networks that are not in use by a container. When set to `false`
@@ -8319,7 +9123,14 @@ paths:
description: "The network's name."
type: "string"
CheckDuplicate:
- description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions."
+ description: |
+ Check for networks with duplicate names. Since Network is
+ primarily keyed based on a random ID and not on the name, and
+ network name is strictly a user-friendly alias to the network
+ which is uniquely identified using ID, there is no guaranteed
+ way to check for duplicates. CheckDuplicate is there to provide
+            a best effort checking of any networks which have the same name
+ but it is not guaranteed to catch all name collisions.
type: "boolean"
Driver:
description: "Name of the network driver plugin to use."
@@ -8329,10 +9140,14 @@ paths:
description: "Restrict external access to the network."
type: "boolean"
Attachable:
- description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode."
+ description: |
+ Globally scoped network is manually attachable by regular
+ containers from workers in swarm mode.
type: "boolean"
Ingress:
- description: "Ingress network is the network which provides the routing-mesh in swarm mode."
+ description: |
+ Ingress network is the network which provides the routing-mesh
+ in swarm mode.
type: "boolean"
IPAM:
description: "Optional custom IP scheme for the network."
@@ -8461,10 +9276,12 @@ paths:
properties:
Container:
type: "string"
- description: "The ID or name of the container to disconnect from the network."
+ description: |
+ The ID or name of the container to disconnect from the network.
Force:
type: "boolean"
- description: "Force the container to disconnect from the network."
+ description: |
+ Force the container to disconnect from the network.
tags: ["Network"]
/networks/prune:
post:
@@ -8521,7 +9338,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the plugin list.
+
+ Available filters:
- `capability=<capability name>`
- `enable=<true>|<false>`
@@ -8537,7 +9357,9 @@ paths:
schema:
type: "array"
items:
- description: "Describes a permission the user has to accept upon installing the plugin."
+ description: |
+ Describes a permission the user has to accept upon installing
+ the plugin.
type: "object"
title: "PluginPrivilegeItem"
properties:
@@ -8569,7 +9391,9 @@ paths:
parameters:
- name: "remote"
in: "query"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
tags:
@@ -8580,7 +9404,8 @@ paths:
summary: "Install a plugin"
operationId: "PluginPull"
description: |
- Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
+ Pulls and installs a plugin. After the plugin is installed, it can be
+ enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
produces:
- "application/json"
responses:
@@ -8609,14 +9434,21 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration to use when pulling a plugin
+ from a registry.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
- name: "body"
in: "body"
schema:
type: "array"
items:
- description: "Describes a permission accepted by the user upon installing the plugin."
+ description: |
+ Describes a permission accepted by the user upon installing the
+ plugin.
type: "object"
properties:
Name:
@@ -8661,7 +9493,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
tags: ["Plugin"]
@@ -8685,12 +9519,16 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
- name: "force"
in: "query"
- description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container."
+ description: |
+ Disable the plugin before removing. This may result in issues if the
+ plugin is in use by a container.
type: "boolean"
default: false
tags: ["Plugin"]
@@ -8712,7 +9550,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
- name: "timeout"
@@ -8739,7 +9579,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
tags: ["Plugin"]
@@ -8761,7 +9603,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
- name: "remote"
@@ -8774,14 +9618,21 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration to use when pulling a plugin
+ from a registry.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
- name: "body"
in: "body"
schema:
type: "array"
items:
- description: "Describes a permission accepted by the user upon installing the plugin."
+ description: |
+ Describes a permission accepted by the user upon installing the
+ plugin.
type: "object"
properties:
Name:
@@ -8822,7 +9673,9 @@ paths:
parameters:
- name: "name"
in: "query"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
- name: "tarContext"
@@ -8841,7 +9694,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
responses:
@@ -8865,7 +9720,9 @@ paths:
parameters:
- name: "name"
in: "path"
- description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
required: true
type: "string"
- name: "body"
@@ -9014,7 +9871,9 @@ paths:
$ref: "#/definitions/NodeSpec"
- name: "version"
in: "query"
- description: "The version number of the node object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the node object being updated. This is required
+ to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -9075,20 +9934,35 @@ paths:
type: "object"
properties:
ListenAddr:
- description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used."
+ description: |
+ Listen address used for inter-manager communication, as well
+ as determining the networking interface used for the VXLAN
+ Tunnel Endpoint (VTEP). This can either be an address/port
+ combination in the form `192.168.1.1:4567`, or an interface
+ followed by a port number, like `eth0:4567`. If the port number
+ is omitted, the default swarm listening port is used.
type: "string"
AdvertiseAddr:
- description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ description: |
+ Externally reachable address advertised to other nodes. This
+ can either be an address/port combination in the form
+ `192.168.1.1:4567`, or an interface followed by a port number,
+ like `eth0:4567`. If the port number is omitted, the port
+ number from the listen address is used. If `AdvertiseAddr` is
+ not specified, it will be automatically detected when possible.
type: "string"
DataPathAddr:
description: |
- Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
- or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
- is used.
-
- The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
- nodes in order to reach the containers running on this node. Using this parameter it is possible to
- separate the container data traffic from the management traffic of the cluster.
+ Address or interface to use for data path traffic (format:
+ `<ip|interface>`), for example, `192.168.1.1`, or an interface,
+ like `eth0`. If `DataPathAddr` is unspecified, the same address
+ as `AdvertiseAddr` is used.
+
+ The `DataPathAddr` specifies the address that global scope
+ network drivers will publish towards other nodes in order to
+ reach the containers running on this node. Using this parameter
+ it is possible to separate the container data traffic from the
+ management traffic of the cluster.
type: "string"
DataPathPort:
description: |
@@ -9099,7 +9973,8 @@ paths:
format: "uint32"
DefaultAddrPool:
description: |
- Default Address Pool specifies default subnet pools for global scope networks.
+ Default Address Pool specifies default subnet pools for global
+ scope networks.
type: "array"
items:
type: "string"
@@ -9109,7 +9984,8 @@ paths:
type: "boolean"
SubnetSize:
description: |
- SubnetSize specifies the subnet size of the networks created from the default subnet pool
+ SubnetSize specifies the subnet size of the networks created
+ from the default subnet pool.
type: "integer"
format: "uint32"
Spec:
@@ -9156,24 +10032,37 @@ paths:
type: "object"
properties:
ListenAddr:
- description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)."
+ description: |
+ Listen address used for inter-manager communication if the node
+ gets promoted to manager, as well as determining the networking
+ interface used for the VXLAN Tunnel Endpoint (VTEP).
type: "string"
AdvertiseAddr:
- description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ description: |
+ Externally reachable address advertised to other nodes. This
+ can either be an address/port combination in the form
+ `192.168.1.1:4567`, or an interface followed by a port number,
+ like `eth0:4567`. If the port number is omitted, the port
+ number from the listen address is used. If `AdvertiseAddr` is
+ not specified, it will be automatically detected when possible.
type: "string"
DataPathAddr:
description: |
- Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
- or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
- is used.
+ Address or interface to use for data path traffic (format:
+ `<ip|interface>`), for example, `192.168.1.1`, or an interface,
+              like `eth0`. If `DataPathAddr` is unspecified, the same address
+ as `AdvertiseAddr` is used.
- The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
- nodes in order to reach the containers running on this node. Using this parameter it is possible to
- separate the container data traffic from the management traffic of the cluster.
+ The `DataPathAddr` specifies the address that global scope
+ network drivers will publish towards other nodes in order to
+ reach the containers running on this node. Using this parameter
+ it is possible to separate the container data traffic from the
+ management traffic of the cluster.
type: "string"
RemoteAddrs:
- description: "Addresses of manager nodes already participating in the swarm."
+ description: |
+ Addresses of manager nodes already participating in the swarm.
type: "array"
items:
type: "string"
@@ -9204,7 +10093,9 @@ paths:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "force"
- description: "Force leave swarm, even if this is the last manager or that it will break the cluster."
+ description: |
+            Force leave swarm, even if this is the last manager or it will
+ break the cluster.
in: "query"
type: "boolean"
default: false
@@ -9236,7 +10127,9 @@ paths:
$ref: "#/definitions/SwarmSpec"
- name: "version"
in: "query"
- description: "The version number of the swarm object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the swarm object being updated. This is
+ required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -9339,7 +10232,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the services list.
+
+ Available filters:
- `id=<service id>`
- `label=<service label>`
@@ -9348,7 +10244,8 @@ paths:
- name: "status"
in: "query"
type: "boolean"
- description: "Include service status, with count of running and desired tasks"
+ description: |
+ Include service status, with count of running and desired tasks.
tags: ["Service"]
/services/create:
post:
@@ -9471,7 +10368,12 @@ paths:
foo: "bar"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
tags: ["Service"]
/services/{id}:
@@ -9607,10 +10509,12 @@ paths:
- name: "version"
in: "query"
- description: "The version number of the service object being updated.
- This is required to avoid conflicting writes.
- This version number should be the value as currently set on the service *before* the update.
- You can find the current version by calling `GET /services/{id}`"
+ description: |
+ The version number of the service object being updated. This is
+ required to avoid conflicting writes.
+ This version number should be the value as currently set on the
+ service *before* the update. You can find the current version by
+            calling `GET /services/{id}`.
required: true
type: "integer"
- name: "registryAuthFrom"
@@ -9630,7 +10534,12 @@ paths:
type: "string"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
tags: ["Service"]
@@ -9638,9 +10547,11 @@ paths:
get:
summary: "Get service logs"
description: |
- Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs).
+ Get `stdout` and `stderr` logs from a service. See also
+ [`/containers/{id}/logs`](#operation/ContainerLogs).
- **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers.
+ **Note**: This endpoint works only for services with the `local`,
+ `json-file` or `journald` logging drivers.
operationId: "ServiceLogs"
responses:
200:
@@ -9701,7 +10612,9 @@ paths:
default: false
- name: "tail"
in: "query"
- description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Service"]
@@ -9842,7 +10755,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the tasks list.
+
+ Available filters:
- `desired-state=(running | shutdown | accepted)`
- `id=<task id>`
@@ -9885,9 +10801,11 @@ paths:
get:
summary: "Get task logs"
description: |
- Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs).
+ Get `stdout` and `stderr` logs from a task.
+ See also [`/containers/{id}/logs`](#operation/ContainerLogs).
- **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers.
+ **Note**: This endpoint works only for services with the `local`,
+ `json-file` or `journald` logging drivers.
operationId: "TaskLogs"
responses:
200:
@@ -9948,7 +10866,9 @@ paths:
default: false
- name: "tail"
in: "query"
- description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
type: "string"
default: "all"
tags: ["Task"]
@@ -10002,7 +10922,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the secrets list.
+
+ Available filters:
- `id=<secret id>`
- `label=<key> or label=<key>=value`
@@ -10159,10 +11082,15 @@ paths:
in: "body"
schema:
$ref: "#/definitions/SecretSpec"
- description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values."
+ description: |
+ The spec of the secret to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [SecretInspect endpoint](#operation/SecretInspect) response values.
- name: "version"
in: "query"
- description: "The version number of the secret object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the secret object being updated. This is
+ required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -10201,7 +11129,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the configs list.
+
+ Available filters:
- `id=<config id>`
- `label=<key> or label=<key>=value`
@@ -10345,10 +11276,15 @@ paths:
in: "body"
schema:
$ref: "#/definitions/ConfigSpec"
- description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values."
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
- name: "version"
in: "query"
- description: "The version number of the config object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -10356,7 +11292,8 @@ paths:
/distribution/{name}/json:
get:
summary: "Get image information from the registry"
- description: "Return image digest and platform information by contacting the registry."
+ description: |
+ Return image digest and platform information by contacting the registry.
operationId: "DistributionInspect"
produces:
- "application/json"
@@ -10371,7 +11308,8 @@ paths:
properties:
Descriptor:
type: "object"
- description: "A descriptor struct containing digest, media type, and size"
+ description: |
+ A descriptor struct containing digest, media type, and size.
properties:
MediaType:
type: "string"
@@ -10386,7 +11324,8 @@ paths:
type: "string"
Platforms:
type: "array"
- description: "An array containing all platforms supported by the image"
+ description: |
+ An array containing all platforms supported by the image.
items:
type: "object"
properties:
@@ -10445,11 +11384,13 @@ paths:
post:
summary: "Initialize interactive session"
description: |
- Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities.
+        Start a new interactive session with a server. Session allows the server
+        to call back to the client for advanced capabilities.
### Hijacking
- This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection.
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+        the client to expose gRPC services on that connection.
For example, the client sends this request to upgrade the connection:
@@ -10459,7 +11400,8 @@ paths:
Connection: Upgrade
```
- The Docker daemon will respond with a `101 UPGRADED` response follow with the raw stream:
+        The Docker daemon responds with a `101 UPGRADED` response, followed by
+ the raw stream:
```
HTTP/1.1 101 UPGRADED
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index 54cb236ef..9c464b73e 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -50,7 +50,7 @@ type ContainerCommitOptions struct {
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
- ExecID string
+ ExecID string `json:"ID"`
ContainerID string
Running bool
ExitCode int
@@ -205,7 +205,7 @@ const (
// BuilderV1 is the first generation builder in docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is builder based on moby/buildkit project
- BuilderBuildKit = "2"
+ BuilderBuildKit BuilderVersion = "2"
)
// ImageBuildResponse holds information
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
index 178e911a7..3dd133a3a 100644
--- a/vendor/github.com/docker/docker/api/types/configs.go
+++ b/vendor/github.com/docker/docker/api/types/configs.go
@@ -3,6 +3,7 @@ package types // import "github.com/docker/docker/api/types"
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// configs holds structs used for internal communication between the
@@ -15,6 +16,7 @@ type ContainerCreateConfig struct {
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
+ Platform *specs.Platform
AdjustCPUShares bool
}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
index f0ee9dde7..63381da36 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_top.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -10,7 +10,9 @@ package container // import "github.com/docker/docker/api/types/container"
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
- // Each process running in the container, where each is process is an array of values corresponding to the titles
+	// Each process running in the container, where each process
+	// is an array of values corresponding to the titles.
+ //
// Required: true
Processes [][]string `json:"Processes"`
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
index b8a4b3aa6..2d1cbaa9a 100644
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -361,7 +361,7 @@ type Resources struct {
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rule to be added to the device cgroup
DeviceRequests []DeviceRequest // List of device requests for device drivers
- KernelMemory int64 // Kernel memory limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
@@ -403,7 +403,6 @@ type HostConfig struct {
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
- Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
DNS []string `json:"Dns"` // List of DNS server to lookup
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
index 027c6edb7..aa8fba815 100644
--- a/vendor/github.com/docker/docker/api/types/events/events.go
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -1,6 +1,8 @@
package events // import "github.com/docker/docker/api/types/events"
const (
+ // BuilderEventType is the event type that the builder generates
+ BuilderEventType = "builder"
// ContainerEventType is the event type that containers generate
ContainerEventType = "container"
// DaemonEventType is the event type that daemon generate
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go
index 7927dbfff..437b184c6 100644
--- a/vendor/github.com/docker/docker/api/types/network/network.go
+++ b/vendor/github.com/docker/docker/api/types/network/network.go
@@ -1,7 +1,6 @@
package network // import "github.com/docker/docker/api/types/network"
import (
"github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/errdefs"
)
// Address represents an IP address
@@ -123,5 +122,5 @@ var acceptedFilters = map[string]bool{
// ValidateFilters validates the list of filter args with the available filters.
func ValidateFilters(filter filters.Args) error {
- return errdefs.InvalidParameter(filter.Validate(acceptedFilters))
+ return filter.Validate(acceptedFilters)
}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
index 5bbedfcf6..af5e1c0bc 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/container.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -5,6 +5,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
+ "github.com/docker/go-units"
)
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
@@ -67,11 +68,13 @@ type ContainerSpec struct {
// The format of extra hosts on swarmkit is specified in:
// http://man7.org/linux/man-pages/man5/hosts.5.html
// IP_address canonical_hostname [aliases...]
- Hosts []string `json:",omitempty"`
- DNSConfig *DNSConfig `json:",omitempty"`
- Secrets []*SecretReference `json:",omitempty"`
- Configs []*ConfigReference `json:",omitempty"`
- Isolation container.Isolation `json:",omitempty"`
- Sysctls map[string]string `json:",omitempty"`
- Capabilities []string `json:",omitempty"`
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+ Isolation container.Isolation `json:",omitempty"`
+ Sysctls map[string]string `json:",omitempty"`
+ CapabilityAdd []string `json:",omitempty"`
+ CapabilityDrop []string `json:",omitempty"`
+ Ulimits []*units.Ulimit `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
index 6b59711ab..6eb452d24 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/service.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -17,6 +17,10 @@ type Service struct {
// listing all tasks for a service, an operation that could be
// computation and network expensive.
ServiceStatus *ServiceStatus `json:",omitempty"`
+
+ // JobStatus is the status of a Service which is in one of ReplicatedJob or
+ // GlobalJob modes. It is absent on Replicated and Global services.
+ JobStatus *JobStatus `json:",omitempty"`
}
// ServiceSpec represents the spec of a service.
@@ -39,8 +43,10 @@ type ServiceSpec struct {
// ServiceMode represents the mode of a service.
type ServiceMode struct {
- Replicated *ReplicatedService `json:",omitempty"`
- Global *GlobalService `json:",omitempty"`
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+ ReplicatedJob *ReplicatedJob `json:",omitempty"`
+ GlobalJob *GlobalJob `json:",omitempty"`
}
// UpdateState is the state of a service update.
@@ -77,6 +83,32 @@ type ReplicatedService struct {
// GlobalService is a kind of ServiceMode.
type GlobalService struct{}
+// ReplicatedJob is a type of Service which executes its Tasks in parallel
+// until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+ // MaxConcurrent indicates the maximum number of Tasks that should be
+ // executing simultaneously for this job at any given time. There may be
+	// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
+ // there are fewer than MaxConcurrent tasks needed to reach
+ // TotalCompletions.
+ //
+ // If this field is empty, it will default to a max concurrency of 1.
+ MaxConcurrent *uint64 `json:",omitempty"`
+
+ // TotalCompletions is the total number of Tasks desired to run to
+ // completion.
+ //
+ // If this field is empty, the value of MaxConcurrent will be used.
+ TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
const (
// UpdateFailureActionPause PAUSE
UpdateFailureActionPause = "pause"
@@ -142,4 +174,29 @@ type ServiceStatus struct {
// services, this is computed by taking the number of tasks with desired
// state of not-Shutdown.
DesiredTasks uint64
+
+ // CompletedTasks is the number of tasks in the state Completed, if this
+ // service is in ReplicatedJob or GlobalJob mode. This field must be
+ // cross-referenced with the service type, because the default value of 0
+ // may mean that a service is not in a job mode, or it may mean that the
+ // job has yet to complete any tasks.
+ CompletedTasks uint64
+}
+
+// JobStatus is the status of a job-type service.
+type JobStatus struct {
+ // JobIteration is a value increased each time a Job is executed,
+ // successfully or otherwise. "Executed", in this case, means the job as a
+ // whole has been started, not that an individual Task has been launched. A
+ // job is "Executed" when its ServiceSpec is updated. JobIteration can be
+ // used to disambiguate Tasks belonging to different executions of a job.
+ //
+ // Though JobIteration will increase with each subsequent execution, it may
+ // not necessarily increase by 1, and so JobIteration should not be used to
+ // keep track of the number of times a job has been executed.
+ JobIteration Version
+
+ // LastExecution is the time that the job was last executed, as observed by
+ // Swarm manager.
+ LastExecution time.Time `json:",omitempty"`
}
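
As a small illustration of the new job modes, a hedged sketch constructing a `ServiceSpec` that uses `ReplicatedJob`; the image, command, and counts are placeholders, and the client helpers are assumed to match this vendored version.

```
// Create a job-mode service using the new ReplicatedJob field. Image,
// command, and the concurrency/completion counts are placeholders.
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	maxConcurrent := uint64(2)
	totalCompletions := uint64(10)

	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "batch-job"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "alpine:latest",
				Command: []string{"sh", "-c", "echo one unit of work"},
			},
		},
		Mode: swarm.ServiceMode{
			ReplicatedJob: &swarm.ReplicatedJob{
				MaxConcurrent:    &maxConcurrent,
				TotalCompletions: &totalCompletions,
			},
		},
	}

	if _, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}); err != nil {
		panic(err)
	}
}
```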
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
index d5a57df5d..a6f7ab7b5 100644
--- a/vendor/github.com/docker/docker/api/types/swarm/task.go
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -56,6 +56,12 @@ type Task struct {
DesiredState TaskState `json:",omitempty"`
NetworksAttachments []NetworkAttachment `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
+
+ // JobIteration is the JobIteration of the Service that this Task was
+ // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
+ // used to determine which Tasks belong to which run of the job. This field
+ // is absent if the Service mode is Replicated or Global.
+ JobIteration *Version `json:",omitempty"`
}
// TaskSpec represents the spec of a task.
@@ -85,13 +91,21 @@ type TaskSpec struct {
Runtime RuntimeType `json:",omitempty"`
}
-// Resources represents resources (CPU/Memory).
+// Resources represents resources (CPU/Memory) which can be advertised by a
+// node and requested to be reserved for a task.
type Resources struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
GenericResources []GenericResource `json:",omitempty"`
}
+// Limit describes limits on resources which can be requested by a task.
+type Limit struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ Pids int64 `json:",omitempty"`
+}
+
// GenericResource represents a "user defined" resource which can
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
type GenericResource struct {
@@ -119,7 +133,7 @@ type DiscreteGenericResource struct {
// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
- Limits *Resources `json:",omitempty"`
+ Limits *Limit `json:",omitempty"`
Reservations *Resources `json:",omitempty"`
}
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index 4cf9a95ff..e3a159912 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -154,11 +154,11 @@ type Info struct {
Images int
Driver string
DriverStatus [][2]string
- SystemStatus [][2]string
+ SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
- KernelMemory bool
+ KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
KernelMemoryTCP bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
@@ -175,6 +175,7 @@ type Info struct {
SystemTime string
LoggingDriver string
CgroupDriver string
+ CgroupVersion string `json:",omitempty"`
NEventsListener int
KernelVersion string
OperatingSystem string
@@ -194,23 +195,24 @@ type Info struct {
Labels []string
ExperimentalBuild bool
ServerVersion string
- ClusterStore string
- ClusterAdvertise string
+ ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
+ ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shutdown or upon daemon start if
// running containers are detected
- LiveRestoreEnabled bool
- Isolation container.Isolation
- InitBinary string
- ContainerdCommit Commit
- RuncCommit Commit
- InitCommit Commit
- SecurityOptions []string
- ProductLicense string `json:",omitempty"`
- Warnings []string
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+ ProductLicense string `json:",omitempty"`
+ DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+ Warnings []string
}
// KeyValue holds a key/value pair
@@ -218,6 +220,12 @@ type KeyValue struct {
Key, Value string
}
+// NetworkAddressPool is a temporary struct used by the Info struct
+type NetworkAddressPool struct {
+ Base string
+ Size int
+}
+
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
@@ -318,7 +326,7 @@ type ContainerState struct {
}
// ContainerNode stores information about the node that a container
-// is running on. It's only available in Docker Swarm
+// is running on. It's only used by the Docker Swarm standalone API
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
@@ -342,7 +350,7 @@ type ContainerJSONBase struct {
HostnamePath string
HostsPath string
LogPath string
- Node *ContainerNode `json:",omitempty"`
+ Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
Name string
RestartCount int
Driver string
@@ -510,6 +518,16 @@ type Checkpoint struct {
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
+
+ // This is exposed here only for internal use
+ // It is not currently supported to specify custom shim configs
+ Shim *ShimConfig `json:"-"`
+}
+
+// ShimConfig is used by runtime to configure containerd shims
+type ShimConfig struct {
+ Binary string
+ Opts interface{}
}
// DiskUsage contains response of Engine API:
diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go
index b5ee96a50..c69b08448 100644
--- a/vendor/github.com/docker/docker/api/types/volume.go
+++ b/vendor/github.com/docker/docker/api/types/volume.go
@@ -27,10 +27,13 @@ type Volume struct {
Name string `json:"Name"`
// The driver specific options used when creating the volume.
+ //
// Required: true
Options map[string]string `json:"Options"`
- // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
+ // The level at which the volume exists. Either `global` for cluster-wide,
+ // or `local` for machine level.
+ //
// Required: true
Scope string `json:"Scope"`
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go
index 0d4f46a84..8538078dd 100644
--- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go
+++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go
@@ -14,7 +14,9 @@ type VolumeCreateBody struct {
// Required: true
Driver string `json:"Driver"`
- // A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
+ // A mapping of driver options and values. These options are
+ // passed directly to the driver and are driver specific.
+ //
// Required: true
DriverOpts map[string]string `json:"DriverOpts"`
@@ -23,6 +25,7 @@ type VolumeCreateBody struct {
Labels map[string]string `json:"Labels"`
// The new volume's name. If not specified, Docker generates a name.
+ //
// Required: true
Name string `json:"Name"`
}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go
index 8e685d51c..be06179bf 100644
--- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go
+++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go
@@ -16,7 +16,8 @@ type VolumeListOKBody struct {
// Required: true
Volumes []*types.Volume `json:"Volumes"`
- // Warnings that occurred when fetching the list of volumes
+ // Warnings that occurred when fetching the list of volumes.
+ //
// Required: true
Warnings []string `json:"Warnings"`
}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
index 23c2e1e34..9d0f0dcbf 100644
--- a/vendor/github.com/docker/docker/client/client_unix.go
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd openbsd darwin solaris illumos
+// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
package client // import "github.com/docker/docker/client"
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
index 5b795e0c1..b1d5fea5b 100644
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ b/vendor/github.com/docker/docker/client/container_create.go
@@ -5,20 +5,23 @@ import (
"encoding/json"
"net/url"
+ "github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
)
type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
+ Platform *specs.Platform
}
// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) {
var response container.ContainerCreateCreatedBody
if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
@@ -30,7 +33,15 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
hostConfig.AutoRemove = false
}
+ if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
+ return response, err
+ }
+
query := url.Values{}
+ if platform != nil {
+ query.Set("platform", platforms.Format(*platform))
+ }
+
if containerName != "" {
query.Set("name", containerName)
}
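
A hedged usage sketch of the new platform parameter; the image and container names are illustrative, and the call requires a daemon accepting API version 1.41 or later (otherwise the version check above rejects a non-nil platform).

package example

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// createForPlatform requests a container for a specific image platform using the
// new parameter. With a nil platform the call behaves as before.
func createForPlatform(ctx context.Context, cli *client.Client) (string, error) {
	platform := &specs.Platform{OS: "linux", Architecture: "arm64"}
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine:3.12", Cmd: []string{"uname", "-m"}},
		nil, // host config
		nil, // networking config
		platform,
		"platform-demo", // container name (illustrative)
	)
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}
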
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
index 6ef44c774..0a6488dde 100644
--- a/vendor/github.com/docker/docker/client/container_stats.go
+++ b/vendor/github.com/docker/docker/client/container_stats.go
@@ -24,3 +24,19 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea
osType := getDockerOS(resp.header.Get("Server"))
return types.ContainerStats{Body: resp.body, OSType: osType}, err
}
+
+// ContainerStatsOneShot gets a single stat entry from a container.
+// It differs from `ContainerStats` in that the API should not wait to prime the stats
+func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) {
+ query := url.Values{}
+ query.Set("stream", "0")
+ query.Set("one-shot", "1")
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return types.ContainerStats{}, err
+ }
+
+ osType := getDockerOS(resp.header.Get("Server"))
+ return types.ContainerStats{Body: resp.body, OSType: osType}, err
+}
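
A hedged sketch of consuming the new one-shot endpoint; decoding the body into types.StatsJSON is an assumption about the payload shape, the point being that the daemon returns a single sample without waiting to prime the stats.

package example

import (
	"context"
	"encoding/json"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// readOneShotStats fetches a single stats sample and decodes it.
func readOneShotStats(ctx context.Context, cli *client.Client, containerID string) (*types.StatsJSON, error) {
	resp, err := cli.ContainerStatsOneShot(ctx, containerID)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var stats types.StatsJSON
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		return nil, err
	}
	return &stats, nil
}
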
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
index 001c10288..041bc8d49 100644
--- a/vendor/github.com/docker/docker/client/errors.go
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -24,8 +24,7 @@ func (err errConnectionFailed) Error() string {
// IsErrConnectionFailed returns true if the error is caused by a connection failure.
func IsErrConnectionFailed(err error) bool {
- _, ok := errors.Cause(err).(errConnectionFailed)
- return ok
+ return errors.As(err, &errConnectionFailed{})
}
// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
@@ -42,8 +41,9 @@ type notFound interface {
// IsErrNotFound returns true if the error is a NotFound error, which is returned
// by the API when some object is not found.
func IsErrNotFound(err error) bool {
- if _, ok := err.(notFound); ok {
- return ok
+ var e notFound
+ if errors.As(err, &e) {
+ return true
}
return errdefs.IsNotFound(err)
}
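
A minimal, self-contained illustration of why errors.As is used above: unlike a plain type assertion, it also matches errors wrapped with %w (or pkg/errors wrappers). The error type here only mirrors the client's unexported one for demonstration.

package main

import (
	"errors"
	"fmt"
)

// errConnectionFailed mirrors the client's unexported error type, for illustration only.
type errConnectionFailed struct{ host string }

func (e errConnectionFailed) Error() string { return "connection failed: " + e.host }

func main() {
	wrapped := fmt.Errorf("request failed: %w", errConnectionFailed{host: "unix:///var/run/docker.sock"})
	// A bare type assertion on wrapped would miss the inner error; errors.As unwraps the chain.
	fmt.Println(errors.As(wrapped, &errConnectionFailed{})) // true
}
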
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
index cde64be4b..aabad4a91 100644
--- a/vendor/github.com/docker/docker/client/interface.go
+++ b/vendor/github.com/docker/docker/client/interface.go
@@ -16,6 +16,7 @@ import (
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
volumetypes "github.com/docker/docker/api/types/volume"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
@@ -47,7 +48,7 @@ type CommonAPIClient interface {
type ContainerAPIClient interface {
ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
- ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error)
+ ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error)
ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error)
ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
@@ -67,6 +68,7 @@ type ContainerAPIClient interface {
ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
+ ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error)
ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error)
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
index 1cfc48a25..a9af001ef 100644
--- a/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/docker/docker/client/ping.go
@@ -17,7 +17,7 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
var ping types.Ping
// Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
- // because ping requests are used during API version negotiation, so we want
+ // because ping requests are used during API version negotiation, so we want
// to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
if err != nil {
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
index ee15a46ed..813eac2c9 100644
--- a/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -134,8 +134,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
// Don't decorate context sentinel errors; users may be comparing to
// them directly.
- switch err {
- case context.Canceled, context.DeadlineExceeded:
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return serverResp, err
}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
index 56bfe55b7..e0428bf98 100644
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/docker/docker/client/service_create.go
@@ -15,8 +15,7 @@ import (
// ServiceCreate creates a new Service.
func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
- var distErr error
-
+ var response types.ServiceCreateResponse
headers := map[string][]string{
"version": {cli.version},
}
@@ -31,46 +30,28 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
}
if err := validateServiceSpec(service); err != nil {
- return types.ServiceCreateResponse{}, err
+ return response, err
}
// ensure that the image is tagged
- var imgPlatforms []swarm.Platform
- if service.TaskTemplate.ContainerSpec != nil {
+ var resolveWarning string
+ switch {
+ case service.TaskTemplate.ContainerSpec != nil:
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
}
if options.QueryRegistry {
- var img string
- img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
- if img != "" {
- service.TaskTemplate.ContainerSpec.Image = img
- }
+ resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
}
- }
-
- // ensure that the image is tagged
- if service.TaskTemplate.PluginSpec != nil {
+ case service.TaskTemplate.PluginSpec != nil:
if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
service.TaskTemplate.PluginSpec.Remote = taggedImg
}
if options.QueryRegistry {
- var img string
- img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
- if img != "" {
- service.TaskTemplate.PluginSpec.Remote = img
- }
+ resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
}
}
- if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
- service.TaskTemplate.Placement = &swarm.Placement{}
- }
- if len(imgPlatforms) > 0 {
- service.TaskTemplate.Placement.Platforms = imgPlatforms
- }
-
- var response types.ServiceCreateResponse
resp, err := cli.post(ctx, "/services/create", nil, service, headers)
defer ensureReaderClosed(resp)
if err != nil {
@@ -78,14 +59,45 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
}
err = json.NewDecoder(resp.body).Decode(&response)
-
- if distErr != nil {
- response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ if resolveWarning != "" {
+ response.Warnings = append(response.Warnings, resolveWarning)
}
return response, err
}
+func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+ var warning string
+ if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil {
+ warning = digestWarning(taskSpec.ContainerSpec.Image)
+ } else {
+ taskSpec.ContainerSpec.Image = img
+ if len(imgPlatforms) > 0 {
+ if taskSpec.Placement == nil {
+ taskSpec.Placement = &swarm.Placement{}
+ }
+ taskSpec.Placement.Platforms = imgPlatforms
+ }
+ }
+ return warning
+}
+
+func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
+ var warning string
+ if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil {
+ warning = digestWarning(taskSpec.PluginSpec.Remote)
+ } else {
+ taskSpec.PluginSpec.Remote = img
+ if len(imgPlatforms) > 0 {
+ if taskSpec.Placement == nil {
+ taskSpec.Placement = &swarm.Placement{}
+ }
+ taskSpec.Placement.Platforms = imgPlatforms
+ }
+ }
+ return warning
+}
+
func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
var platforms []swarm.Platform
@@ -119,7 +131,7 @@ func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, ima
// imageWithDigestString takes an image string and a digest, and updates
// the image string if it didn't originally contain a digest. It returns
-// an empty string if there are no updates.
+// image unmodified in other situations.
func imageWithDigestString(image string, dgst digest.Digest) string {
namedRef, err := reference.ParseNormalizedNamed(image)
if err == nil {
@@ -131,7 +143,7 @@ func imageWithDigestString(image string, dgst digest.Digest) string {
}
}
}
- return ""
+ return image
}
// imageWithTagString takes an image string, and returns a tagged image
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
index cd0f59e21..c63895f74 100644
--- a/vendor/github.com/docker/docker/client/service_update.go
+++ b/vendor/github.com/docker/docker/client/service_update.go
@@ -15,8 +15,8 @@ import (
// of swarm.Service, which can be found using ServiceInspectWithRaw.
func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
var (
- query = url.Values{}
- distErr error
+ query = url.Values{}
+ response = types.ServiceUpdateResponse{}
)
headers := map[string][]string{
@@ -38,46 +38,28 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version
query.Set("version", strconv.FormatUint(version.Index, 10))
if err := validateServiceSpec(service); err != nil {
- return types.ServiceUpdateResponse{}, err
+ return response, err
}
- var imgPlatforms []swarm.Platform
// ensure that the image is tagged
- if service.TaskTemplate.ContainerSpec != nil {
+ var resolveWarning string
+ switch {
+ case service.TaskTemplate.ContainerSpec != nil:
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
}
if options.QueryRegistry {
- var img string
- img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
- if img != "" {
- service.TaskTemplate.ContainerSpec.Image = img
- }
+ resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
}
- }
-
- // ensure that the image is tagged
- if service.TaskTemplate.PluginSpec != nil {
+ case service.TaskTemplate.PluginSpec != nil:
if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
service.TaskTemplate.PluginSpec.Remote = taggedImg
}
if options.QueryRegistry {
- var img string
- img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
- if img != "" {
- service.TaskTemplate.PluginSpec.Remote = img
- }
+ resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
}
}
- if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
- service.TaskTemplate.Placement = &swarm.Placement{}
- }
- if len(imgPlatforms) > 0 {
- service.TaskTemplate.Placement.Platforms = imgPlatforms
- }
-
- var response types.ServiceUpdateResponse
resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
defer ensureReaderClosed(resp)
if err != nil {
@@ -85,9 +67,8 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version
}
err = json.NewDecoder(resp.body).Decode(&response)
-
- if distErr != nil {
- response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ if resolveWarning != "" {
+ response.Warnings = append(response.Warnings, resolveWarning)
}
return response, err
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
index 86f5c02b7..eeed67472 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -27,17 +27,6 @@ import (
"github.com/sirupsen/logrus"
)
-var unpigzPath string
-
-func init() {
- if path, err := exec.LookPath("unpigz"); err != nil {
- logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
- } else {
- logrus.Debugf("Using unpigz binary found at path %s", path)
- unpigzPath = path
- }
-}
-
type (
// Compression is the state representing whether the content is compressed or not.
Compression int
@@ -158,19 +147,30 @@ func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error)
}
func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
- if unpigzPath == "" {
+ noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
+ var noPigz bool
+
+ if noPigzEnv != "" {
+ var err error
+ noPigz, err = strconv.ParseBool(noPigzEnv)
+ if err != nil {
+ logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
+ }
+ }
+
+ if noPigz {
+ logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
return gzip.NewReader(buf)
}
- disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
- if disablePigzEnv != "" {
- if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
- return nil, err
- } else if disablePigz {
- return gzip.NewReader(buf)
- }
+ unpigzPath, err := exec.LookPath("unpigz")
+ if err != nil {
+ logrus.Debugf("unpigz binary not found, falling back to go gzip library")
+ return gzip.NewReader(buf)
}
+ logrus.Debugf("Using %s to decompress", unpigzPath)
+
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}
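
The MOBY_DISABLE_PIGZ value is parsed with strconv.ParseBool, so only the Go boolean spellings are honoured; anything else logs a warning and leaves pigz enabled. A small check of that behaviour:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseBool accepts "1", "t", "T", "TRUE", "true", "True" and the matching
	// false spellings; other values (e.g. "yes") return an error, which the
	// archive code above only warns about.
	for _, v := range []string{"1", "true", "0", "yes"} {
		b, err := strconv.ParseBool(v)
		fmt.Printf("MOBY_DISABLE_PIGZ=%q -> %v (err: %v)\n", v, b, err)
	}
}
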
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
index 0601f7b0d..f7888e659 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -11,6 +11,7 @@ import (
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/system"
+ "github.com/moby/sys/mount"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -151,10 +152,11 @@ func mknodChar0Overlay(cleansedOriginalPath string) error {
if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
}
- mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
- // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
- if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
- return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ // lowerdir needs ":" to be escaped: https://github.com/moby/moby/issues/40939#issuecomment-627098286
+ lowerEscaped := strings.ReplaceAll(lower, ":", "\\:")
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerEscaped, upper, work)
+ if err := mount.Mount("overlay", merged, "overlay", mOpts); err != nil {
+ return err
}
mergedDummy := filepath.Join(merged, dummyBase)
if err := os.Remove(mergedDummy); err != nil {
@@ -236,10 +238,11 @@ func createDirWithOverlayOpaque(tmp string) (string, error) {
if err := os.MkdirAll(lowerDummy, 0700); err != nil {
return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
}
- mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
- // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
- if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
- return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+ // lowerdir needs ":" to be escaped: https://github.com/moby/moby/issues/40939#issuecomment-627098286
+ lowerEscaped := strings.ReplaceAll(lower, ":", "\\:")
+ mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerEscaped, upper, work)
+ if err := mount.Mount("overlay", merged, "overlay", mOpts); err != nil {
+ return "", err
}
mergedDummy := filepath.Join(merged, dummyBase)
if err := os.Remove(mergedDummy); err != nil {
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
index d62633603..900661423 100644
--- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -10,9 +10,9 @@ import (
"strings"
"syscall"
+ "github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
- rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
@@ -81,7 +81,7 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if rsystem.RunningInUserNS() {
+ if sys.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
index b3af7a422..7569ac15d 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -114,31 +114,6 @@ type IdentityMapping struct {
gids []IDMap
}
-// NewIdentityMapping takes a requested user and group name and
-// using the data from /etc/sub{uid,gid} ranges, creates the
-// proper uid and gid remapping ranges for that user/group pair
-func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) {
- subuidRanges, err := parseSubuid(username)
- if err != nil {
- return nil, err
- }
- subgidRanges, err := parseSubgid(groupname)
- if err != nil {
- return nil, err
- }
- if len(subuidRanges) == 0 {
- return nil, fmt.Errorf("No subuid ranges found for user %q", username)
- }
- if len(subgidRanges) == 0 {
- return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
- }
-
- return &IdentityMapping{
- uids: createIDMap(subuidRanges),
- gids: createIDMap(subgidRanges),
- }, nil
-}
-
// NewIDMappingsFromMaps creates a new mapping from two slices
// Deprecated: this is a temporary shim while transitioning to IDMapping
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
@@ -236,10 +211,6 @@ func parseSubidFile(path, username string) (ranges, error) {
s := bufio.NewScanner(subidFile)
for s.Scan() {
- if err := s.Err(); err != nil {
- return rangeList, err
- }
-
text := strings.TrimSpace(s.Text())
if text == "" || strings.HasPrefix(text, "#") {
continue
@@ -260,5 +231,6 @@ func parseSubidFile(path, username string) (ranges, error) {
rangeList = append(rangeList, subIDRange{startid, length})
}
}
- return rangeList, nil
+
+ return rangeList, s.Err()
}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
index 3981ff64d..5defe6459 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -8,12 +8,13 @@ import (
"io"
"os"
"path/filepath"
- "strings"
+ "strconv"
"sync"
"syscall"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
+ "github.com/pkg/errors"
)
var (
@@ -105,14 +106,14 @@ func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
-func LookupUser(username string) (user.User, error) {
+func LookupUser(name string) (user.User, error) {
// first try a local system files lookup using existing capabilities
- usr, err := user.LookupUser(username)
+ usr, err := user.LookupUser(name)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
- usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+ usr, err = getentUser(name)
if err != nil {
return user.User{}, err
}
@@ -128,11 +129,11 @@ func LookupUID(uid int) (user.User, error) {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
- return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+ return getentUser(strconv.Itoa(uid))
}
-func getentUser(args string) (user.User, error) {
- reader, err := callGetent(args)
+func getentUser(name string) (user.User, error) {
+ reader, err := callGetent("passwd", name)
if err != nil {
return user.User{}, err
}
@@ -141,21 +142,21 @@ func getentUser(args string) (user.User, error) {
return user.User{}, err
}
if len(users) == 0 {
- return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+ return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name)
}
return users[0], nil
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
-func LookupGroup(groupname string) (user.Group, error) {
+func LookupGroup(name string) (user.Group, error) {
// first try a local system files lookup using existing capabilities
- group, err := user.LookupGroup(groupname)
+ group, err := user.LookupGroup(name)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
- return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+ return getentGroup(name)
}
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
@@ -167,11 +168,11 @@ func LookupGID(gid int) (user.Group, error) {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
- return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+ return getentGroup(strconv.Itoa(gid))
}
-func getentGroup(args string) (user.Group, error) {
- reader, err := callGetent(args)
+func getentGroup(name string) (user.Group, error) {
+ reader, err := callGetent("group", name)
if err != nil {
return user.Group{}, err
}
@@ -180,18 +181,18 @@ func getentGroup(args string) (user.Group, error) {
return user.Group{}, err
}
if len(groups) == 0 {
- return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+ return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name)
}
return groups[0], nil
}
-func callGetent(args string) (io.Reader, error) {
+func callGetent(database, key string) (io.Reader, error) {
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
// if no `getent` command on host, can't do anything else
if getentCmd == "" {
- return nil, fmt.Errorf("")
+ return nil, fmt.Errorf("unable to find getent command")
}
- out, err := execCmd(getentCmd, args)
+ out, err := execCmd(getentCmd, database, key)
if err != nil {
exitCode, errC := system.GetExitCode(err)
if errC != nil {
@@ -201,8 +202,7 @@ func callGetent(args string) (io.Reader, error) {
case 1:
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
case 2:
- terms := strings.Split(args, " ")
- return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database)
case 3:
return nil, fmt.Errorf("getent database doesn't support enumeration")
default:
@@ -229,3 +229,48 @@ func lazyChown(p string, uid, gid int, stat *system.StatT) error {
}
return os.Chown(p, uid, gid)
}
+
+// NewIdentityMapping takes a requested username and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func NewIdentityMapping(name string) (*IdentityMapping, error) {
+ usr, err := LookupUser(name)
+ if err != nil {
+ return nil, fmt.Errorf("Could not get user for username %s: %v", name, err)
+ }
+
+ uid := strconv.Itoa(usr.Uid)
+
+ subuidRangesWithUserName, err := parseSubuid(name)
+ if err != nil {
+ return nil, err
+ }
+ subgidRangesWithUserName, err := parseSubgid(name)
+ if err != nil {
+ return nil, err
+ }
+
+ subuidRangesWithUID, err := parseSubuid(uid)
+ if err != nil {
+ return nil, err
+ }
+ subgidRangesWithUID, err := parseSubgid(uid)
+ if err != nil {
+ return nil, err
+ }
+
+ subuidRanges := append(subuidRangesWithUserName, subuidRangesWithUID...)
+ subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...)
+
+ if len(subuidRanges) == 0 {
+ return nil, errors.Errorf("no subuid ranges found for user %q", name)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, errors.Errorf("no subgid ranges found for user %q", name)
+ }
+
+ return &IdentityMapping{
+ uids: createIDMap(subuidRanges),
+ gids: createIDMap(subgidRanges),
+ }, nil
+}
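
A minimal sketch, assuming the idtools package is used as-is, of calling the new single-argument NewIdentityMapping: subordinate ranges are now collected from /etc/subuid and /etc/subgid entries keyed by either the username or its numeric UID. "dockremap" is only the conventional remapping user, used here as an example.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Requires matching entries in /etc/subuid and /etc/subgid for the user
	// (by name or by numeric UID), otherwise an error is returned.
	mapping, err := idtools.NewIdentityMapping("dockremap")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("uid maps: %+v\n", mapping.UIDs())
	fmt.Printf("gid maps: %+v\n", mapping.GIDs())
}
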
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
index 6272c5a40..bf7ae0564 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -17,18 +17,13 @@ import (
var (
once sync.Once
userCommand string
-
- cmdTemplates = map[string]string{
- "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
- "useradd": "-r -s /bin/false %s",
- "usermod": "-%s %d-%d %s",
- }
-
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+)
+
+const (
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
- userMod = "usermod"
)
// AddNamespaceRangesUser takes a username and uses the standard system
@@ -67,7 +62,7 @@ func AddNamespaceRangesUser(name string) (int, int, error) {
return uid, gid, nil
}
-func addUser(userName string) error {
+func addUser(name string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
@@ -76,13 +71,18 @@ func addUser(userName string) error {
userCommand = "useradd"
}
})
- if userCommand == "" {
- return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ var args []string
+ switch userCommand {
+ case "adduser":
+ args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name}
+ case "useradd":
+ args = []string{"-r", "-s", "/bin/false", name}
+ default:
+ return fmt.Errorf("cannot add user; no useradd/adduser binary found")
}
- args := fmt.Sprintf(cmdTemplates[userCommand], userName)
- out, err := execCmd(userCommand, args)
- if err != nil {
- return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+
+ if out, err := execCmd(userCommand, args...); err != nil {
+ return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out))
}
return nil
}
@@ -101,7 +101,7 @@ func createSubordinateRanges(name string) error {
if err != nil {
return fmt.Errorf("Can't find available subuid range: %v", err)
}
- out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
if err != nil {
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
}
@@ -117,7 +117,7 @@ func createSubordinateRanges(name string) error {
if err != nil {
return fmt.Errorf("Can't find available subgid range: %v", err)
}
- out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name)
if err != nil {
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
index bcf6a4ffb..1e2d4a7a7 100644
--- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -6,7 +6,6 @@ import (
"fmt"
"os/exec"
"path/filepath"
- "strings"
)
func resolveBinary(binname string) (string, error) {
@@ -26,7 +25,7 @@ func resolveBinary(binname string) (string, error) {
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
-func execCmd(cmd, args string) ([]byte, error) {
- execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+func execCmd(cmd string, arg ...string) ([]byte, error) {
+ execCmd := exec.Command(cmd, arg...)
return execCmd.CombinedOutput()
}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
index aa372c20c..cf8d04b1b 100644
--- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -7,8 +7,8 @@ import (
"strings"
"time"
- "github.com/docker/docker/pkg/term"
units "github.com/docker/go-units"
+ "github.com/moby/term"
"github.com/morikuni/aec"
)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go
deleted file mode 100644
index ffd473311..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/flags.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "fmt"
- "strings"
-)
-
-var flags = map[string]struct {
- clear bool
- flag int
-}{
- "defaults": {false, 0},
- "ro": {false, RDONLY},
- "rw": {true, RDONLY},
- "suid": {true, NOSUID},
- "nosuid": {false, NOSUID},
- "dev": {true, NODEV},
- "nodev": {false, NODEV},
- "exec": {true, NOEXEC},
- "noexec": {false, NOEXEC},
- "sync": {false, SYNCHRONOUS},
- "async": {true, SYNCHRONOUS},
- "dirsync": {false, DIRSYNC},
- "remount": {false, REMOUNT},
- "mand": {false, MANDLOCK},
- "nomand": {true, MANDLOCK},
- "atime": {true, NOATIME},
- "noatime": {false, NOATIME},
- "diratime": {true, NODIRATIME},
- "nodiratime": {false, NODIRATIME},
- "bind": {false, BIND},
- "rbind": {false, RBIND},
- "unbindable": {false, UNBINDABLE},
- "runbindable": {false, RUNBINDABLE},
- "private": {false, PRIVATE},
- "rprivate": {false, RPRIVATE},
- "shared": {false, SHARED},
- "rshared": {false, RSHARED},
- "slave": {false, SLAVE},
- "rslave": {false, RSLAVE},
- "relatime": {false, RELATIME},
- "norelatime": {true, RELATIME},
- "strictatime": {false, STRICTATIME},
- "nostrictatime": {true, STRICTATIME},
-}
-
-var validFlags = map[string]bool{
- "": true,
- "size": true,
- "mode": true,
- "uid": true,
- "gid": true,
- "nr_inodes": true,
- "nr_blocks": true,
- "mpol": true,
-}
-
-var propagationFlags = map[string]bool{
- "bind": true,
- "rbind": true,
- "unbindable": true,
- "runbindable": true,
- "private": true,
- "rprivate": true,
- "shared": true,
- "rshared": true,
- "slave": true,
- "rslave": true,
-}
-
-// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
-func MergeTmpfsOptions(options []string) ([]string, error) {
- // We use collisions maps to remove duplicates.
- // For flag, the key is the flag value (the key for propagation flag is -1)
- // For data=value, the key is the data
- flagCollisions := map[int]bool{}
- dataCollisions := map[string]bool{}
-
- var newOptions []string
- // We process in reverse order
- for i := len(options) - 1; i >= 0; i-- {
- option := options[i]
- if option == "defaults" {
- continue
- }
- if f, ok := flags[option]; ok && f.flag != 0 {
- // There is only one propagation mode
- key := f.flag
- if propagationFlags[option] {
- key = -1
- }
- // Check to see if there is collision for flag
- if !flagCollisions[key] {
- // We prepend the option and add to collision map
- newOptions = append([]string{option}, newOptions...)
- flagCollisions[key] = true
- }
- continue
- }
- opt := strings.SplitN(option, "=", 2)
- if len(opt) != 2 || !validFlags[opt[0]] {
- return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
- }
- if !dataCollisions[opt[0]] {
- // We prepend the option and add to collision map
- newOptions = append([]string{option}, newOptions...)
- dataCollisions[opt[0]] = true
- }
- }
-
- return newOptions, nil
-}
-
-// Parse fstab type mount options into mount() flags
-// and device specific data
-func parseOptions(options string) (int, string) {
- var (
- flag int
- data []string
- )
-
- for _, o := range strings.Split(options, ",") {
- // If the option does not exist in the flags table or the flag
- // is not supported on the platform,
- // then it is a data value for a specific fs type
- if f, exists := flags[o]; exists && f.flag != 0 {
- if f.clear {
- flag &= ^f.flag
- } else {
- flag |= f.flag
- }
- } else {
- data = append(data, o)
- }
- }
- return flag, strings.Join(data, ",")
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
deleted file mode 100644
index ef35ef905..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build freebsd,cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-/*
-#include <sys/mount.h>
-*/
-import "C"
-
-const (
- // RDONLY will mount the filesystem as read-only.
- RDONLY = C.MNT_RDONLY
-
- // NOSUID will not allow set-user-identifier or set-group-identifier bits to
- // take effect.
- NOSUID = C.MNT_NOSUID
-
- // NOEXEC will not allow execution of any binaries on the mounted file system.
- NOEXEC = C.MNT_NOEXEC
-
- // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
- SYNCHRONOUS = C.MNT_SYNCHRONOUS
-
- // NOATIME will not update the file access time when reading from a file.
- NOATIME = C.MNT_NOATIME
-)
-
-// These flags are unsupported.
-const (
- BIND = 0
- DIRSYNC = 0
- MANDLOCK = 0
- NODEV = 0
- NODIRATIME = 0
- UNBINDABLE = 0
- RUNBINDABLE = 0
- PRIVATE = 0
- RPRIVATE = 0
- SHARED = 0
- RSHARED = 0
- SLAVE = 0
- RSLAVE = 0
- RBIND = 0
- RELATIVE = 0
- RELATIME = 0
- REMOUNT = 0
- STRICTATIME = 0
- mntDetach = 0
-)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
deleted file mode 100644
index a1b199a31..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-const (
- // RDONLY will mount the file system read-only.
- RDONLY = unix.MS_RDONLY
-
- // NOSUID will not allow set-user-identifier or set-group-identifier bits to
- // take effect.
- NOSUID = unix.MS_NOSUID
-
- // NODEV will not interpret character or block special devices on the file
- // system.
- NODEV = unix.MS_NODEV
-
- // NOEXEC will not allow execution of any binaries on the mounted file system.
- NOEXEC = unix.MS_NOEXEC
-
- // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
- SYNCHRONOUS = unix.MS_SYNCHRONOUS
-
- // DIRSYNC will force all directory updates within the file system to be done
- // synchronously. This affects the following system calls: create, link,
- // unlink, symlink, mkdir, rmdir, mknod and rename.
- DIRSYNC = unix.MS_DIRSYNC
-
- // REMOUNT will attempt to remount an already-mounted file system. This is
- // commonly used to change the mount flags for a file system, especially to
- // make a readonly file system writeable. It does not change device or mount
- // point.
- REMOUNT = unix.MS_REMOUNT
-
- // MANDLOCK will force mandatory locks on a filesystem.
- MANDLOCK = unix.MS_MANDLOCK
-
- // NOATIME will not update the file access time when reading from a file.
- NOATIME = unix.MS_NOATIME
-
- // NODIRATIME will not update the directory access time.
- NODIRATIME = unix.MS_NODIRATIME
-
- // BIND remounts a subtree somewhere else.
- BIND = unix.MS_BIND
-
- // RBIND remounts a subtree and all possible submounts somewhere else.
- RBIND = unix.MS_BIND | unix.MS_REC
-
- // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
- UNBINDABLE = unix.MS_UNBINDABLE
-
- // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
- RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
-
- // PRIVATE creates a mount which carries no propagation abilities.
- PRIVATE = unix.MS_PRIVATE
-
- // RPRIVATE marks the entire mount tree as PRIVATE.
- RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
-
- // SLAVE creates a mount which receives propagation from its master, but not
- // vice versa.
- SLAVE = unix.MS_SLAVE
-
- // RSLAVE marks the entire mount tree as SLAVE.
- RSLAVE = unix.MS_SLAVE | unix.MS_REC
-
- // SHARED creates a mount which provides the ability to create mirrors of
- // that mount such that mounts and unmounts within any of the mirrors
- // propagate to the other mirrors.
- SHARED = unix.MS_SHARED
-
- // RSHARED marks the entire mount tree as SHARED.
- RSHARED = unix.MS_SHARED | unix.MS_REC
-
- // RELATIME updates inode access times relative to modify or change time.
- RELATIME = unix.MS_RELATIME
-
- // STRICTATIME allows to explicitly request full atime updates. This makes
- // it possible for the kernel to default to relatime or noatime but still
- // allow userspace to override it.
- STRICTATIME = unix.MS_STRICTATIME
-
- mntDetach = unix.MNT_DETACH
-)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
deleted file mode 100644
index cc6c47590..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build !linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-// These flags are unsupported.
-const (
- BIND = 0
- DIRSYNC = 0
- MANDLOCK = 0
- NOATIME = 0
- NODEV = 0
- NODIRATIME = 0
- NOEXEC = 0
- NOSUID = 0
- UNBINDABLE = 0
- RUNBINDABLE = 0
- PRIVATE = 0
- RPRIVATE = 0
- SHARED = 0
- RSHARED = 0
- SLAVE = 0
- RSLAVE = 0
- RBIND = 0
- RELATIME = 0
- RELATIVE = 0
- REMOUNT = 0
- STRICTATIME = 0
- SYNCHRONOUS = 0
- RDONLY = 0
- mntDetach = 0
-)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
deleted file mode 100644
index be0631c63..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mount.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "sort"
- "strconv"
- "strings"
-
- "github.com/sirupsen/logrus"
-)
-
-// mountError records an error from mount or unmount operation
-type mountError struct {
- op string
- source, target string
- flags uintptr
- data string
- err error
-}
-
-func (e *mountError) Error() string {
- out := e.op + " "
-
- if e.source != "" {
- out += e.source + ":" + e.target
- } else {
- out += e.target
- }
-
- if e.flags != uintptr(0) {
- out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
- }
- if e.data != "" {
- out += ", data: " + e.data
- }
-
- out += ": " + e.err.Error()
- return out
-}
-
-// Cause returns the underlying cause of the error
-func (e *mountError) Cause() error {
- return e.err
-}
-
-// FilterFunc is a type defining a callback function
-// to filter out unwanted entries. It takes a pointer
-// to an Info struct (not fully populated, currently
-// only Mountpoint is filled in), and returns two booleans:
-// - skip: true if the entry should be skipped
-// - stop: true if parsing should be stopped after the entry
-type FilterFunc func(*Info) (skip, stop bool)
-
-// PrefixFilter discards all entries whose mount points
-// do not start with a prefix specified
-func PrefixFilter(prefix string) FilterFunc {
- return func(m *Info) (bool, bool) {
- skip := !strings.HasPrefix(m.Mountpoint, prefix)
- return skip, false
- }
-}
-
-// SingleEntryFilter looks for a specific entry
-func SingleEntryFilter(mp string) FilterFunc {
- return func(m *Info) (bool, bool) {
- if m.Mountpoint == mp {
- return false, true // don't skip, stop now
- }
- return true, false // skip, keep going
- }
-}
-
-// ParentsFilter returns all entries whose mount points
-// can be parents of a path specified, discarding others.
-// For example, given `/var/lib/docker/something`, entries
-// like `/var/lib/docker`, `/var` and `/` are returned.
-func ParentsFilter(path string) FilterFunc {
- return func(m *Info) (bool, bool) {
- skip := !strings.HasPrefix(path, m.Mountpoint)
- return skip, false
- }
-}
-
-// GetMounts retrieves a list of mounts for the current running process,
-// with an optional filter applied (use nil for no filter).
-func GetMounts(f FilterFunc) ([]*Info, error) {
- return parseMountTable(f)
-}
-
-// Mounted determines if a specified mountpoint has been mounted.
-// On Linux it looks at /proc/self/mountinfo.
-func Mounted(mountpoint string) (bool, error) {
- entries, err := GetMounts(SingleEntryFilter(mountpoint))
- if err != nil {
- return false, err
- }
-
- return len(entries) > 0, nil
-}
-
-// Mount will mount filesystem according to the specified configuration, on the
-// condition that the target path is *not* already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func Mount(device, target, mType, options string) error {
- flag, data := parseOptions(options)
- if flag&REMOUNT != REMOUNT {
- if mounted, err := Mounted(target); err != nil || mounted {
- return err
- }
- }
- return mount(device, target, mType, uintptr(flag), data)
-}
-
-// ForceMount will mount a filesystem according to the specified configuration,
-// *regardless* if the target path is not already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func ForceMount(device, target, mType, options string) error {
- flag, data := parseOptions(options)
- return mount(device, target, mType, uintptr(flag), data)
-}
-
-// Unmount lazily unmounts a filesystem on supported platforms, otherwise
-// does a normal unmount.
-func Unmount(target string) error {
- return unmount(target, mntDetach)
-}
-
-// RecursiveUnmount unmounts the target and all mounts underneath, starting with
-// the deepest mount first.
-func RecursiveUnmount(target string) error {
- mounts, err := parseMountTable(PrefixFilter(target))
- if err != nil {
- return err
- }
-
- // Make the deepest mount be first
- sort.Slice(mounts, func(i, j int) bool {
- return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
- })
-
- for i, m := range mounts {
- logrus.Debugf("Trying to unmount %s", m.Mountpoint)
- err = unmount(m.Mountpoint, mntDetach)
- if err != nil {
- if i == len(mounts)-1 { // last mount
- if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
- return err
- }
- } else {
- // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
- logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
- }
- }
-
- logrus.Debugf("Unmounted %s", m.Mountpoint)
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
deleted file mode 100644
index 09ad36060..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-/*
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/_iovec.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-*/
-import "C"
-
-import (
- "strings"
- "syscall"
- "unsafe"
-)
-
-func allocateIOVecs(options []string) []C.struct_iovec {
- out := make([]C.struct_iovec, len(options))
- for i, option := range options {
- out[i].iov_base = unsafe.Pointer(C.CString(option))
- out[i].iov_len = C.size_t(len(option) + 1)
- }
- return out
-}
-
-func mount(device, target, mType string, flag uintptr, data string) error {
- isNullFS := false
-
- xs := strings.Split(data, ",")
- for _, x := range xs {
- if x == "bind" {
- isNullFS = true
- }
- }
-
- options := []string{"fspath", target}
- if isNullFS {
- options = append(options, "fstype", "nullfs", "target", device)
- } else {
- options = append(options, "fstype", mType, "from", device)
- }
- rawOptions := allocateIOVecs(options)
- for _, rawOption := range rawOptions {
- defer C.free(rawOption.iov_base)
- }
-
- if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
- return &mountError{
- op: "mount",
- source: device,
- target: target,
- flags: flag,
- err: syscall.Errno(errno),
- }
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
deleted file mode 100644
index a0a1ad236..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-const (
- // ptypes is the set propagation types.
- ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
-
- // pflags is the full set valid flags for a change propagation call.
- pflags = ptypes | unix.MS_REC | unix.MS_SILENT
-
- // broflags is the combination of bind and read only
- broflags = unix.MS_BIND | unix.MS_RDONLY
-)
-
-// isremount returns true if either device name or flags identify a remount request, false otherwise.
-func isremount(device string, flags uintptr) bool {
- switch {
- // We treat device "" and "none" as a remount request to provide compatibility with
- // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
- case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
- return true
- default:
- return false
- }
-}
-
-func mount(device, target, mType string, flags uintptr, data string) error {
- oflags := flags &^ ptypes
- if !isremount(device, flags) || data != "" {
- // Initial call applying all non-propagation flags for mount
- // or remount with changed data
- if err := unix.Mount(device, target, mType, oflags, data); err != nil {
- return &mountError{
- op: "mount",
- source: device,
- target: target,
- flags: oflags,
- data: data,
- err: err,
- }
- }
- }
-
- if flags&ptypes != 0 {
- // Change the propagation type.
- if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
- return &mountError{
- op: "remount",
- target: target,
- flags: flags & pflags,
- err: err,
- }
- }
- }
-
- if oflags&broflags == broflags {
- // Remount the bind to apply read only.
- if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
- return &mountError{
- op: "remount-ro",
- target: target,
- flags: oflags | unix.MS_REMOUNT,
- err: err,
- }
-
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
deleted file mode 100644
index c3e5aec27..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func mount(device, target, mType string, flag uintptr, data string) error {
- panic("Not implemented")
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
deleted file mode 100644
index ecd03fc02..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-// Info reveals information about a particular mounted filesystem. This
-// struct is populated from the content in the /proc/<pid>/mountinfo file.
-type Info struct {
- // ID is a unique identifier of the mount (may be reused after umount).
- ID int
-
- // Parent indicates the ID of the mount parent (or of self for the top of the
- // mount tree).
- Parent int
-
- // Major indicates one half of the device ID which identifies the device class.
- Major int
-
- // Minor indicates one half of the device ID which identifies a specific
- // instance of device.
- Minor int
-
- // Root of the mount within the filesystem.
- Root string
-
- // Mountpoint indicates the mount point relative to the process's root.
- Mountpoint string
-
- // Opts represents mount-specific options.
- Opts string
-
- // Optional represents optional fields.
- Optional string
-
- // Fstype indicates the type of filesystem, such as EXT3.
- Fstype string
-
- // Source indicates filesystem specific information or "none".
- Source string
-
- // VfsOpts represents per super block options.
- VfsOpts string
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
deleted file mode 100644
index 0af3959dc..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-/*
-#include <sys/param.h>
-#include <sys/ucred.h>
-#include <sys/mount.h>
-*/
-import "C"
-
-import (
- "fmt"
- "reflect"
- "unsafe"
-)
-
-// parseMountTable returns information about mounted filesystems
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
- var rawEntries *C.struct_statfs
-
- count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
- if count == 0 {
- return nil, fmt.Errorf("Failed to call getmntinfo")
- }
-
- var entries []C.struct_statfs
- header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
- header.Cap = count
- header.Len = count
- header.Data = uintptr(unsafe.Pointer(rawEntries))
-
- var out []*Info
- for _, entry := range entries {
- var mountinfo Info
- var skip, stop bool
- mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
-
- if filter != nil {
- // filter out entries we're not interested in
- skip, stop = filter(&mountinfo)
- if skip {
- continue
- }
- }
-
- mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
- mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
-
- out = append(out, &mountinfo)
- if stop {
- break
- }
- }
- return out, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
deleted file mode 100644
index 58ca61f3f..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-
- "github.com/pkg/errors"
-)
-
-func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
- s := bufio.NewScanner(r)
- out := []*Info{}
- var err error
- for s.Scan() {
- if err = s.Err(); err != nil {
- return nil, err
- }
- /*
- See http://man7.org/linux/man-pages/man5/proc.5.html
-
- 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
- (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
-
- (1) mount ID: unique identifier of the mount (may be reused after umount)
- (2) parent ID: ID of parent (or of self for the top of the mount tree)
- (3) major:minor: value of st_dev for files on filesystem
- (4) root: root of the mount within the filesystem
- (5) mount point: mount point relative to the process's root
- (6) mount options: per mount options
- (7) optional fields: zero or more fields of the form "tag[:value]"
- (8) separator: marks the end of the optional fields
- (9) filesystem type: name of filesystem of the form "type[.subtype]"
- (10) mount source: filesystem specific information or "none"
- (11) super options: per super block options
- */
-
- text := s.Text()
- fields := strings.Split(text, " ")
- numFields := len(fields)
- if numFields < 10 {
- // should be at least 10 fields
- return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
- }
-
- p := &Info{}
- // ignore any numbers parsing errors, as there should not be any
- p.ID, _ = strconv.Atoi(fields[0])
- p.Parent, _ = strconv.Atoi(fields[1])
- mm := strings.Split(fields[2], ":")
- if len(mm) != 2 {
- return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
- }
- p.Major, _ = strconv.Atoi(mm[0])
- p.Minor, _ = strconv.Atoi(mm[1])
-
- p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
- if err != nil {
- return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3])
- }
-
- p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
- if err != nil {
- return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4])
- }
- p.Opts = fields[5]
-
- var skip, stop bool
- if filter != nil {
- // filter out entries we're not interested in
- skip, stop = filter(p)
- if skip {
- continue
- }
- }
-
-		// zero or more optional fields, terminated by the separator ("-")
- i := 6
- for ; i < numFields && fields[i] != "-"; i++ {
- switch i {
- case 6:
- p.Optional = fields[6]
- default:
-			/* NOTE there might be more optional fields before the separator, such as
- fields[7]...fields[N] (where N < sepIndex), although
- as of Linux kernel 4.15 the only known ones are
- mount propagation flags in fields[6]. The correct
- behavior is to ignore any unknown optional fields.
- */
- }
- }
- if i == numFields {
- return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
- }
-
- // There should be 3 fields after the separator...
- if i+4 > numFields {
- return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
- }
- // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
- // (like "//serv/My Documents") _may_ end up having a space in the last field
- // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
- // option unc= is ignored, so a space should not appear. In here we ignore
- // those "extra" fields caused by extra spaces.
- p.Fstype = fields[i+1]
- p.Source = fields[i+2]
- p.VfsOpts = fields[i+3]
-
- out = append(out, p)
- if stop {
- break
- }
- }
- return out, nil
-}
-
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
-// bind mounts
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseInfoFile(f, filter)
-}
-
-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
-func PidMountInfo(pid int) ([]*Info, error) {
- f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseInfoFile(f, nil)
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
deleted file mode 100644
index fd16d3ed6..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows,!linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
- "fmt"
- "runtime"
-)
-
-func parseMountTable(f FilterFunc) ([]*Info, error) {
- return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
deleted file mode 100644
index 27e0f6976..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func parseMountTable(f FilterFunc) ([]*Info, error) {
- // Do NOT return an error!
- return nil, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
deleted file mode 100644
index db3882874..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeShared(mountPoint string) error {
- return ensureMountedAs(mountPoint, SHARED)
-}
-
-// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeRShared(mountPoint string) error {
- return ensureMountedAs(mountPoint, RSHARED)
-}
-
-// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakePrivate(mountPoint string) error {
- return ensureMountedAs(mountPoint, PRIVATE)
-}
-
-// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
-// enabled. See the supported options in flags.go for further reference.
-func MakeRPrivate(mountPoint string) error {
- return ensureMountedAs(mountPoint, RPRIVATE)
-}
-
-// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeSlave(mountPoint string) error {
- return ensureMountedAs(mountPoint, SLAVE)
-}
-
-// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeRSlave(mountPoint string) error {
- return ensureMountedAs(mountPoint, RSLAVE)
-}
-
-// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
-// enabled. See the supported options in flags.go for further reference.
-func MakeUnbindable(mountPoint string) error {
- return ensureMountedAs(mountPoint, UNBINDABLE)
-}
-
-// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
-// option enabled. See the supported options in flags.go for further reference.
-func MakeRUnbindable(mountPoint string) error {
- return ensureMountedAs(mountPoint, RUNBINDABLE)
-}
-
-// MakeMount ensures that the file or directory given is a mount point,
-// bind mounting it to itself in case it is not.
-func MakeMount(mnt string) error {
- mounted, err := Mounted(mnt)
- if err != nil {
- return err
- }
- if mounted {
- return nil
- }
-
- return mount(mnt, mnt, "none", uintptr(BIND), "")
-}
-
-func ensureMountedAs(mnt string, flags int) error {
- if err := MakeMount(mnt); err != nil {
- return err
- }
-
- return mount("", mnt, "none", uintptr(flags), "")
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go
deleted file mode 100644
index 4be427685..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !windows
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import "golang.org/x/sys/unix"
-
-func unmount(target string, flags int) error {
- err := unix.Unmount(target, flags)
- if err == nil || err == unix.EINVAL {
- // Ignore "not mounted" error here. Note the same error
- // can be returned if flags are invalid, so this code
- // assumes that the flags value is always correct.
- return nil
- }
-
- return &mountError{
- op: "umount",
- target: target,
- flags: uintptr(flags),
- err: err,
- }
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go
deleted file mode 100644
index a88ad3577..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build windows
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func unmount(target string, flag int) error {
- panic("Not implemented")
-}
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
index 3c42cff5e..2dfb66af0 100644
--- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
@@ -231,6 +231,9 @@ var (
// Dame Mary Lucy Cartwright - British mathematician who was one of the first to study what is now known as chaos theory. Also known for Cartwright's theorem which finds applications in signal processing. https://en.wikipedia.org/wiki/Mary_Cartwright
"cartwright",
+ // George Washington Carver - American agricultural scientist and inventor. He was the most prominent black scientist of the early 20th century. https://en.wikipedia.org/wiki/George_Washington_Carver
+ "carver",
+
// Vinton Gray Cerf - American Internet pioneer, recognised as one of "the fathers of the Internet". With Robert Elliot Kahn, he designed TCP and IP, the primary data communication protocols of the Internet and other computer networks. https://en.wikipedia.org/wiki/Vint_Cerf
"cerf",
@@ -452,6 +455,9 @@ var (
// Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil
"jang",
+	// Mae Carol Jemison - American engineer, physician, and former NASA astronaut. She became the first black woman to travel in space when she served as a mission specialist aboard the Space Shuttle Endeavour - https://en.wikipedia.org/wiki/Mae_Jemison
+ "jemison",
+
// Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik
"jennings",
@@ -491,7 +497,7 @@ var (
// Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana
"khorana",
- // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby
+ // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby
"kilby",
// Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch
@@ -626,7 +632,7 @@ var (
// Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1
"northcutt",
- // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce
+ // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce
"noyce",
// Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
index e1d134a5d..b4646277a 100644
--- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -130,12 +130,10 @@ func mkdirWithACL(name string, sddl string) error {
// by the daemon. This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
- if !filepath.IsAbs(path) {
- if !strings.HasPrefix(path, string(os.PathSeparator)) {
- return false
- }
+ if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) {
+ return true
}
- return true
+ return false
}
// The origin of the functions below here are the golang OS and windows packages,
diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go
deleted file mode 100644
index c2bb0f4cc..000000000
--- a/vendor/github.com/docker/docker/pkg/system/init_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// InitLCOW does nothing since LCOW is a windows only feature
-func InitLCOW(experimental bool) {
-}
-
-// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
-func ContainerdRuntimeSupported(_ bool, _ string) bool {
- return true
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
index 7e4ac55d7..a91288c60 100644
--- a/vendor/github.com/docker/docker/pkg/system/init_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -3,26 +3,15 @@ package system // import "github.com/docker/docker/pkg/system"
import (
"os"
- "github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
var (
- // lcowSupported determines if Linux Containers on Windows are supported.
- lcowSupported = false
-
// containerdRuntimeSupported determines if ContainerD should be the runtime.
// As of March 2019, this is an experimental feature.
containerdRuntimeSupported = false
)
-// InitLCOW sets whether LCOW is supported or not. Requires RS5+
-func InitLCOW(experimental bool) {
- if experimental && osversion.Build() >= osversion.RS5 {
- lcowSupported = true
- }
-}
-
// InitContainerdRuntime sets whether to use ContainerD for runtime
// on Windows. This is an experimental feature still in development, and
// also requires an environment variable to be set (so as not to turn the
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go
index 5be3e2182..0f00028fb 100644
--- a/vendor/github.com/docker/docker/pkg/system/lcow.go
+++ b/vendor/github.com/docker/docker/pkg/system/lcow.go
@@ -1,32 +1,48 @@
+// +build windows,!no_lcow
+
package system // import "github.com/docker/docker/pkg/system"
import (
- "runtime"
"strings"
+ "github.com/Microsoft/hcsshim/osversion"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-// IsOSSupported determines if an operating system is supported by the host
-func IsOSSupported(os string) bool {
- if strings.EqualFold(runtime.GOOS, os) {
- return true
- }
- if LCOWSupported() && strings.EqualFold(os, "linux") {
- return true
+var (
+ // lcowSupported determines if Linux Containers on Windows are supported.
+ lcowSupported = false
+)
+
+// InitLCOW sets whether LCOW is supported or not. Requires RS5+
+func InitLCOW(experimental bool) {
+ if experimental && osversion.Build() >= osversion.RS5 {
+ lcowSupported = true
}
- return false
+}
+
+func LCOWSupported() bool {
+ return lcowSupported
}
// ValidatePlatform determines if a platform structure is valid.
// TODO This is a temporary windows-only function, should be replaced by
// comparison of worker capabilities
func ValidatePlatform(platform specs.Platform) error {
- if runtime.GOOS == "windows" {
- if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
- return errors.Errorf("unsupported os %s", platform.OS)
- }
+ if !IsOSSupported(platform.OS) {
+ return errors.Errorf("unsupported os %s", platform.OS)
}
return nil
}
+
+// IsOSSupported determines if an operating system is supported by the host
+func IsOSSupported(os string) bool {
+ if strings.EqualFold("windows", os) {
+ return true
+ }
+ if LCOWSupported() && strings.EqualFold(os, "linux") {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
deleted file mode 100644
index 26397fb8a..000000000
--- a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// LCOWSupported returns true if Linux containers on Windows are supported.
-func LCOWSupported() bool {
- return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
new file mode 100644
index 000000000..3d3cf775a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go
@@ -0,0 +1,28 @@
+// +build !windows windows,no_lcow
+
+package system // import "github.com/docker/docker/pkg/system"
+import (
+ "runtime"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// InitLCOW does nothing since LCOW is a windows only feature
+func InitLCOW(_ bool) {}
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return false
+}
+
+// ValidatePlatform determines if a platform structure is valid. This function
+// is used for LCOW, and is a no-op on non-windows platforms.
+func ValidatePlatform(_ specs.Platform) error {
+ return nil
+}
+
+// IsOSSupported determines if an operating system is supported by the host.
+func IsOSSupported(os string) bool {
+ return strings.EqualFold(runtime.GOOS, os)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
deleted file mode 100644
index f0139df8f..000000000
--- a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-// LCOWSupported returns true if Linux containers on Windows are supported.
-func LCOWSupported() bool {
- return lcowSupported
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
index 188f2c295..22a56136c 100644
--- a/vendor/github.com/docker/docker/pkg/system/path_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -1,24 +1,27 @@
package system // import "github.com/docker/docker/pkg/system"
-import "syscall"
+import "golang.org/x/sys/windows"
// GetLongPathName converts Windows short pathnames to full pathnames.
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
// It is a no-op on non-Windows platforms
func GetLongPathName(path string) (string, error) {
// See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
- p := syscall.StringToUTF16(path)
+ p, err := windows.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
b := p // GetLongPathName says we can reuse buffer
- n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
if n > uint32(len(b)) {
b = make([]uint16, n)
- _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ _, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
}
- return syscall.UTF16ToString(b), nil
+ return windows.UTF16ToString(b), nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go
index 0195a891b..79aebb527 100644
--- a/vendor/github.com/docker/docker/pkg/system/process_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -3,6 +3,9 @@
package system // import "github.com/docker/docker/pkg/system"
import (
+ "fmt"
+ "io/ioutil"
+ "strings"
"syscall"
"golang.org/x/sys/unix"
@@ -22,3 +25,20 @@ func IsProcessAlive(pid int) bool {
func KillProcess(pid int) {
unix.Kill(pid, unix.SIGKILL)
}
+
+// IsProcessZombie returns true if the process has state "Z" (zombie).
+// http://man7.org/linux/man-pages/man5/proc.5.html
+func IsProcessZombie(pid int) (bool, error) {
+ statPath := fmt.Sprintf("/proc/%d/stat", pid)
+ dataBytes, err := ioutil.ReadFile(statPath)
+ if err != nil {
+ return false, err
+ }
+ data := string(dataBytes)
+ sdata := strings.SplitN(data, " ", 4)
+ if len(sdata) >= 3 && sdata[2] == "Z" {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go
index 4e70c97b1..09bdfa0ca 100644
--- a/vendor/github.com/docker/docker/pkg/system/process_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go
@@ -13,6 +13,6 @@ func IsProcessAlive(pid int) bool {
func KillProcess(pid int) {
p, err := os.FindProcess(pid)
if err == nil {
- p.Kill()
+ _ = p.Kill()
}
}
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
index b31099180..9e251dc15 100644
--- a/vendor/github.com/docker/docker/pkg/system/rm.go
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -5,7 +5,7 @@ import (
"syscall"
"time"
- "github.com/docker/docker/pkg/mount"
+ "github.com/moby/sys/mount"
"github.com/pkg/errors"
)
@@ -63,12 +63,8 @@ func EnsureRemoveAll(dir string) error {
return err
}
- if mounted, _ := mount.Mounted(pe.Path); mounted {
- if e := mount.Unmount(pe.Path); e != nil {
- if mounted, _ := mount.Mounted(pe.Path); mounted {
- return errors.Wrapf(e, "error while removing %s", dir)
- }
- }
+ if e := mount.Unmount(pe.Path); e != nil {
+ return errors.Wrapf(e, "error while removing %s", dir)
}
if exitOnErr[pe.Path] == maxRetry {
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go
index c1c0ee9f3..ea55c3dbb 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go
@@ -1,3 +1,5 @@
+// +build freebsd netbsd
+
package system // import "github.com/docker/docker/pkg/system"
import "syscall"
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..6a51ccd64
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,13 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
index 919a412a7..905d10f15 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -9,9 +9,3 @@ import "golang.org/x/sys/unix"
func Unmount(dest string) error {
return unix.Unmount(dest, 0)
}
-
-// CommandLineToArgv should not be used on Unix.
-// It simply returns commandLine in the only element in the returned array.
-func CommandLineToArgv(commandLine string) ([]string, error) {
- return []string{commandLine}, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
index 1711130bc..1588aa3ef 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -10,36 +10,36 @@ import (
)
const (
- OWNER_SECURITY_INFORMATION = 0x00000001
- GROUP_SECURITY_INFORMATION = 0x00000002
- DACL_SECURITY_INFORMATION = 0x00000004
- SACL_SECURITY_INFORMATION = 0x00000008
- LABEL_SECURITY_INFORMATION = 0x00000010
- ATTRIBUTE_SECURITY_INFORMATION = 0x00000020
- SCOPE_SECURITY_INFORMATION = 0x00000040
+ OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION
+ GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION
+ DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION
+ SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION
+ LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION
+ ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION
+ SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION
PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080
ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100
- BACKUP_SECURITY_INFORMATION = 0x00010000
- PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000
- PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000
- UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000
- UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000
+ BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION
+ PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION
+ PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION
+ UNPROTECTED_DACL_SECURITY_INFORMATION = windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION
+ UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION
)
const (
- SE_UNKNOWN_OBJECT_TYPE = iota
- SE_FILE_OBJECT
- SE_SERVICE
- SE_PRINTER
- SE_REGISTRY_KEY
- SE_LMSHARE
- SE_KERNEL_OBJECT
- SE_WINDOW_OBJECT
- SE_DS_OBJECT
- SE_DS_OBJECT_ALL
- SE_PROVIDER_DEFINED_OBJECT
- SE_WMIGUID_OBJECT
- SE_REGISTRY_WOW64_32KEY
+ SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE
+ SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT
+ SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE
+ SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER
+ SE_REGISTRY_KEY = windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY
+ SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE
+ SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT
+ SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT
+ SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT
+ SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL
+ SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT
+ SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT
+ SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY
)
const (
@@ -64,6 +64,7 @@ var (
type OSVersion = osversion.OSVersion
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported)
type osVersionInfoEx struct {
OSVersionInfoSize uint32
MajorVersion uint32
@@ -86,8 +87,6 @@ func GetOSVersion() OSVersion {
}
// IsWindowsClient returns true if the SKU is client
-// @engine maintainers - this function should not be removed or modified as it
-// is used to enforce licensing restrictions on Windows.
func IsWindowsClient() bool {
osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
@@ -101,33 +100,10 @@ func IsWindowsClient() bool {
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
-func Unmount(dest string) error {
+func Unmount(_ string) error {
return nil
}
-// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
-func CommandLineToArgv(commandLine string) ([]string, error) {
- var argc int32
-
- argsPtr, err := windows.UTF16PtrFromString(commandLine)
- if err != nil {
- return nil, err
- }
-
- argv, err := windows.CommandLineToArgv(argsPtr, &argc)
- if err != nil {
- return nil, err
- }
- defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv))))
-
- newArgs := make([]string, argc)
- for i, v := range (*argv)[:argc] {
- newArgs[i] = string(windows.UTF16ToString((*v)[:]))
- }
-
- return newArgs, nil
-}
-
// HasWin32KSupport determines whether containers that depend on win32k can
// run on this machine. Win32k is the driver used to implement windowing.
func HasWin32KSupport() bool {
@@ -137,6 +113,7 @@ func HasWin32KSupport() bool {
return ntuserApiset.Load() == nil
}
+// Deprecated: use golang.org/x/sys/windows.SetNamedSecurityInfo()
func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) {
r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
if r0 != 0 {
@@ -145,11 +122,12 @@ func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInforma
return
}
+// Deprecated: uses golang.org/x/sys/windows.SecurityDescriptorFromString() and golang.org/x/sys/windows.SECURITY_DESCRIPTOR.DACL()
func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) {
r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0)
if r1 == 0 {
if e1 != 0 {
- result = syscall.Errno(e1)
+ result = e1
} else {
result = syscall.EINVAL
}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
index d4f1a57fb..95b609fe7 100644
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -10,24 +10,23 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
dest := make([]byte, 128)
sz, errno := unix.Lgetxattr(path, attr, dest)
- switch {
- case errno == unix.ENODATA:
- return nil, nil
- case errno == unix.ERANGE:
- // 128 byte array might just not be good enough. A dummy buffer is used
- // to get the real size of the xattrs on disk
+ for errno == unix.ERANGE {
+ // Buffer too small, use zero-sized buffer to get the actual size
sz, errno = unix.Lgetxattr(path, attr, []byte{})
if errno != nil {
return nil, errno
}
dest = make([]byte, sz)
sz, errno = unix.Lgetxattr(path, attr, dest)
- if errno != nil {
- return nil, errno
- }
+ }
+
+ switch {
+ case errno == unix.ENODATA:
+ return nil, nil
case errno != nil:
return nil, errno
}
+
return dest[:sz], nil
}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
deleted file mode 100644
index 87bca8d4a..000000000
--- a/vendor/github.com/docker/docker/pkg/term/ascii.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "fmt"
- "strings"
-)
-
-// ASCII list the possible supported ASCII key sequence
-var ASCII = []string{
- "ctrl-@",
- "ctrl-a",
- "ctrl-b",
- "ctrl-c",
- "ctrl-d",
- "ctrl-e",
- "ctrl-f",
- "ctrl-g",
- "ctrl-h",
- "ctrl-i",
- "ctrl-j",
- "ctrl-k",
- "ctrl-l",
- "ctrl-m",
- "ctrl-n",
- "ctrl-o",
- "ctrl-p",
- "ctrl-q",
- "ctrl-r",
- "ctrl-s",
- "ctrl-t",
- "ctrl-u",
- "ctrl-v",
- "ctrl-w",
- "ctrl-x",
- "ctrl-y",
- "ctrl-z",
- "ctrl-[",
- "ctrl-\\",
- "ctrl-]",
- "ctrl-^",
- "ctrl-_",
-}
-
-// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code.
-func ToBytes(keys string) ([]byte, error) {
- codes := []byte{}
-next:
- for _, key := range strings.Split(keys, ",") {
- if len(key) != 1 {
- for code, ctrl := range ASCII {
- if ctrl == key {
- codes = append(codes, byte(code))
- continue next
- }
- }
- if key == "DEL" {
- codes = append(codes, 127)
- } else {
- return nil, fmt.Errorf("Unknown character: '%s'", key)
- }
- } else {
- codes = append(codes, key[0])
- }
- }
- return codes, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go
deleted file mode 100644
index da733e584..000000000
--- a/vendor/github.com/docker/docker/pkg/term/proxy.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "io"
-)
-
-// EscapeError is special error which returned by a TTY proxy reader's Read()
-// method in case its detach escape sequence is read.
-type EscapeError struct{}
-
-func (EscapeError) Error() string {
- return "read escape sequence"
-}
-
-// escapeProxy is used only for attaches with a TTY. It is used to proxy
-// stdin keypresses from the underlying reader and look for the passed in
-// escape key sequence to signal a detach.
-type escapeProxy struct {
- escapeKeys []byte
- escapeKeyPos int
- r io.Reader
-}
-
-// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
-// and detects when the specified escape keys are read, in which case the Read
-// method will return an error of type EscapeError.
-func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
- return &escapeProxy{
- escapeKeys: escapeKeys,
- r: r,
- }
-}
-
-func (r *escapeProxy) Read(buf []byte) (int, error) {
- nr, err := r.r.Read(buf)
-
- if len(r.escapeKeys) == 0 {
- return nr, err
- }
-
- preserve := func() {
- // this preserves the original key presses in the passed in buffer
- nr += r.escapeKeyPos
- preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
- preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
- preserve = append(preserve, buf...)
- r.escapeKeyPos = 0
- copy(buf[0:nr], preserve)
- }
-
- if nr != 1 || err != nil {
- if r.escapeKeyPos > 0 {
- preserve()
- }
- return nr, err
- }
-
- if buf[0] != r.escapeKeys[r.escapeKeyPos] {
- if r.escapeKeyPos > 0 {
- preserve()
- }
- return nr, nil
- }
-
- if r.escapeKeyPos == len(r.escapeKeys)-1 {
- return 0, EscapeError{}
- }
-
- // Looks like we've got an escape key, but we need to match again on the next
- // read.
- // Store the current escape key we found so we can look for the next one on
- // the next read.
- // Since this is an escape key, make sure we don't let the caller read it
- // If later on we find that this is not the escape sequence, we'll add the
- // keys back
- r.escapeKeyPos++
- return nr - r.escapeKeyPos, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go
deleted file mode 100644
index 01bcaa8ab..000000000
--- a/vendor/github.com/docker/docker/pkg/term/tc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !windows
-
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-func tcget(fd uintptr, p *Termios) syscall.Errno {
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
- return err
-}
-
-func tcset(fd uintptr, p *Termios) syscall.Errno {
- _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
- return err
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
deleted file mode 100644
index 0589a9551..000000000
--- a/vendor/github.com/docker/docker/pkg/term/term.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// +build !windows
-
-// Package term provides structures and helper functions to work with
-// terminal (state, sizes).
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "os/signal"
-
- "golang.org/x/sys/unix"
-)
-
-var (
- // ErrInvalidState is returned if the state of the terminal is invalid.
- ErrInvalidState = errors.New("Invalid terminal state")
-)
-
-// State represents the state of the terminal.
-type State struct {
- termios Termios
-}
-
-// Winsize represents the size of the terminal window.
-type Winsize struct {
- Height uint16
- Width uint16
- x uint16
- y uint16
-}
-
-// StdStreams returns the standard streams (stdin, stdout, stderr).
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
- return os.Stdin, os.Stdout, os.Stderr
-}
-
-// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (uintptr, bool) {
- var inFd uintptr
- var isTerminalIn bool
- if file, ok := in.(*os.File); ok {
- inFd = file.Fd()
- isTerminalIn = IsTerminal(inFd)
- }
- return inFd, isTerminalIn
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- var termios Termios
- return tcget(fd, &termios) == 0
-}
-
-// RestoreTerminal restores the terminal connected to the given file descriptor
-// to a previous state.
-func RestoreTerminal(fd uintptr, state *State) error {
- if state == nil {
- return ErrInvalidState
- }
- if err := tcset(fd, &state.termios); err != 0 {
- return err
- }
- return nil
-}
-
-// SaveState saves the state of the terminal connected to the given file descriptor.
-func SaveState(fd uintptr) (*State, error) {
- var oldState State
- if err := tcget(fd, &oldState.termios); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// DisableEcho applies the specified state to the terminal connected to the file
-// descriptor, with echo disabled.
-func DisableEcho(fd uintptr, state *State) error {
- newState := state.termios
- newState.Lflag &^= unix.ECHO
-
- if err := tcset(fd, &newState); err != 0 {
- return err
- }
- handleInterrupt(fd, state)
- return nil
-}
-
-// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state. On UNIX, this puts both the input
-// and output into raw mode. On Windows, it only puts the input into raw mode.
-func SetRawTerminal(fd uintptr) (*State, error) {
- oldState, err := MakeRaw(fd)
- if err != nil {
- return nil, err
- }
- handleInterrupt(fd, oldState)
- return oldState, err
-}
-
-// SetRawTerminalOutput puts the output of terminal connected to the given file
-// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
-// state. On Windows, it disables LF -> CRLF translation.
-func SetRawTerminalOutput(fd uintptr) (*State, error) {
- return nil, nil
-}
-
-func handleInterrupt(fd uintptr, state *State) {
- sigchan := make(chan os.Signal, 1)
- signal.Notify(sigchan, os.Interrupt)
- go func() {
- for range sigchan {
- // quit cleanly and the new terminal item is on a new line
- fmt.Println()
- signal.Stop(sigchan)
- close(sigchan)
- RestoreTerminal(fd, state)
- os.Exit(1)
- }
- }()
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
deleted file mode 100644
index 6e83b59e9..000000000
--- a/vendor/github.com/docker/docker/pkg/term/term_windows.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "io"
- "os"
- "os/signal"
- "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
-
- "github.com/Azure/go-ansiterm/winterm"
- windowsconsole "github.com/docker/docker/pkg/term/windows"
-)
-
-// State holds the console mode for the terminal.
-type State struct {
- mode uint32
-}
-
-// Winsize is used for window size.
-type Winsize struct {
- Height uint16
- Width uint16
-}
-
-// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
-var vtInputSupported bool
-
-// StdStreams returns the standard streams (stdin, stdout, stderr).
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
- // Turn on VT handling on all std handles, if possible. This might
- // fail, in which case we will fall back to terminal emulation.
- var emulateStdin, emulateStdout, emulateStderr bool
- fd := os.Stdin.Fd()
- if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
- emulateStdin = true
- } else {
- vtInputSupported = true
- }
- // Unconditionally set the console mode back even on failure because SetConsoleMode
- // remembers invalid bits on input handles.
- winterm.SetConsoleMode(fd, mode)
- }
-
- fd = os.Stdout.Fd()
- if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
- emulateStdout = true
- } else {
- winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
- }
- }
-
- fd = os.Stderr.Fd()
- if mode, err := winterm.GetConsoleMode(fd); err == nil {
- // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
- if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
- emulateStderr = true
- } else {
- winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
- }
- }
-
- // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
- // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
- // go-ansiterm hasn't switch to x/sys/windows.
- // TODO: switch back to x/sys/windows once go-ansiterm has switched
- if emulateStdin {
- stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
- } else {
- stdIn = os.Stdin
- }
-
- if emulateStdout {
- stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
- } else {
- stdOut = os.Stdout
- }
-
- if emulateStderr {
- stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
- } else {
- stdErr = os.Stderr
- }
-
- return
-}
-
-// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (uintptr, bool) {
- return windowsconsole.GetHandleInfo(in)
-}
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- info, err := winterm.GetConsoleScreenBufferInfo(fd)
- if err != nil {
- return nil, err
- }
-
- winsize := &Winsize{
- Width: uint16(info.Window.Right - info.Window.Left + 1),
- Height: uint16(info.Window.Bottom - info.Window.Top + 1),
- }
-
- return winsize, nil
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- return windowsconsole.IsConsole(fd)
-}
-
-// RestoreTerminal restores the terminal connected to the given file descriptor
-// to a previous state.
-func RestoreTerminal(fd uintptr, state *State) error {
- return winterm.SetConsoleMode(fd, state.mode)
-}
-
-// SaveState saves the state of the terminal connected to the given file descriptor.
-func SaveState(fd uintptr) (*State, error) {
- mode, e := winterm.GetConsoleMode(fd)
- if e != nil {
- return nil, e
- }
-
- return &State{mode: mode}, nil
-}
-
-// DisableEcho disables echo for the terminal connected to the given file descriptor.
-// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
-func DisableEcho(fd uintptr, state *State) error {
- mode := state.mode
- mode &^= winterm.ENABLE_ECHO_INPUT
- mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
- err := winterm.SetConsoleMode(fd, mode)
- if err != nil {
- return err
- }
-
- // Register an interrupt handler to catch and restore prior state
- restoreAtInterrupt(fd, state)
- return nil
-}
-
-// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state. On UNIX, this puts both the input
-// and output into raw mode. On Windows, it only puts the input into raw mode.
-func SetRawTerminal(fd uintptr) (*State, error) {
- state, err := MakeRaw(fd)
- if err != nil {
- return nil, err
- }
-
- // Register an interrupt handler to catch and restore prior state
- restoreAtInterrupt(fd, state)
- return state, err
-}
-
-// SetRawTerminalOutput puts the output of terminal connected to the given file
-// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
-// state. On Windows, it disables LF -> CRLF translation.
-func SetRawTerminalOutput(fd uintptr) (*State, error) {
- state, err := SaveState(fd)
- if err != nil {
- return nil, err
- }
-
- // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
- // version of Windows.
- winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
- return state, err
-}
-
-// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be restored.
-func MakeRaw(fd uintptr) (*State, error) {
- state, err := SaveState(fd)
- if err != nil {
- return nil, err
- }
-
- mode := state.mode
-
- // See
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
-
- // Disable these modes
- mode &^= winterm.ENABLE_ECHO_INPUT
- mode &^= winterm.ENABLE_LINE_INPUT
- mode &^= winterm.ENABLE_MOUSE_INPUT
- mode &^= winterm.ENABLE_WINDOW_INPUT
- mode &^= winterm.ENABLE_PROCESSED_INPUT
-
- // Enable these modes
- mode |= winterm.ENABLE_EXTENDED_FLAGS
- mode |= winterm.ENABLE_INSERT_MODE
- mode |= winterm.ENABLE_QUICK_EDIT_MODE
- if vtInputSupported {
- mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
- }
-
- err = winterm.SetConsoleMode(fd, mode)
- if err != nil {
- return nil, err
- }
- return state, nil
-}
-
-func restoreAtInterrupt(fd uintptr, state *State) {
- sigchan := make(chan os.Signal, 1)
- signal.Notify(sigchan, os.Interrupt)
-
- go func() {
- _ = <-sigchan
- RestoreTerminal(fd, state)
- os.Exit(0)
- }()
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
deleted file mode 100644
index 48b16f520..000000000
--- a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build darwin freebsd openbsd netbsd
-
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-const (
- getTermios = unix.TIOCGETA
- setTermios = unix.TIOCSETA
-)
-
-// Termios is the Unix API for terminal I/O.
-type Termios unix.Termios
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd uintptr) (*State, error) {
- var oldState State
- if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
- return nil, err
- }
-
- newState := oldState.termios
- newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
- newState.Oflag &^= unix.OPOST
- newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
- newState.Cflag &^= (unix.CSIZE | unix.PARENB)
- newState.Cflag |= unix.CS8
- newState.Cc[unix.VMIN] = 1
- newState.Cc[unix.VTIME] = 0
-
- if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
deleted file mode 100644
index 6d4c63fdb..000000000
--- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-const (
- getTermios = unix.TCGETS
- setTermios = unix.TCSETS
-)
-
-// Termios is the Unix API for terminal I/O.
-type Termios unix.Termios
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd uintptr) (*State, error) {
- termios, err := unix.IoctlGetTermios(int(fd), getTermios)
- if err != nil {
- return nil, err
- }
-
- var oldState State
- oldState.termios = Termios(*termios)
-
- termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
- termios.Oflag &^= unix.OPOST
- termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
- termios.Cflag &^= (unix.CSIZE | unix.PARENB)
- termios.Cflag |= unix.CS8
- termios.Cc[unix.VMIN] = 1
- termios.Cc[unix.VTIME] = 0
-
- if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
- return nil, err
- }
- return &oldState, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
deleted file mode 100644
index 1d7c452cc..000000000
--- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// +build windows
-
-package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "strings"
- "unsafe"
-
- ansiterm "github.com/Azure/go-ansiterm"
- "github.com/Azure/go-ansiterm/winterm"
-)
-
-const (
- escapeSequence = ansiterm.KEY_ESC_CSI
-)
-
-// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
-type ansiReader struct {
- file *os.File
- fd uintptr
- buffer []byte
- cbBuffer int
- command []byte
-}
-
-// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
-// Windows console input handle.
-func NewAnsiReader(nFile int) io.ReadCloser {
- initLogger()
- file, fd := winterm.GetStdFile(nFile)
- return &ansiReader{
- file: file,
- fd: fd,
- command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
- buffer: make([]byte, 0),
- }
-}
-
-// Close closes the wrapped file.
-func (ar *ansiReader) Close() (err error) {
- return ar.file.Close()
-}
-
-// Fd returns the file descriptor of the wrapped file.
-func (ar *ansiReader) Fd() uintptr {
- return ar.fd
-}
-
-// Read reads up to len(p) bytes of translated input events into p.
-func (ar *ansiReader) Read(p []byte) (int, error) {
- if len(p) == 0 {
- return 0, nil
- }
-
- // Previously read bytes exist, read as much as we can and return
- if len(ar.buffer) > 0 {
- logger.Debugf("Reading previously cached bytes")
-
- originalLength := len(ar.buffer)
- copiedLength := copy(p, ar.buffer)
-
- if copiedLength == originalLength {
- ar.buffer = make([]byte, 0, len(p))
- } else {
- ar.buffer = ar.buffer[copiedLength:]
- }
-
- logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
- return copiedLength, nil
- }
-
- // Read and translate key events
- events, err := readInputEvents(ar.fd, len(p))
- if err != nil {
- return 0, err
- } else if len(events) == 0 {
- logger.Debug("No input events detected")
- return 0, nil
- }
-
- keyBytes := translateKeyEvents(events, []byte(escapeSequence))
-
- // Save excess bytes and right-size keyBytes
- if len(keyBytes) > len(p) {
- logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
- ar.buffer = keyBytes[len(p):]
- keyBytes = keyBytes[:len(p)]
- } else if len(keyBytes) == 0 {
- logger.Debug("No key bytes returned from the translator")
- return 0, nil
- }
-
- copiedLength := copy(p, keyBytes)
- if copiedLength != len(keyBytes) {
- return 0, errors.New("unexpected copy length encountered")
- }
-
- logger.Debugf("Read p[%d]: % x", copiedLength, p)
- logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
- return copiedLength, nil
-}
-
-// readInputEvents polls until at least one event is available.
-func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
- // Determine the maximum number of records to retrieve
- // -- Cast around the type system to obtain the size of a single INPUT_RECORD.
- // unsafe.Sizeof requires an expression vs. a type-reference; the casting
- // tricks the type system into believing it has such an expression.
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
- countRecords := maxBytes / recordSize
- if countRecords > ansiterm.MAX_INPUT_EVENTS {
- countRecords = ansiterm.MAX_INPUT_EVENTS
- } else if countRecords == 0 {
- countRecords = 1
- }
- logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
-
- // Wait for and read input events
- events := make([]winterm.INPUT_RECORD, countRecords)
- nEvents := uint32(0)
- eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
- if err != nil {
- return nil, err
- }
-
- if eventsExist {
- err = winterm.ReadConsoleInput(fd, events, &nEvents)
- if err != nil {
- return nil, err
- }
- }
-
- // Return a slice restricted to the number of returned records
- logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
- return events[:nEvents], nil
-}
-
-// KeyEvent Translation Helpers
-
-var arrowKeyMapPrefix = map[uint16]string{
- winterm.VK_UP: "%s%sA",
- winterm.VK_DOWN: "%s%sB",
- winterm.VK_RIGHT: "%s%sC",
- winterm.VK_LEFT: "%s%sD",
-}
-
-var keyMapPrefix = map[uint16]string{
- winterm.VK_UP: "\x1B[%sA",
- winterm.VK_DOWN: "\x1B[%sB",
- winterm.VK_RIGHT: "\x1B[%sC",
- winterm.VK_LEFT: "\x1B[%sD",
- winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
- winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
- winterm.VK_INSERT: "\x1B[2%s~",
- winterm.VK_DELETE: "\x1B[3%s~",
- winterm.VK_PRIOR: "\x1B[5%s~",
- winterm.VK_NEXT: "\x1B[6%s~",
- winterm.VK_F1: "",
- winterm.VK_F2: "",
- winterm.VK_F3: "\x1B[13%s~",
- winterm.VK_F4: "\x1B[14%s~",
- winterm.VK_F5: "\x1B[15%s~",
- winterm.VK_F6: "\x1B[17%s~",
- winterm.VK_F7: "\x1B[18%s~",
- winterm.VK_F8: "\x1B[19%s~",
- winterm.VK_F9: "\x1B[20%s~",
- winterm.VK_F10: "\x1B[21%s~",
- winterm.VK_F11: "\x1B[23%s~",
- winterm.VK_F12: "\x1B[24%s~",
-}
-
-// translateKeyEvents converts the input events into the appropriate ANSI string.
-func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
- var buffer bytes.Buffer
- for _, event := range events {
- if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
- buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
- }
- }
-
- return buffer.Bytes()
-}
-
-// keyToString maps the given input event record to the corresponding string.
-func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
- if keyEvent.UnicodeChar == 0 {
- return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
- }
-
- _, alt, control := getControlKeys(keyEvent.ControlKeyState)
- if control {
- // TODO(azlinux): Implement following control sequences
- // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
- // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
- // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
- // <Ctrl>-S Suspends printing on the screen (does not stop the program).
- // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
- // <Ctrl>-E Quits current command and creates a core
-
- }
-
- // <Alt>+Key generates ESC N Key
- if !control && alt {
- return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
- }
-
- return string(keyEvent.UnicodeChar)
-}
-
-// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
- shift, alt, control := getControlKeys(controlState)
- modifier := getControlKeysModifier(shift, alt, control)
-
- if format, ok := arrowKeyMapPrefix[key]; ok {
- return fmt.Sprintf(format, escapeSequence, modifier)
- }
-
- if format, ok := keyMapPrefix[key]; ok {
- return fmt.Sprintf(format, modifier)
- }
-
- return ""
-}
-
-// getControlKeys extracts the shift, alt, and ctrl key states.
-func getControlKeys(controlState uint32) (shift, alt, control bool) {
- shift = 0 != (controlState & winterm.SHIFT_PRESSED)
- alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
- control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
- return shift, alt, control
-}
-
-// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
-func getControlKeysModifier(shift, alt, control bool) string {
- if shift && alt && control {
- return ansiterm.KEY_CONTROL_PARAM_8
- }
- if alt && control {
- return ansiterm.KEY_CONTROL_PARAM_7
- }
- if shift && control {
- return ansiterm.KEY_CONTROL_PARAM_6
- }
- if control {
- return ansiterm.KEY_CONTROL_PARAM_5
- }
- if shift && alt {
- return ansiterm.KEY_CONTROL_PARAM_4
- }
- if alt {
- return ansiterm.KEY_CONTROL_PARAM_3
- }
- if shift {
- return ansiterm.KEY_CONTROL_PARAM_2
- }
- return ""
-}
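The key-translation tables above format arrow and function keys as CSI sequences whose trailing parameter encodes the held modifier keys. The cascaded checks in getControlKeysModifier are equivalent to the common xterm encoding of 1 plus a bitmask (shift=1, alt=2, ctrl=4); a small stand-alone sketch of that equivalence, where ansiModifier is a hypothetical helper and not part of the removed package:

package main

import "fmt"

// ansiModifier reproduces the behaviour of getControlKeysModifier
// arithmetically: the xterm modifier parameter is 1 plus a bitmask with
// shift=1, alt=2 and ctrl=4, and is omitted when no modifier is held.
func ansiModifier(shift, alt, control bool) string {
	mod := 1
	if shift {
		mod++
	}
	if alt {
		mod += 2
	}
	if control {
		mod += 4
	}
	if mod == 1 {
		return ""
	}
	return fmt.Sprintf("%d", mod)
}

func main() {
	fmt.Println(ansiModifier(true, false, false)) // "2", i.e. KEY_CONTROL_PARAM_2
	fmt.Println(ansiModifier(false, true, true))  // "7", i.e. KEY_CONTROL_PARAM_7
	fmt.Println(ansiModifier(true, true, true))   // "8", i.e. KEY_CONTROL_PARAM_8
}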
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
deleted file mode 100644
index 7799a03fc..000000000
--- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// +build windows
-
-package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
-
-import (
- "io"
- "os"
-
- ansiterm "github.com/Azure/go-ansiterm"
- "github.com/Azure/go-ansiterm/winterm"
-)
-
-// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
-type ansiWriter struct {
- file *os.File
- fd uintptr
- infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
- command []byte
- escapeSequence []byte
- inAnsiSequence bool
- parser *ansiterm.AnsiParser
-}
-
-// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
-// Windows console output handle.
-func NewAnsiWriter(nFile int) io.Writer {
- initLogger()
- file, fd := winterm.GetStdFile(nFile)
- info, err := winterm.GetConsoleScreenBufferInfo(fd)
- if err != nil {
- return nil
- }
-
- parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
- logger.Infof("newAnsiWriter: parser %p", parser)
-
- aw := &ansiWriter{
- file: file,
- fd: fd,
- infoReset: info,
- command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
- escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
- parser: parser,
- }
-
- logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
- logger.Infof("newAnsiWriter: %v", aw)
- return aw
-}
-
-func (aw *ansiWriter) Fd() uintptr {
- return aw.fd
-}
-
-// Write writes len(p) bytes from p to the underlying data stream.
-func (aw *ansiWriter) Write(p []byte) (total int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
-
- logger.Infof("Write: % x", p)
- logger.Infof("Write: %s", string(p))
- return aw.parser.Parse(p)
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
deleted file mode 100644
index 527401975..000000000
--- a/vendor/github.com/docker/docker/pkg/term/windows/console.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
-
-import (
- "os"
-
- "github.com/Azure/go-ansiterm/winterm"
-)
-
-// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
-func GetHandleInfo(in interface{}) (uintptr, bool) {
- switch t := in.(type) {
- case *ansiReader:
- return t.Fd(), true
- case *ansiWriter:
- return t.Fd(), true
- }
-
- var inFd uintptr
- var isTerminal bool
-
- if file, ok := in.(*os.File); ok {
- inFd = file.Fd()
- isTerminal = IsConsole(inFd)
- }
- return inFd, isTerminal
-}
-
-// IsConsole returns true if the given file descriptor is a Windows Console.
-// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
-func IsConsole(fd uintptr) bool {
- _, e := winterm.GetConsoleMode(fd)
- return e == nil
-}
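IsConsole relies on GetConsoleMode failing for handles that are not attached to a real console (pipes, redirected files). The same probe can be written directly against golang.org/x/sys/windows; a minimal sketch under that assumption, with isConsole as a hypothetical stand-in for the removed function:

//go:build windows
// +build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// isConsole uses the same probe as the removed IsConsole: GetConsoleMode
// returns an error for handles that are not real consoles.
func isConsole(fd uintptr) bool {
	var mode uint32
	return windows.GetConsoleMode(windows.Handle(fd), &mode) == nil
}

func main() {
	fmt.Println("stdout attached to a console:", isConsole(os.Stdout.Fd()))
}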
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
deleted file mode 100644
index 7e8f265d4..000000000
--- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build windows
-// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
-// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
-// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
-
-package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
-
-import (
- "io/ioutil"
- "os"
- "sync"
-
- ansiterm "github.com/Azure/go-ansiterm"
- "github.com/sirupsen/logrus"
-)
-
-var logger *logrus.Logger
-var initOnce sync.Once
-
-func initLogger() {
- initOnce.Do(func() {
- logFile := ioutil.Discard
-
- if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
- logFile, _ = os.Create("ansiReaderWriter.log")
- }
-
- logger = &logrus.Logger{
- Out: logFile,
- Formatter: new(logrus.TextFormatter),
- Level: logrus.DebugLevel,
- }
- })
-}
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
deleted file mode 100644
index a19663ad8..000000000
--- a/vendor/github.com/docker/docker/pkg/term/winsize.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !windows
-
-package term // import "github.com/docker/docker/pkg/term"
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
- ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
- return ws, err
-}
-
-// SetWinsize tries to set the specified window size for the specified file descriptor.
-func SetWinsize(fd uintptr, ws *Winsize) error {
- uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
- return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
-}
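GetWinsize and SetWinsize are thin wrappers around the TIOCGWINSZ/TIOCSWINSZ ioctls. Code that only needs to read the size can obtain the same information portably from golang.org/x/term; a minimal sketch, assuming golang.org/x/term is available:

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// term.GetSize issues the same TIOCGWINSZ query on Unix (and the
	// console-buffer query on Windows) and reports columns and rows.
	width, height, err := term.GetSize(int(os.Stdout.Fd()))
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("terminal is %d columns by %d rows\n", width, height)
}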