Diffstat (limited to 'vendor/github.com')
99 files changed, 4507 insertions, 1315 deletions
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index cb16fa89c..3b4f6e9de 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -28,12 +28,15 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # See https://github.com/containers/libpod/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task - _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" - FEDORA_CACHE_IMAGE_NAME: "fedora-32-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}" - + FEDORA_NAME: "fedora-32" + PRIOR_FEDORA_NAME: "fedora-31" + UBUNTU_NAME: "ubuntu-20" + PRIOR_UBUNTU_NAME: "ubuntu-19" + _BUILT_IMAGE_SUFFIX: "libpod-6508632441356288" + FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" + PRIOR_UBUNTU_CACHE_IMAGE_NAME: "${PRIOR_UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -157,7 +160,7 @@ gce_instance: env: matrix: - CROSS_TARGET: darwin + CROSS_TARGET: bin/buildah.darwin setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore index fe45e198a..947216443 100644 --- a/vendor/github.com/containers/buildah/.gitignore +++ b/vendor/github.com/containers/buildah/.gitignore @@ -1,4 +1,5 @@ docs/buildah*.1 +/bin /buildah /imgtype /build/ @@ -6,3 +7,4 @@ tests/tools/build Dockerfile* !/tests/bud/*/Dockerfile* *.swp +result diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 0664b4040..a3f5f2f11 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -670,7 +670,6 @@ Bump back to 1.8-dev ## v1.7.3 (2019-04-16) -* Tue Apr 16, 2019 Tom Sweeney <tsweeney@redhat.com> 1.7.3 imagebuildah: don't leak image structs Add Dockerfiles for buildahimages Bump to Replace golang 1.10 with 1.12 diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index ced48a95a..892c25810 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -33,33 +33,36 @@ CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\) RUNC_COMMIT := v1.0.0-rc8 LIBSECCOMP_COMMIT := release-2.3 -EXTRALDFLAGS := -LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS) +EXTRA_LDFLAGS ?= +LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go util/*.go LINTFLAGS ?= -all: buildah imgtype docs +all: bin/buildah bin/imgtype docs -.PHONY: static -static: $(SOURCES) - $(MAKE) SECURITYTAGS="$(SECURITYTAGS)" STORAGETAGS=$(STATIC_STORAGETAGS) EXTRALDFLAGS='-ldflags "-extldflags '-static'"' BUILDAH=buildah.static binary +nixpkgs: + @nix run 
-f channel:nixpkgs-unstable nix-prefetch-git -c nix-prefetch-git \ + --no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json -.PHONY: binary -binary: $(SOURCES) - $(GO_BUILD) $(LDFLAGS) -o $(BUILDAH) $(BUILDFLAGS) ./cmd/buildah +.PHONY: bin/buildah +bin/buildah: $(SOURCES) + $(GO_BUILD) $(LDFLAGS) -o $@ $(BUILDFLAGS) ./cmd/buildah -buildah: binary +.PHONY: buildah +buildah: bin/buildah -darwin: - GOOS=darwin $(GO_BUILD) $(LDFLAGS) -o buildah.darwin -tags "containers_image_openpgp" ./cmd/buildah +.PHONY: bin/buildah.darwin +bin/buildah.darwin: + GOOS=darwin $(GO_BUILD) $(LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah -imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go - $(GO_BUILD) $(LDFLAGS) -o imgtype $(BUILDFLAGS) ./tests/imgtype/imgtype.go +.PHONY: bin/imgtype +bin/imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go + $(GO_BUILD) $(LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go .PHONY: clean clean: - $(RM) -r buildah imgtype build buildah.static buildah.darwin tests/testreport/testreport + $(RM) -r bin tests/testreport/testreport $(MAKE) -C docs clean .PHONY: docs @@ -105,7 +108,7 @@ install.cni.sudo: gopath .PHONY: install install: - install -D -m0755 buildah $(DESTDIR)/$(BINDIR)/buildah + install -D -m0755 bin/buildah $(DESTDIR)/$(BINDIR)/buildah $(MAKE) -C docs install .PHONY: uninstall diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index b5f0993fa..8a96ed931 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -28,7 +28,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.15.0" + Version = "1.16.0-dev" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. 
// This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index daf5a4f19..15834256c 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -4,7 +4,7 @@ go 1.12 require ( github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 - github.com/containers/common v0.14.0 + github.com/containers/common v0.15.2 github.com/containers/image/v5 v5.5.1 github.com/containers/ocicrypt v1.0.2 github.com/containers/storage v1.20.2 @@ -21,11 +21,11 @@ require ( github.com/onsi/gomega v1.10.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 - github.com/opencontainers/runc v1.0.0-rc90 + github.com/opencontainers/runc v1.0.0-rc91 github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 github.com/opencontainers/runtime-tools v0.9.0 github.com/opencontainers/selinux v1.5.2 - github.com/openshift/imagebuilder v1.1.5 + github.com/openshift/imagebuilder v1.1.6 github.com/pkg/errors v0.9.1 github.com/seccomp/containers-golang v0.5.0 github.com/seccomp/libseccomp-golang v0.9.1 @@ -34,9 +34,9 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 - github.com/vishvananda/netlink v1.1.0 // indirect - go.etcd.io/bbolt v1.3.4 + go.etcd.io/bbolt v1.3.5 golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 + golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 ) diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index ece611305..718d44909 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -29,11 +29,14 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -48,17 +51,14 @@ github.com/containerd/ttrpc 
v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc= github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containers/common v0.14.0 h1:hiZFDPf6ajKiDmojN5f5X3gboKPO73NLrYb0RXfrQiA= -github.com/containers/common v0.14.0/go.mod h1:9olhlE+WhYof1npnMJdyRMX14/yIUint6zyHzcyRVAg= -github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE= -github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8= +github.com/containers/common v0.15.2 h1:KNNnSxeWRlghZPTVu07pjMWCRKvDObWykglf4ZFVDVI= +github.com/containers/common v0.15.2/go.mod h1:rhpXuGLTEKsk/xX/x0iKGHjRadMHpBd2ZiNDugwXPEM= github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ= github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo= github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M= -github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ= github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk= github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -66,7 +66,10 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -111,6 +114,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= 
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -122,6 +127,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -166,13 +172,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY= github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -196,6 +199,7 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -204,6 +208,7 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod 
h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -229,12 +234,11 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA= github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc91 h1:Tp8LWs5G8rFpzTsbRjAtQkPVexhCu0bnANE5IfIhJ6g= +github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E= github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= @@ -245,8 +249,8 @@ github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWO github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q= github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= -github.com/openshift/imagebuilder v1.1.5 h1:WAIHV6cGF9e0AcLBA7RIi7XbFoB7R+e/MWu1I+1NUOM= -github.com/openshift/imagebuilder v1.1.5/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.1.6 h1:1+YzRxIIefY4QqtCImx6rg+75QrKNfBoPAKxgMo/khM= +github.com/openshift/imagebuilder v1.1.6/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -311,8 +315,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -327,10 +329,9 @@ github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= -github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14= -github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8= github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw= github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= @@ -350,6 +351,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -376,7 +379,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= @@ -406,13 +408,11 @@ golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index a9e0641c0..12dca36b9 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -177,6 +177,8 @@ type BuildOptions struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + // Jobs is the number of stages to run in parallel. If not specified it defaults to 1. + Jobs *int } // BuildDockerfiles parses a set of one or more Dockerfiles (which may be diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index a156313df..93b25887f 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -9,6 +9,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" "github.com/containers/buildah" @@ -29,6 +30,7 @@ import ( "github.com/openshift/imagebuilder/dockerfile/parser" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" ) // builtinAllowedBuildArgs is list of built-in allowed build args. Normally we @@ -102,6 +104,11 @@ type Executor struct { maxPullPushRetries int retryPullPushDelay time.Duration ociDecryptConfig *encconfig.DecryptConfig + lastError error + terminatedStage map[string]struct{} + stagesLock sync.Mutex + stagesSemaphore *semaphore.Weighted + jobs int } // NewExecutor creates a new instance of the imagebuilder.Executor interface. @@ -139,7 +146,13 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod transientMounts = append([]Mount{Mount(mount)}, transientMounts...) 
} + jobs := 1 + if options.Jobs != nil { + jobs = *options.Jobs + } + exec := Executor{ + stages: make(map[string]*StageExecutor), store: store, contextDir: options.ContextDirectory, excludes: excludes, @@ -191,6 +204,8 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod maxPullPushRetries: options.MaxPullPushRetries, retryPullPushDelay: options.PullPushRetryDelay, ociDecryptConfig: options.OciDecryptConfig, + terminatedStage: make(map[string]struct{}), + jobs: jobs, } if exec.err == nil { exec.err = os.Stderr @@ -236,10 +251,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod // startStage creates a new stage executor that will be referenced whenever a // COPY or ADD statement uses a --from=NAME flag. -func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, output string) *StageExecutor { - if b.stages == nil { - b.stages = make(map[string]*StageExecutor) - } +func (b *Executor) startStage(stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor { stageExec := &StageExecutor{ executor: b, index: stage.Position, @@ -277,6 +289,35 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e return imageRef, nil } +func (b *Executor) waitForStage(ctx context.Context, name string) error { + stage := b.stages[name] + if stage == nil { + return errors.Errorf("unknown stage %q", name) + } + for { + if b.lastError != nil { + return b.lastError + } + if stage.stage == nil { + return nil + } + + b.stagesLock.Lock() + _, terminated := b.terminatedStage[name] + b.stagesLock.Unlock() + + if terminated { + return nil + } + + b.stagesSemaphore.Release(1) + time.Sleep(time.Millisecond * 10) + if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + return errors.Wrapf(err, "error reacquiring job semaphore") + } + } +} + // getImageHistory returns the history of imageID. func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) { imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID) @@ -295,6 +336,55 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi return oci.History, nil } +func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) { + stage := stages[stageIndex] + ib := stage.Builder + node := stage.Node + base, err := ib.From(node) + + // If this is the last stage, then the image that we produce at + // its end should be given the desired output name. + output := "" + if stageIndex == len(stages)-1 { + output = b.output + } + + if err != nil { + logrus.Debugf("Build(node.Children=%#v)", node.Children) + return "", nil, err + } + + b.stagesLock.Lock() + stageExecutor := b.startStage(&stage, stages, output) + b.stagesLock.Unlock() + + // If this a single-layer build, or if it's a multi-layered + // build and b.forceRmIntermediateCtrs is set, make sure we + // remove the intermediate/build containers, regardless of + // whether or not the stage's build fails. + if b.forceRmIntermediateCtrs || !b.layers { + b.stagesLock.Lock() + cleanupStages[stage.Position] = stageExecutor + b.stagesLock.Unlock() + } + + // Build this stage. 
+ if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil { + return "", nil, err + } + + // The stage succeeded, so remove its build container if we're + // told to delete successful intermediate/build containers for + // multi-layered builds. + if b.removeIntermediateCtrs { + b.stagesLock.Lock() + cleanupStages[stage.Position] = stageExecutor + b.stagesLock.Unlock() + } + + return imageID, ref, nil +} + // Build takes care of the details of running Prepare/Execute/Commit/Delete // over each of the one or more parsed Dockerfiles and stages. func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) { @@ -314,12 +404,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // Clean up any containers associated with the final container // built by a stage, for stages that succeeded, since we no // longer need their filesystem contents. + + b.stagesLock.Lock() for _, stage := range cleanupStages { if err := stage.Delete(); err != nil { logrus.Debugf("Failed to cleanup stage containers: %v", err) lastErr = err } } + b.stagesLock.Unlock() + cleanupStages = nil // Clean up any builders that we used to get data from images. for _, builder := range b.containerMap { @@ -404,61 +498,87 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image } } - // Run through the build stages, one at a time. - for stageIndex, stage := range stages { - var lastErr error + type Result struct { + Index int + ImageID string + Ref reference.Canonical + Error error + } - ib := stage.Builder - node := stage.Node - base, err := ib.From(node) - if err != nil { - logrus.Debugf("Build(node.Children=%#v)", node.Children) - return "", nil, err - } + ch := make(chan Result) - // If this is the last stage, then the image that we produce at - // its end should be given the desired output name. - output := "" - if stageIndex == len(stages)-1 { - output = b.output - } + jobs := int64(b.jobs) + if jobs < 0 { + return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") + } else if jobs == 0 { + jobs = int64(len(stages)) + } - stageExecutor := b.startStage(&stage, len(stages), output) + b.stagesSemaphore = semaphore.NewWeighted(jobs) - // If this a single-layer build, or if it's a multi-layered - // build and b.forceRmIntermediateCtrs is set, make sure we - // remove the intermediate/build containers, regardless of - // whether or not the stage's build fails. - if b.forceRmIntermediateCtrs || !b.layers { - cleanupStages[stage.Position] = stageExecutor - } + var wg sync.WaitGroup + wg.Add(len(stages)) - // Build this stage. - if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil { - lastErr = err - } - if lastErr != nil { - return "", nil, lastErr + go func() { + for stageIndex := range stages { + index := stageIndex + // Acquire the semaphore before creating the goroutine so we are sure they + // run in the specified order. 
+ if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + b.lastError = err + return + } + go func() { + defer b.stagesSemaphore.Release(1) + defer wg.Done() + imageID, ref, err = b.buildStage(ctx, cleanupStages, stages, index) + if err != nil { + ch <- Result{ + Index: index, + Error: err, + } + return + } + + ch <- Result{ + Index: index, + ImageID: imageID, + Ref: ref, + Error: nil, + } + }() } + }() + go func() { + wg.Wait() + close(ch) + }() - // The stage succeeded, so remove its build container if we're - // told to delete successful intermediate/build containers for - // multi-layered builds. - if b.removeIntermediateCtrs { - cleanupStages[stage.Position] = stageExecutor + for r := range ch { + stage := stages[r.Index] + + b.stagesLock.Lock() + b.terminatedStage[stage.Name] = struct{}{} + b.stagesLock.Unlock() + + if r.Error != nil { + b.lastError = r.Error + return "", nil, r.Error } // If this is an intermediate stage, make a note of the ID, so // that we can look it up later. - if stageIndex < len(stages)-1 && imageID != "" { - b.imageMap[stage.Name] = imageID + if r.Index < len(stages)-1 && r.ImageID != "" { + b.imageMap[stage.Name] = r.ImageID // We're not populating the cache with intermediate // images, so add this one to the list of images that // we'll remove later. if !b.layers { - cleanupImages = append(cleanupImages, imageID) + cleanupImages = append(cleanupImages, r.ImageID) } - imageID = "" + } + if r.Index == len(stages)-1 { + imageID = r.ImageID } } diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 7ba5e2e96..9dc5c1b97 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -46,7 +46,7 @@ import ( type StageExecutor struct { executor *Executor index int - stages int + stages imagebuilder.Stages name string builder *buildah.Builder preserved int @@ -753,16 +753,23 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, stage := s.stage ib := stage.Builder checkForLayers := s.executor.layers && s.executor.useCache - moreStages := s.index < s.stages-1 + moreStages := s.index < len(s.stages)-1 lastStage := !moreStages imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)]) rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)]) // If the base image's name corresponds to the result of an earlier - // stage, substitute that image's ID for the base image's name here. - // If not, then go on assuming that it's just a regular image that's - // either in local storage, or one that we have to pull from a - // registry. + // stage, make sure that stage has finished building an image, and + // substitute that image's ID for the base image's name here. If not, + // then go on assuming that it's just a regular image that's either in + // local storage, or one that we have to pull from a registry. 
+ for _, previousStage := range s.stages[:s.index] { + if previousStage.Name == base { + if err := s.executor.waitForStage(ctx, previousStage.Name); err != nil { + return "", nil, err + } + } + } if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage { base = stageImage } @@ -870,6 +877,9 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command) } if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index { + if err := s.executor.waitForStage(ctx, otherStage.name); err != nil { + return "", nil, err + } mountPoint = otherStage.mountPoint } else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil { return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1]) @@ -1230,8 +1240,12 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } s.builder.SetHostname(config.Hostname) s.builder.SetDomainname(config.Domainname) - s.builder.SetArchitecture(s.executor.architecture) - s.builder.SetOS(s.executor.os) + if s.executor.architecture != "" { + s.builder.SetArchitecture(s.executor.architecture) + } + if s.executor.os != "" { + s.builder.SetOS(s.executor.os) + } s.builder.SetUser(config.User) s.builder.ClearPorts() for p := range config.ExposedPorts { diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index 1a457f34c..977013a39 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -80,6 +80,7 @@ type BudResults struct { Tag []string Target string TLSVerify bool + Jobs int } // FromAndBugResults represents the results for common flags @@ -182,6 +183,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet { fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") + fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel") return fs } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 3b7d7587f..4bb760683 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -126,15 +126,15 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) { commonOpts := &buildah.CommonBuildOptions{ AddHost: addHost, - CgroupParent: c.Flag("cgroup-parent").Value.String(), CPUPeriod: cpuPeriod, CPUQuota: cpuQuota, CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(), CPUSetMems: c.Flag("cpuset-mems").Value.String(), CPUShares: cpuShares, + CgroupParent: c.Flag("cgroup-parent").Value.String(), + DNSOptions: dnsOptions, DNSSearch: dnsSearch, DNSServers: dnsServers, - DNSOptions: dnsOptions, HTTPProxy: httpProxy, Memory: memoryLimit, MemorySwap: memorySwap, diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go index 0a6d44195..880fbf674 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go @@ -44,7 +44,7 @@ func DeviceFromPath(device string) 
([]configs.Device, error) { } for _, d := range srcDevices { d.Path = filepath.Join(dst, filepath.Base(d.Path)) - d.Permissions = permissions + d.Permissions = configs.DevicePermissions(permissions) devs = append(devs, *d) } return devs, nil diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index fd2597d62..d419bb082 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -139,7 +139,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { GID: &d.Gid, } g.AddDevice(sDev) - g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, d.Permissions) + g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions)) } setupMaskedPaths(g) diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index c652a66f2..80c478505 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -165,9 +165,12 @@ type ContainersConfig struct { // ShmSize holds the size of /dev/shm. ShmSize string `toml:"shm_size,omitempty"` - //TZ sets the timezone inside the container + // TZ sets the timezone inside the container TZ string `toml:"tz,omitempty"` + // Umask is the umask inside the container. + Umask string `toml:"umask,omitempty"` + // UTSNS indicates how to create a UTS namespace for the container UTSNS string `toml:"utsns,omitempty"` @@ -198,7 +201,7 @@ type EngineConfig struct { // The first path pointing to a valid file will be used. ConmonPath []string `toml:"conmon_path,omitempty"` - //DetachKeys is the sequence of keys used to detach a container. + // DetachKeys is the sequence of keys used to detach a container. DetachKeys string `toml:"detach_keys,omitempty"` // EnablePortReservation determines whether engine will reserve ports on the @@ -272,12 +275,20 @@ type EngineConfig struct { // Indicates whether the application should be running in Remote mode Remote bool `toml:"-"` + // RemoteURI is deprecated, see ActiveService // RemoteURI containers connection information used to connect to remote system. RemoteURI string `toml:"remote_uri,omitempty"` - // Identity key file for RemoteURI + // RemoteIdentity is deprecated, ServiceDestinations + // RemoteIdentity key file for RemoteURI RemoteIdentity string `toml:"remote_identity,omitempty"` + // ActiveService index to Destinations added v2.0.3 + ActiveService string `toml:"active_service,omitempty"` + + // Destinations mapped by service Names + ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` + // RuntimePath is the path to OCI runtime binary for launching containers. // The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be @@ -393,6 +404,15 @@ type NetworkConfig struct { NetworkConfigDir string `toml:"network_config_dir,omitempty"` } +// Destination represents destination for remote service +type Destination struct { + // URI, required. Example: ssh://root@example.com:22/run/podman/podman.sock + URI string `toml:"uri"` + + // Identity file with ssh key, optional + Identity string `toml:"identity,omitempty"` +} + // NewConfig creates a new Config. It starts with an empty config and, if // specified, merges the config at `userConfigPath` path. 
Depending if we're // running as root or rootless, we then merge the system configuration followed @@ -582,6 +602,10 @@ func (c *ContainersConfig) Validate() error { return err } + if err := c.validateUmask(); err != nil { + return err + } + if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize { return fmt.Errorf("log size max should be negative or >= %d", OCIBufSize) } @@ -598,9 +622,17 @@ func (c *ContainersConfig) Validate() error { // execution checks. It returns an `error` on validation failure, otherwise // `nil`. func (c *NetworkConfig) Validate() error { - if c.NetworkConfigDir != _cniConfigDir { - err := isDirectory(c.NetworkConfigDir) + expectedConfigDir := _cniConfigDir + if unshare.IsRootless() { + home, err := unshare.HomeDir() if err != nil { + return err + } + expectedConfigDir = filepath.Join(home, _cniConfigDirRootless) + } + if c.NetworkConfigDir != expectedConfigDir { + err := isDirectory(c.NetworkConfigDir) + if err != nil && !os.IsNotExist(err) { return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir) } } @@ -828,9 +860,9 @@ func stringsEq(a, b []string) bool { } var ( - configOnce sync.Once - configErr error - config *Config + configErr error + configMutex sync.Mutex + config *Config ) // Default returns the default container config. @@ -845,9 +877,16 @@ var ( // The system defaults container config files can be overwritten using the // CONTAINERS_CONF environment variable. This is usually done for testing. func Default() (*Config, error) { - configOnce.Do(func() { - config, configErr = NewConfig("") - }) + configMutex.Lock() + defer configMutex.Unlock() + if config != nil || configErr != nil { + return config, configErr + } + return defConfig() +} + +func defConfig() (*Config, error) { + config, configErr = NewConfig("") return config, configErr } @@ -879,8 +918,8 @@ func customConfigFile() (string, error) { return OverrideContainersConfig, nil } -//ReadCustomConfig reads the custom config and only generates a config based on it -//If the custom config file does not exists, function will return an empty config +// ReadCustomConfig reads the custom config and only generates a config based on it +// If the custom config file does not exists, function will return an empty config func ReadCustomConfig() (*Config, error) { path, err := customConfigFile() if err != nil { @@ -936,3 +975,34 @@ func (c *Config) Write() error { } return nil } + +// Reload clean the cached config and reloads the configuration from containers.conf files +// This function is meant to be used for long-running processes that need to reload potential changes made to +// the cached containers.conf files. 
+func Reload() (*Config, error) { + configMutex.Lock() + defer configMutex.Unlock() + return defConfig() +} + +func (c *Config) ActiveDestination() (string, string, error) { + if uri, found := os.LookupEnv("CONTAINER_HOST"); found { + var ident string + if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found { + ident = v + } + return uri, ident, nil + } + + switch { + case c.Engine.ActiveService != "": + d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService] + if !found { + return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService) + } + return d.URI, d.Identity, nil + case c.Engine.RemoteURI != "": + return c.Engine.RemoteURI, c.Engine.RemoteIdentity, nil + } + return "", "", errors.New("no service destination configured") +} diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index a6ab33c50..282eb80b7 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path/filepath" + "regexp" "syscall" units "github.com/docker/go-units" @@ -88,6 +89,14 @@ func (c *ContainersConfig) validateTZ() error { return nil } +func (c *ContainersConfig) validateUmask() error { + validUmask := regexp.MustCompile(`^[0-7]{1,4}$`) + if !validUmask.MatchString(c.Umask) { + return fmt.Errorf("Not a valid Umask %s", c.Umask) + } + return nil +} + func isRemote() bool { return false } diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go index 61dd159ad..7fd9202bb 100644 --- a/vendor/github.com/containers/common/pkg/config/config_remote.go +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -27,3 +27,7 @@ func (c *ContainersConfig) validateUlimits() error { func (c *ContainersConfig) validateTZ() error { return nil } + +func (c *ContainersConfig) validateUmask() error { + return nil +} diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 80afbb9bc..780df2a22 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -210,6 +210,10 @@ # # tz = "" +# Set umask inside the container +# +# umask="0022" + # Default way to to create a UTS namespace for the container # Options are: # `private` Create private UTS Namespace for the container. @@ -340,14 +344,6 @@ # Whether to pull new image before running a container # pull_policy = "missing" -# Default Remote URI to access the Podman service. -# Examples: -# rootless "unix://run/user/$UID/podman/podman.sock" (Default) -# rootfull "unix://run/podman/podman.sock.(Default) -# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock -# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock -# remote_uri= "" - # Directory for persistent engine files (database, etc) # By default, this will be configured relative to where the containers/storage # stores containers @@ -386,6 +382,22 @@ # Number of seconds to wait for container to exit before sending kill signal. 
# stop_timeout = 10 +# Index to the active service +# active_service = production + +# map of service destinations +# [service_destinations] +# [service_destinations.production] +# URI to access the Podman service +# Examples: +# rootless "unix://run/user/$UID/podman/podman.sock" (Default) +# rootfull "unix://run/podman/podman.sock (Default) +# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock +# remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock +# uri="ssh://user@production.example.com/run/user/1001/podman/podman.sock" +# Path to file containing ssh identity key +# identity = "~/.ssh/id_rsa" + # Paths to look for a valid OCI runtime (runc, runv, kata, etc) [engine.runtimes] # runc = [ diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 60811637f..57b703f53 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -11,7 +11,6 @@ import ( "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" - "github.com/containers/common/pkg/sysinfo" "github.com/containers/storage" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/selinux/go-selinux" @@ -93,8 +92,10 @@ const ( // InstallPrefix is the prefix where podman will be installed. // It can be overridden at build time. _installPrefix = "/usr" - // _cniConfigDir is the directory where cni plugins are found + // _cniConfigDir is the directory where cni configuration is found _cniConfigDir = "/etc/cni/net.d/" + // _cniConfigDirRootless is the directory where cni plugins are found + _cniConfigDirRootless = ".config/cni/net.d/" // CgroupfsCgroupsManager represents cgroupfs native cgroup manager CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. 
@@ -139,6 +140,8 @@ func DefaultConfig() (*Config, error) { netns := "bridge" + cniConfig := _cniConfigDir + defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath if unshare.IsRootless() { home, err := unshare.HomeDir() @@ -153,6 +156,7 @@ func DefaultConfig() (*Config, error) { } } netns = "slirp4netns" + cniConfig = filepath.Join(home, _cniConfigDirRootless) } cgroupNS := "host" @@ -192,13 +196,14 @@ func DefaultConfig() (*Config, error) { SeccompProfile: SeccompDefaultPath, ShmSize: DefaultShmSize, TZ: "", + Umask: "0022", UTSNS: "private", UserNS: "host", UserNSSize: DefaultUserNSSize, }, Network: NetworkConfig{ DefaultNetwork: "podman", - NetworkConfigDir: _cniConfigDir, + NetworkConfigDir: cniConfig, CNIPluginDirs: cniBinDir, }, Engine: *defaultEngineConfig, @@ -484,15 +489,16 @@ func (c *Config) Ulimits() []string { // PidsLimit returns the default maximum number of pids to use in containers func (c *Config) PidsLimit() int64 { if unshare.IsRootless() { - if c.Engine.CgroupManager == SystemdCgroupsManager { - cgroup2, _ := cgroupv2.Enabled() - if cgroup2 { - return c.Containers.PidsLimit - } + if c.Engine.CgroupManager != SystemdCgroupsManager { + return 0 + } + cgroup2, _ := cgroupv2.Enabled() + if !cgroup2 { return 0 } } - return sysinfo.GetDefaultPidsLimit() + + return c.Containers.PidsLimit } // DetachKeys returns the default detach keys to detach from a container @@ -504,3 +510,7 @@ func (c *Config) DetachKeys() string { func (c *Config) TZ() string { return c.Containers.TZ } + +func (c *Config) Umask() string { + return c.Containers.Umask +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index e02f52192..02e5c4ac2 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -2,7 +2,17 @@ package config +import ( + "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/storage/pkg/unshare" +) + func defaultCgroupManager() string { + enabled, err := cgroupv2.Enabled() + if err == nil && !enabled && unshare.IsRootless() { + return CgroupfsCgroupsManager + } + return SystemdCgroupsManager } func defaultEventsLogger() string { diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index bbbca741c..6b226eabe 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
-const Version = "0.15.1" +const Version = "0.18.0" diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 15bf47baf..ce99cb0a5 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -19,12 +19,12 @@ env: ### FEDORA_NAME: "fedora-32" PRIOR_FEDORA_NAME: "fedora-31" - UBUNTU_NAME: "ubuntu-19" - PRIOR_UBUNTU_NAME: "ubuntu-18" + UBUNTU_NAME: "ubuntu-20" + PRIOR_UBUNTU_NAME: "ubuntu-19" # GCE project where images live IMAGE_PROJECT: "libpod-218412" - _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script' + _BUILT_IMAGE_SUFFIX: "libpod-6508632441356288" FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}" @@ -104,7 +104,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.12 + image: golang:1.13 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -140,7 +140,7 @@ meta_task: vendor_task: container: - image: golang:1.13 + image: golang:1.14 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 769e37e15..0369d0b1e 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.20.2 +1.21.2 diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go index 7553a93eb..5bfbb49e2 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -23,6 +23,7 @@ type directLVMConfig struct { ThinpMetaPercent uint64 AutoExtendPercent uint64 AutoExtendThreshold uint64 + MetaDataSize string } var ( @@ -121,15 +122,19 @@ func checkDevHasFS(dev string) error { } func verifyBlockDevice(dev string, force bool) error { - realPath, err := filepath.Abs(dev) + absPath, err := filepath.Abs(dev) if err != nil { return errors.Errorf("unable to get absolute path for %s: %s", dev, err) } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) } - if err := checkDevAvailable(realPath); err != nil { - return err + if err := checkDevAvailable(absPath); err != nil { + logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) + if err := checkDevAvailable(realPath); err != nil { + return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath) + } } if err := checkDevInVG(realPath); err != nil { return err @@ -205,8 +210,11 @@ func setupDirectLVM(cfg directLVMConfig) error { if cfg.ThinpMetaPercent == 0 { cfg.ThinpMetaPercent = 1 } + if cfg.MetaDataSize == "" { + cfg.MetaDataSize = "128k" + } - out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() + out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() if err != nil { return errors.Wrap(err, string(out)) } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go 
b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index ff6e297f4..cba3d05ea 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -101,6 +101,7 @@ type DeviceSet struct { // Options dataLoopbackSize int64 + metaDataSize string metaDataLoopbackSize int64 baseFsSize uint64 filesystem string @@ -1748,7 +1749,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { // - Managed by container storage // - The target of this device is at major <maj> and minor <min> // - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) // Check for the existence of the thin-pool device @@ -2708,6 +2709,8 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ devices.mountOptions = joinMountOptions(devices.mountOptions, val) case "dm.metadatadev": devices.metadataDevice = val + case "dm.metadata_size": + devices.metaDataSize = val case "dm.datadev": devices.dataDevice = val case "dm.thinpooldev": @@ -2743,6 +2746,8 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ return nil, err } + case "dm.metaDataSize": + lvmSetupConfig.MetaDataSize = val case "dm.min_free_space": if !strings.HasSuffix(val, "%") { return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 930a57a97..fc7010645 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -892,19 +892,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - // If the lowers list is still empty, use an empty lower so that we can still force an - // SELinux context for the mount. - - // if we are doing a readOnly mount, and there is only one lower - // We should just return the lower directory, no reason to mount. 
- if !readWrite && d.options.mountProgram == "" { - if len(absLowers) == 0 { - return path.Join(dir, "empty"), nil - } - if len(absLowers) == 1 { - return absLowers[0], nil - } - } if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) relLowers = append(relLowers, path.Join(id, "empty")) diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index 01ac1827b..36101194e 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -5,24 +5,24 @@ require ( github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 github.com/Microsoft/hcsshim v0.8.9 github.com/docker/go-units v0.4.0 - github.com/hashicorp/go-multierror v1.0.0 - github.com/klauspost/compress v1.10.7 + github.com/hashicorp/go-multierror v1.1.0 + github.com/klauspost/compress v1.10.10 github.com/klauspost/pgzip v1.2.4 github.com/mattn/go-shellwords v1.0.10 github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/runc v1.0.0-rc90 - github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 - github.com/opencontainers/selinux v1.5.2 + github.com/opencontainers/runc v1.0.0-rc91 + github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 + github.com/opencontainers/selinux v1.6.0 github.com/pkg/errors v0.9.1 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/testify v1.6.0 + github.com/stretchr/testify v1.6.1 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/vbatts/tar-split v0.11.1 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 - golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 + golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 gotest.tools v2.2.0+incompatible ) diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index 2b5a279c6..d9b0d6396 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -5,11 +5,17 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6tr github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA= +github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3 h1:qcqzLJa2xCo9sgdCzpT/SJSYxROTEstuhf7ZBHMirms= +github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= 
+github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -18,6 +24,12 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -25,6 +37,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -34,20 +48,22 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= 
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -62,16 +78,23 @@ github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvO github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA= -github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc91 h1:Tp8LWs5G8rFpzTsbRjAtQkPVexhCu0bnANE5IfIhJ6g= +github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q= 
-github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E= +github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -80,6 +103,12 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= @@ -89,16 +118,24 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
+github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -124,10 +161,14 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -137,6 +178,8 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine 
v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index a8ebf9e1e..dc21f75fd 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -772,7 +772,20 @@ func (r *layerStore) Mounted(id string) (int, error) { } func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { - if !r.IsReadWrite() { + + // check whether options include ro option + hasReadOnlyOpt := func(opts []string) bool { + for _, item := range opts { + if item == "ro" { + return true + } + } + return false + } + + // You are not allowed to mount layers from readonly stores if they + // are not mounted read/only. + if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) } r.mountsLockfile.Lock() @@ -1000,6 +1013,7 @@ func (r *layerStore) deleteInternal(id string) error { if layer.MountPoint != "" { delete(r.bymount, layer.MountPoint) } + r.deleteInDigestMap(id) toDeleteIndex := -1 for i, candidate := range r.layers { if candidate.ID == id { @@ -1031,6 +1045,27 @@ func (r *layerStore) deleteInternal(id string) error { return err } +func (r *layerStore) deleteInDigestMap(id string) { + for digest, layers := range r.bycompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.bycompressedsum[digest] = layers + break + } + } + } + for digest, layers := range r.byuncompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) 
+ r.byuncompressedsum[digest] = layers + break + } + } + } +} + func (r *layerStore) Delete(id string) error { layer, ok := r.lookup(id) if !ok { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index bf819a801..78744e0f3 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -390,16 +390,20 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { return mode } -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header +// ReadSecurityXattrToTarHeader reads security.capability, security,image +// xattrs from filesystem to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, err := system.Lgetxattr(path, "security.capability") - if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform { - return err - } - if capability != nil { + if hdr.Xattrs == nil { hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) + } + for _, xattr := range []string{"security.capability", "security.ima"} { + capability, err := system.Lgetxattr(path, xattr) + if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform { + return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path) + } + if capability != nil { + hdr.Xattrs[xattr] = string(capability) + } } return nil } diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 4a35997ea..c2fa2109e 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -39,6 +39,10 @@ type ThinpoolOptionsConfig struct { // log_level sets the log level of devicemapper. LogLevel string `toml:"log_level"` + // MetadataSize specifies the size of the metadata for the thinpool + // It will be used with the `pvcreate --metadata` option. + MetadataSize string `toml:"metadatasize"` + // MinFreeSpace specifies the min free space percent in a thin pool // require for new device creation to MinFreeSpace string `toml:"min_free_space"` @@ -218,6 +222,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { if options.Thinpool.LogLevel != "" { doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) } + if options.Thinpool.MetadataSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) + } if options.Thinpool.MinFreeSpace != "" { doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 6429d6254..35104b2fd 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -35,9 +35,9 @@ type lockfile struct { // necessary. 
func openLock(path string, ro bool) (int, error) { if ro { - return unix.Open(path, os.O_RDONLY, 0) + return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0) } - return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) + return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH) } // createLockerForPath returns a Locker object, possibly (depending on the platform) @@ -106,7 +106,6 @@ func (l *lockfile) lock(lType int16, recursive bool) { if err != nil { panic(fmt.Sprintf("error opening %q: %v", l.file, err)) } - unix.CloseOnExec(fd) l.fd = uintptr(fd) // Optimization: only use the (expensive) fcntl syscall when diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go index 1eff82e8e..a08fb674d 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -4,19 +4,30 @@ import ( "fmt" "os" "os/user" + "sync" "github.com/pkg/errors" ) +var ( + homeDirOnce sync.Once + homeDirErr error + homeDir string +) + // HomeDir returns the home directory for the current user. func HomeDir() (string, error) { - home := os.Getenv("HOME") - if home == "" { - usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) - if err != nil { - return "", errors.Wrapf(err, "unable to resolve HOME directory") + homeDirOnce.Do(func() { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) + if err != nil { + homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory") + return + } + homeDir, homeDirErr = usr.HomeDir, nil } - home = usr.HomeDir - } - return home, nil + homeDir, homeDirErr = home, nil + }) + return homeDir, homeDirErr } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 19909e9c6..a8ec9d98b 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -67,7 +67,7 @@ additionalimagestores = [ # squashed down to the default uid in the container. These images will have no # separation between the users in the container. Only supported for the overlay # and vfs drivers. -#ignore_chown_errors = false +#ignore_chown_errors = "false" # Path to an helper program to use for mounting the file system instead of mounting it # directly. @@ -132,6 +132,10 @@ mountopt = "nodev" # device. # mkfsarg = "" +# metadata_size is used to set the `pvcreate --metadatasize` options when +# creating thin devices. Default is 128k +# metadata_size = "" + # Size is used to set a maximum size of the container image. # size = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 263f4edb2..56e1e545b 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -265,6 +265,15 @@ type Store interface { // Wipe removes all known layers, images, and containers. Wipe() error + // MountImage mounts an image to temp directory and returns the mount point. + // MountImage allows caller to mount an image. Images will always + // be mounted read/only + MountImage(id string, mountOptions []string, mountLabel string) (string, error) + + // Unmount attempts to unmount an image, given an ID. + // Returns whether or not the layer is still mounted. 
+ UnmountImage(id string, force bool) (bool, error) + // Mount attempts to mount a layer, image, or container for access, and // returns the pathname if it succeeds. // Note if the mountLabel == "", the default label for the container @@ -346,6 +355,9 @@ type Store interface { // Names returns the list of names for a layer, image, or container. Names(id string) ([]string, error) + // Free removes the store from the list of stores + Free() + // SetNames changes the list of names for a layer, image, or container. // Duplicate names are removed from the list automatically. SetNames(id string, names []string) error @@ -2591,25 +2603,11 @@ func (s *store) Version() ([][2]string, error) { return [][2]string{}, nil } -func (s *store) Mount(id, mountLabel string) (string, error) { - container, err := s.Container(id) - var ( - uidMap, gidMap []idtools.IDMap - mountOpts []string - ) - if err == nil { - uidMap, gidMap = container.UIDMap, container.GIDMap - id = container.LayerID - mountOpts = container.MountOpts() - } +func (s *store) mount(id string, options drivers.MountOpts) (string, error) { rlstore, err := s.LayerStore() if err != nil { return "", err } - - s.graphLock.Lock() - defer s.graphLock.Unlock() - rlstore.Lock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { @@ -2630,17 +2628,49 @@ func (s *store) Mount(id, mountLabel string) (string, error) { } if rlstore.Exists(id) { - options := drivers.MountOpts{ - MountLabel: mountLabel, - UidMaps: uidMap, - GidMaps: gidMap, - Options: mountOpts, - } return rlstore.Mount(id, options) } return "", ErrLayerUnknown } +func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) { + // Append ReadOnly option to mountOptions + img, err := s.Image(id) + if err != nil { + return "", err + } + + if err := validateMountOptions(mountOpts); err != nil { + return "", err + } + options := drivers.MountOpts{ + MountLabel: mountLabel, + Options: append(mountOpts, "ro"), + } + + return s.mount(img.TopLayer, options) +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + container, err := s.Container(id) + var ( + uidMap, gidMap []idtools.IDMap + mountOpts []string + ) + if err == nil { + uidMap, gidMap = container.UIDMap, container.GIDMap + id = container.LayerID + mountOpts = container.MountOpts() + } + options := drivers.MountOpts{ + MountLabel: mountLabel, + UidMaps: uidMap, + GidMaps: gidMap, + Options: mountOpts, + } + return s.mount(id, options) +} + func (s *store) Mounted(id string) (int, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID @@ -2660,6 +2690,14 @@ func (s *store) Mounted(id string) (int, error) { return rlstore.Mounted(id) } +func (s *store) UnmountImage(id string, force bool) (bool, error) { + img, err := s.Image(id) + if err != nil { + return false, err + } + return s.Unmount(img.TopLayer, force) +} + func (s *store) Unmount(id string, force bool) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID @@ -3601,3 +3639,13 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro } return nil, nil } + +// Free removes the store from the list of stores +func (s *store) Free() { + for i := 0; i < len(stores); i++ { + if stores[i] == s { + stores = append(stores[:i], stores[i+1:]...) 
+ return + } + } +} diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 34ff6a77a..e2b56da2f 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -229,7 +229,7 @@ func subtractHostIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap { case used.HostID <= avail.HostID && used.HostID+used.Size >= avail.HostID+avail.Size: return nil case used.HostID <= avail.HostID && used.HostID+used.Size > avail.HostID && used.HostID+used.Size < avail.HostID+avail.Size: - newContainerID := used.HostID + used.Size + newContainerID := avail.ContainerID + used.Size newHostID := used.HostID + used.Size r := idtools.IDMap{ ContainerID: newContainerID, @@ -275,7 +275,7 @@ func subtractContainerIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDM return nil case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size > avail.ContainerID && used.ContainerID+used.Size < avail.ContainerID+avail.Size: newContainerID := used.ContainerID + used.Size - newHostID := used.HostID + used.Size + newHostID := avail.HostID + used.Size r := idtools.IDMap{ ContainerID: newContainerID, HostID: newHostID, @@ -297,7 +297,7 @@ func subtractContainerIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDM } r2 := idtools.IDMap{ ContainerID: used.ContainerID + used.Size, - HostID: used.HostID + used.Size, + HostID: avail.HostID + used.Size, Size: avail.ContainerID + avail.Size - used.ContainerID - used.Size, } return []idtools.IDMap{r1, r2} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 235bf6d41..101f5cc7a 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "io/ioutil" "os" "os/exec" "os/user" @@ -82,31 +83,92 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) { return path, nil } -func getRootlessRuntimeDir(rootlessUID int) (string, error) { - runtimeDir, err := homedir.GetRuntimeDir() +type rootlessRuntimeDirEnvironment interface { + getProcCommandFile() string + getRunUserDir() string + getTmpPerUserDir() string + + homeDirGetRuntimeDir() (string, error) + systemLstat(string) (*system.StatT, error) + homedirGet() string +} + +type rootlessRuntimeDirEnvironmentImplementation struct { + procCommandFile string + runUserDir string + tmpPerUserDir string +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { + return env.procCommandFile +} +func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { + return env.runUserDir +} +func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { + return env.tmpPerUserDir +} +func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { + return homedir.GetRuntimeDir() +} +func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { + return system.Lstat(path) +} +func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { + return homedir.Get() +} + +func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { + st, err := env.systemLstat(dir) + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 +} + +// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. 
+// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. +func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) { + runtimeDir, err := env.homeDirGetRuntimeDir() if err == nil { return runtimeDir, nil } - tmpDir := fmt.Sprintf("/run/user/%d", rootlessUID) - st, err := system.Stat(tmpDir) - if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 { - return tmpDir, nil + + initCommand, err := ioutil.ReadFile(env.getProcCommandFile()) + if err != nil || string(initCommand) == "systemd" { + runUserDir := env.getRunUserDir() + if isRootlessRuntimeDirOwner(runUserDir, env) { + return runUserDir, nil + } } - tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUID) - if err := os.MkdirAll(tmpDir, 0700); err != nil { - logrus.Errorf("failed to create %s: %v", tmpDir, err) - } else { - return tmpDir, nil + + tmpPerUserDir := env.getTmpPerUserDir() + if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { + if err := os.Mkdir(tmpPerUserDir, 0700); err != nil { + logrus.Errorf("failed to create temp directory for user: %v", err) + } else { + return tmpPerUserDir, nil + } + } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) { + return tmpPerUserDir, nil } - home := homedir.Get() - if home == "" { - return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty") + + homeDir := env.homedirGet() + if homeDir == "" { + return "", errors.New("neither XDG_RUNTIME_DIR not temp dir nor HOME was set non-empty") } - resolvedHome, err := filepath.EvalSymlinks(home) + resolvedHomeDir, err := filepath.EvalSymlinks(homeDir) if err != nil { - return "", errors.Wrapf(err, "cannot resolve %s", home) + return "", errors.Wrapf(err, "cannot resolve %s", homeDir) } - return filepath.Join(resolvedHome, "rundir"), nil + return filepath.Join(resolvedHomeDir, "rundir"), nil +} + +func getRootlessRuntimeDir(rootlessUID int) (string, error) { + return getRootlessRuntimeDirIsolated( + rootlessRuntimeDirEnvironmentImplementation{ + "/proc/1/comm", + fmt.Sprintf("/run/user/%d", rootlessUID), + fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID), + }, + ) } // getRootlessDirInfo returns the parent path of where the storage for containers and @@ -248,3 +310,18 @@ func validRootlessStoragePathFormat(path string) error { } return nil } + +func validateMountOptions(mountOptions []string) error { + var Empty struct{} + // Add invalid options for ImageMount() here. + invalidOptions := map[string]struct{}{ + "rw": Empty, + } + + for _, opt := range mountOptions { + if _, ok := invalidOptions[opt]; ok { + return fmt.Errorf(" %q option not supported", opt) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml index 304a83595..24b80388f 100644 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -9,4 +9,4 @@ branches: only: - master -script: make test testrace +script: env GO111MODULE=on make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index ead5830f7..e92fa614c 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -14,9 +14,10 @@ be a list of errors. If the caller knows this, they can unwrap the list and access the errors. 
If the caller doesn't know, the error formats to a nice human-readable format. -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. ## Installation and Docs @@ -81,6 +82,39 @@ if err := something(); err != nil { } ``` +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + **Returning a multierror only if there are errors** If you build a `multierror.Error`, you can use the `ErrorOrNil` function diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod index 2534331d5..0afe8e6f9 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.mod +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -1,3 +1,5 @@ module github.com/hashicorp/go-multierror +go 1.14 + require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum index 85b1f8ff3..e8238e9ec 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.sum +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -1,4 +1,2 @@ -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 000000000..9c29efb7f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. 
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 89b1422d1..d05dd9269 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -1,6 +1,7 @@ package multierror import ( + "errors" "fmt" ) @@ -49,3 +50,69 @@ func (e *Error) GoString() string { func (e *Error) WrappedErrors() []error { return e.Errors } + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index f2a80b5d0..ac3640dc9 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -5,11 +5,9 @@ It offers a very wide range of compression / speed trade-off, while being backed A high performance compression algorithm is implemented. For now focused on speed. 
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -Note that custom dictionaries are not supported yet, so if your code relies on that, -you cannot use the package as-is. +Note that custom dictionaries are only supported for decompression. This package is pure Go and without use of "unsafe". -If a significant speedup can be achieved using "unsafe", it may be added as an option later. The `zstd` package is provided as open source software using a Go standard license. @@ -142,80 +140,96 @@ Using the Encoder for both a stream and individual blocks concurrently is safe. I have collected some speed examples to compare speed and compression against other compressors. * `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `gzstd` is gzip standard library. `zstd` is the Datadog cgo library. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. * `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default". * `insize`/`outsize` is the input/output size. * `millis` is the number of milliseconds used for compression. * `mb/s` is megabytes (2^20 bytes) per second. ``` -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833033 5840 163.30 -enwik9 zskp 2 1000000000 317822183 8449 112.87 -enwik9 gzstd 1 1000000000 382578136 13627 69.98 -enwik9 gzstd 3 1000000000 349139651 22344 42.68 -enwik9 zstd 1 1000000000 357416379 4838 197.12 -enwik9 zstd 3 1000000000 313734522 7556 126.21 +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 234981983 5100 357.42 -gob-stream zskp 2 1911399616 208674003 6698 272.15 -gob-stream gzstd 1 1911399616 357382641 14727 123.78 -gob-stream gzstd 3 1911399616 327835097 17005 107.19 -gob-stream zstd 1 1911399616 250787165 4075 447.22 -gob-stream zstd 3 1911399616 208191888 5511 330.77 - -Highly compressible JSON file. Similar to logs in a lot of ways. 
-https://files.klauspost.com/compress/adresser.001.gz - -file out level insize outsize millis mb/s -adresser.001 zskp 1 1073741824 18510122 1477 692.83 -adresser.001 zskp 2 1073741824 19831697 1705 600.59 -adresser.001 gzstd 1 1073741824 47755503 3079 332.47 -adresser.001 gzstd 3 1073741824 40052381 3051 335.63 -adresser.001 zstd 1 1073741824 16135896 994 1030.18 -adresser.001 zstd 3 1073741824 17794465 905 1131.49 +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3648168838 33398 244.38 -rawstudio-mint14.tar zskp 2 8558382592 3376721436 50962 160.16 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 84712 96.35 -rawstudio-mint14.tar gzstd 3 8558382592 3740711978 176344 46.28 -rawstudio-mint14.tar zstd 1 8558382592 3607859742 27903 292.51 -rawstudio-mint14.tar zstd 3 8558382592 3341710879 46700 174.77 +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 -The test data is designed to test archivers in realistic backup scenarios. -http://mattmahoney.net/dc/10gb.html +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html -file out level insize outsize millis mb/s -10gb.tar zskp 1 10065157632 4883149814 45715 209.97 -10gb.tar zskp 2 10065157632 4638110010 60970 157.44 -10gb.tar gzstd 1 10065157632 5198296126 97769 98.18 -10gb.tar gzstd 3 10065157632 4932665487 313427 30.63 -10gb.tar zstd 1 10065157632 4940796535 40391 237.65 -10gb.tar zstd 3 10065157632 4638618579 52911 181.42 +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 +github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 +github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 +github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73025800 1108 182.26 -silesia.tar zskp 2 211947520 67674684 1599 126.41 -silesia.tar gzstd 1 211947520 80007735 2515 80.37 -silesia.tar gzstd 3 211947520 73133380 4259 47.45 -silesia.tar zstd 1 211947520 73513991 933 216.64 -silesia.tar zstd 3 211947520 66793301 1377 146.79 +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 +rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 +rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 +rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 +nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 +nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53 +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 +nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 ``` ### Converters @@ -315,13 +329,13 @@ Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-fo Dictionaries are added individually to Decoders. Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `RegisterDict(data)` with the dictionary data before starting any decompression. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. The dictionary will be used automatically for the data that specifies them. - A re-used Decoder will still contain the dictionaries registered. -When registering a dictionary with the same ID it will override the existing. +When registering multiple dictionaries with the same ID, the last one will be used. ### Allocation-less operation @@ -364,36 +378,42 @@ These are some examples of performance compared to [datadog cgo library](https:/ The first two are streaming decodes and the last are smaller inputs. 
``` -BenchmarkDecoderSilesia-8 20 642550210 ns/op 329.85 MB/s 3101 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 100 384930000 ns/op 550.61 MB/s 451878 B/op 9713 allocs/op - -BenchmarkDecoderEnwik9-2 10 3146000080 ns/op 317.86 MB/s 2649 B/op 9 allocs/op -BenchmarkDecoderEnwik9Cgo-2 20 1905900000 ns/op 524.69 MB/s 1125120 B/op 45785 allocs/op - -BenchmarkDecoder_DecodeAll/z000000.zst-8 200 7049994 ns/op 138.26 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000001.zst-8 100000 19560 ns/op 97.49 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000002.zst-8 5000 297599 ns/op 236.99 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000003.zst-8 2000 725502 ns/op 141.17 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000004.zst-8 200000 9314 ns/op 54.54 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000005.zst-8 10000 137500 ns/op 104.72 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000006.zst-8 500 2316009 ns/op 206.06 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000007.zst-8 20000 64499 ns/op 344.90 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000008.zst-8 50000 24900 ns/op 219.56 MB/s 40 B/op 2 allocs/op -BenchmarkDecoder_DecodeAll/z000009.zst-8 1000 2348999 ns/op 154.01 MB/s 40 B/op 2 allocs/op - -BenchmarkDecoder_DecodeAllCgo/z000000.zst-8 500 4268005 ns/op 228.38 MB/s 1228849 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000001.zst-8 100000 15250 ns/op 125.05 MB/s 2096 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000002.zst-8 10000 147399 ns/op 478.49 MB/s 73776 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000003.zst-8 5000 320798 ns/op 319.27 MB/s 139312 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000004.zst-8 200000 10004 ns/op 50.77 MB/s 560 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000005.zst-8 20000 73599 ns/op 195.64 MB/s 19120 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000006.zst-8 1000 1119003 ns/op 426.48 MB/s 557104 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000007.zst-8 20000 103450 ns/op 215.04 MB/s 71296 B/op 9 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000008.zst-8 100000 20130 ns/op 271.58 MB/s 6192 B/op 3 allocs/op -BenchmarkDecoder_DecodeAllCgo/z000009.zst-8 2000 1123500 ns/op 322.00 MB/s 368688 B/op 3 allocs/op +BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op +BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op + +BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op +BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op + +Concurrent performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 
172938.13 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op + +BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op ``` -This reflects the performance around May 2019, but this may be out of date. +This reflects the performance around May 2020, but this may be out of date. # Contributions diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 4f0eba22f..c584f6aab 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -444,9 +444,9 @@ func fuzzFseEncoder(data []byte) int { } // encode will encode the block and append the output in b.output. -func (b *blockEnc) encode(raw bool) error { +func (b *blockEnc) encode(raw, rawAllLits bool) error { if len(b.sequences) == 0 { - return b.encodeLits(raw) + return b.encodeLits(rawAllLits) } // We want some difference if len(b.literals) > (b.size - (b.size >> 5)) { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 75bf05bc9..66b51bf2d 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -85,6 +85,13 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { d.current.output = make(chan decodeOutput, d.o.concurrent) d.current.flushed = true + // Transfer option dicts. 
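+	// Each dictionary is keyed by its ID, so a frame that declares a dictionary ID can look it up during decoding.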
+ d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + // Create decoders d.decoders = make(chan *blockDec, d.o.concurrent) for i := 0; i < d.o.concurrent; i++ { @@ -399,19 +406,6 @@ func (d *Decoder) Close() { d.current.err = ErrDecoderClosed } -// RegisterDict will load a dictionary -func (d *Decoder) RegisterDict(b []byte) error { - dc, err := loadDict(b) - if err != nil { - return err - } - if d.dicts == nil { - d.dicts = make(map[uint32]dict, 1) - } - d.dicts[dc.id] = *dc - return nil -} - // IOReadCloser returns the decoder as an io.ReadCloser for convenience. // Any changes to the decoder will be reflected, so the returned ReadCloser // can be reused along with the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 2ac9cd2dd..284d38449 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -18,6 +18,7 @@ type decoderOptions struct { lowMem bool concurrent int maxDecodedSize uint64 + dicts []dict } func (o *decoderOptions) setDefault() { @@ -66,3 +67,18 @@ func WithDecoderMaxMemory(n uint64) DOption { return nil } } + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index bf42bb1cf..c56d2241f 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -280,7 +280,7 @@ func (e *Encoder) nextBlock(final bool) error { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(e.o.noEntropy) + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) } switch err { case errIncompressible: @@ -491,7 +491,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(blk.literals) != len(src) || len(src) != e.o.blockSize { // Output directly to dst blk.output = dst - err = blk.encode(e.o.noEntropy) + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) } switch err { @@ -528,7 +528,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(e.o.noEntropy) + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) } switch err { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 3fc03097a..dfac14ddd 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -12,16 +12,18 @@ type EOption func(*encoderOptions) error // options retains accumulated state of multiple options. 
type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - customWindow bool + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool } func (o *encoderOptions) setDefault() { @@ -207,6 +209,10 @@ func WithEncoderLevel(l EncoderLevel) EOption { o.windowSize = 16 << 20 } } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + return nil } } @@ -221,6 +227,18 @@ func WithZeroFrames(b bool) EOption { } } +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + // WithNoEntropyCompression will always skip entropy compression of literals. // This can be useful if content has matches, but unlikely to benefit from entropy // compression. Usually the slight speed improvement is not worth enabling this. diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 356956ba2..690428cd2 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { r.err = ErrSnappyCorrupt return written, r.err } - err = r.block.encode(false) + err = r.block.encode(false, false) switch err { case errIncompressible: r.block.popOffsets() diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go index c0a965923..a16a68e97 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -3,8 +3,6 @@ package cgroups import ( - "fmt" - "github.com/opencontainers/runc/libcontainer/configs" ) @@ -27,48 +25,27 @@ type Manager interface { // Destroys the cgroup set Destroy() error - // The option func SystemdCgroups() and Cgroupfs() require following attributes: - // Paths map[string]string - // Cgroups *configs.Cgroup - // Paths maps cgroup subsystem to path at which it is mounted. - // Cgroups specifies specific cgroup settings for the various subsystems - - // Returns cgroup paths to save in a state file and to be able to - // restore the object later. - GetPaths() map[string]string - - // GetUnifiedPath returns the unified path when running in unified mode. - // The value corresponds to the all values of GetPaths() map. - // - // GetUnifiedPath returns error when running in hybrid mode as well as - // in legacy mode. - GetUnifiedPath() (string, error) + // Path returns a cgroup path to the specified controller/subsystem. + // For cgroupv2, the argument is unused and can be empty. + Path(string) string // Sets the cgroup as configured. Set(container *configs.Config) error - // Gets the cgroup as configured. 
- GetCgroups() (*configs.Cgroup, error) -} - -type NotFoundError struct { - Subsystem string -} + // GetPaths returns cgroup path(s) to save in a state file in order to restore later. + // + // For cgroup v1, a key is cgroup subsystem name, and the value is the path + // to the cgroup for this subsystem. + // + // For cgroup v2 unified hierarchy, a key is "", and the value is the unified path. + GetPaths() map[string]string -func (e *NotFoundError) Error() string { - return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) -} + // GetCgroups returns the cgroup data as configured. + GetCgroups() (*configs.Cgroup, error) -func NewNotFoundError(sub string) error { - return &NotFoundError{ - Subsystem: sub, - } -} + // GetFreezerState retrieves the current FreezerState of the cgroup. + GetFreezerState() (configs.FreezerState, error) -func IsNotFound(err error) bool { - if err == nil { - return false - } - _, ok := err.(*NotFoundError) - return ok + // Whether the cgroup path exists or not + Exists() bool } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go index 8eeedc55b..7ac816605 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -20,6 +20,12 @@ type CpuUsage struct { // Total CPU time consumed per core. // Units: nanoseconds. PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // CPU time consumed per core in kernel mode + // Units: nanoseconds. + PercpuUsageInKernelmode []uint64 `json:"percpu_usage_in_kernelmode"` + // CPU time consumed per core in user mode + // Units: nanoseconds. + PercpuUsageInUsermode []uint64 `json:"percpu_usage_in_usermode"` // Time spent by tasks of the cgroup in kernel mode. // Units: nanoseconds. UsageInKernelmode uint64 `json:"usage_in_kernelmode"` @@ -51,12 +57,33 @@ type MemoryStats struct { KernelUsage MemoryData `json:"kernel_usage,omitempty"` // usage of kernel TCP memory KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"` + // usage of memory pages by NUMA node + // see chapter 5.6 of memory controller documentation + PageUsageByNUMA PageUsageByNUMA `json:"page_usage_by_numa,omitempty"` // if true, memory usage is accounted for throughout a hierarchy of cgroups. UseHierarchy bool `json:"use_hierarchy"` Stats map[string]uint64 `json:"stats,omitempty"` } +type PageUsageByNUMA struct { + // Embedding is used as types can't be recursive. 
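+	// The embedded counters cover this cgroup only, while Hierarchical also includes its descendants (the hierarchical_* lines of memory.numa_stat).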
+ PageUsageByNUMAInner + Hierarchical PageUsageByNUMAInner `json:"hierarchical,omitempty"` +} + +type PageUsageByNUMAInner struct { + Total PageStats `json:"total,omitempty"` + File PageStats `json:"file,omitempty"` + Anon PageStats `json:"anon,omitempty"` + Unevictable PageStats `json:"unevictable,omitempty"` +} + +type PageStats struct { + Total uint64 `json:"total,omitempty"` + Nodes map[uint8]uint64 `json:"nodes,omitempty"` +} + type PidsStats struct { // number of pids in the cgroup Current uint64 `json:"current,omitempty"` diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index dbcc58f5b..6e88b5dff 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -4,6 +4,7 @@ package cgroups import ( "bufio" + "errors" "fmt" "io" "io/ioutil" @@ -12,7 +13,6 @@ import ( "strconv" "strings" "sync" - "syscall" "time" units "github.com/docker/go-units" @@ -20,7 +20,6 @@ import ( ) const ( - CgroupNamePrefix = "name=" CgroupProcesses = "cgroup.procs" unifiedMountpoint = "/sys/fs/cgroup" ) @@ -40,8 +39,8 @@ var HugePageSizeUnitList = []string{"B", "KB", "MB", "GB", "TB", "PB"} // IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. func IsCgroup2UnifiedMode() bool { isUnifiedOnce.Do(func() { - var st syscall.Statfs_t - if err := syscall.Statfs(unifiedMountpoint, &st); err != nil { + var st unix.Statfs_t + if err := unix.Statfs(unifiedMountpoint, &st); err != nil { panic("cannot statfs cgroup root") } isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC @@ -49,191 +48,19 @@ func IsCgroup2UnifiedMode() bool { return isUnified } -// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt -func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { - if IsCgroup2UnifiedMode() { - return unifiedMountpoint, nil - } - mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) - return mnt, err -} - -func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) { - // We are not using mount.GetMounts() because it's super-inefficient, - // parsing it directly sped up x10 times because of not using Sscanf. - // It was one of two major performance drawbacks in container start. 
- if !isSubsystemAvailable(subsystem) { - return "", "", NewNotFoundError(subsystem) - } - - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", "", err - } - defer f.Close() - - if IsCgroup2UnifiedMode() { - subsystem = "" - } - - return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem) -} - -func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - txt := scanner.Text() - fields := strings.Fields(txt) - if len(fields) < 9 { - continue - } - if strings.HasPrefix(fields[4], cgroupPath) { - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if (subsystem == "" && fields[9] == "cgroup2") || opt == subsystem { - return fields[4], fields[3], nil - } - } - } - } - if err := scanner.Err(); err != nil { - return "", "", err - } - - return "", "", NewNotFoundError(subsystem) -} - -func isSubsystemAvailable(subsystem string) bool { - if IsCgroup2UnifiedMode() { - controllers, err := GetAllSubsystems() - if err != nil { - return false - } - for _, c := range controllers { - if c == subsystem { - return true - } - } - return false - } - - cgroups, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return false - } - _, avail := cgroups[subsystem] - return avail -} - -func GetClosestMountpointAncestor(dir, mountinfo string) string { - deepestMountPoint := "" - for _, mountInfoEntry := range strings.Split(mountinfo, "\n") { - mountInfoParts := strings.Fields(mountInfoEntry) - if len(mountInfoParts) < 5 { - continue - } - mountPoint := mountInfoParts[4] - if strings.HasPrefix(mountPoint, deepestMountPoint) && strings.HasPrefix(dir, mountPoint) { - deepestMountPoint = mountPoint - } - } - return deepestMountPoint -} - -func FindCgroupMountpointDir() (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - text := scanner.Text() - fields := strings.Split(text, " ") - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - numPostFields := len(postSeparatorFields) - - // This is an error as we can't detect if the mount is for "cgroup" - if numPostFields == 0 { - return "", fmt.Errorf("Found no fields post '-' in %q", text) - } - - if postSeparatorFields[0] == "cgroup" || postSeparatorFields[0] == "cgroup2" { - // Check that the mount is properly formatted. 
- if numPostFields < 3 { - return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - return filepath.Dir(fields[4]), nil - } - } - if err := scanner.Err(); err != nil { - return "", err - } - - return "", NewNotFoundError("cgroup") -} - type Mount struct { Mountpoint string Root string Subsystems []string } -func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { - if len(m.Subsystems) == 0 { - return "", fmt.Errorf("no subsystem for mount") - } - - return getControllerPath(m.Subsystems[0], cgroups) -} - -func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) { - res := make([]Mount, 0, len(ss)) - scanner := bufio.NewScanner(mi) - numFound := 0 - for scanner.Scan() && numFound < len(ss) { - txt := scanner.Text() - sepIdx := strings.Index(txt, " - ") - if sepIdx == -1 { - return nil, fmt.Errorf("invalid mountinfo format") - } - if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" { - continue - } - fields := strings.Split(txt, " ") - m := Mount{ - Mountpoint: fields[4], - Root: fields[3], - } - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - seen, known := ss[opt] - if !known || (!all && seen) { - continue - } - ss[opt] = true - if strings.HasPrefix(opt, CgroupNamePrefix) { - opt = opt[len(CgroupNamePrefix):] - } - m.Subsystems = append(m.Subsystems, opt) - numFound++ - } - if len(m.Subsystems) > 0 || all { - res = append(res, m) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return res, nil -} - // GetCgroupMounts returns the mounts for the cgroup subsystems. // all indicates whether to return just the first instance or all the mounts. +// This function should not be used from cgroupv2 code, as in this case +// all the controllers are available under the constant unifiedMountpoint. func GetCgroupMounts(all bool) ([]Mount, error) { if IsCgroup2UnifiedMode() { + // TODO: remove cgroupv2 case once all external users are converted availableControllers, err := GetAllSubsystems() if err != nil { return nil, err @@ -246,22 +73,7 @@ func GetCgroupMounts(all bool) ([]Mount, error) { return []Mount{m}, nil } - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - allSubsystems, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return nil, err - } - - allMap := make(map[string]bool) - for s := range allSubsystems { - allMap[s] = false - } - return getCgroupMountsHelper(allMap, f, all) + return getCgroupMountsV1(all) } // GetAllSubsystems returns all the cgroup subsystems supported by the kernel @@ -305,61 +117,8 @@ func GetAllSubsystems() ([]string, error) { return subsystems, nil } -// GetOwnCgroup returns the relative path to the cgroup docker is running in. 
-func GetOwnCgroup(subsystem string) (string, error) { - cgroups, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return "", err - } - - return getControllerPath(subsystem, cgroups) -} - -func GetOwnCgroupPath(subsystem string) (string, error) { - cgroup, err := GetOwnCgroup(subsystem) - if err != nil { - return "", err - } - - return getCgroupPathHelper(subsystem, cgroup) -} - -func GetInitCgroup(subsystem string) (string, error) { - cgroups, err := ParseCgroupFile("/proc/1/cgroup") - if err != nil { - return "", err - } - - return getControllerPath(subsystem, cgroups) -} - -func GetInitCgroupPath(subsystem string) (string, error) { - cgroup, err := GetInitCgroup(subsystem) - if err != nil { - return "", err - } - - return getCgroupPathHelper(subsystem, cgroup) -} - -func getCgroupPathHelper(subsystem, cgroup string) (string, error) { - mnt, root, err := FindCgroupMountpointAndRoot("", subsystem) - if err != nil { - return "", err - } - - // This is needed for nested containers, because in /proc/self/cgroup we - // see paths from host, which don't exist in container. - relCgroup, err := filepath.Rel(root, cgroup) - if err != nil { - return "", err - } - - return filepath.Join(mnt, relCgroup), nil -} - -func readProcsFile(dir string) ([]int, error) { - f, err := os.Open(filepath.Join(dir, CgroupProcesses)) +func readProcsFile(file string) ([]int, error) { + f, err := os.Open(file) if err != nil { return nil, err } @@ -379,11 +138,18 @@ func readProcsFile(dir string) ([]int, error) { out = append(out, pid) } } - return out, nil + return out, s.Err() } -// ParseCgroupFile parses the given cgroup file, typically from -// /proc/<pid>/cgroup, into a map of subgroups to cgroup names. +// ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup +// or /proc/<pid>/cgroup, into a map of subsystems to cgroup paths, e.g. +// "cpu": "/user.slice/user-1000.slice" +// "pids": "/user.slice/user-1000.slice" +// etc. +// +// Note that for cgroup v2 unified hierarchy, there are no per-controller +// cgroup paths, so the resulting map will have a single element where the key +// is empty string ("") and the value is the cgroup path the <pid> is in. func ParseCgroupFile(path string) (map[string]string, error) { f, err := os.Open(path) if err != nil { @@ -423,22 +189,6 @@ func parseCgroupFromReader(r io.Reader) (map[string]string, error) { return cgroups, nil } -func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { - if IsCgroup2UnifiedMode() { - return "/", nil - } - - if p, ok := cgroups[subsystem]; ok { - return p, nil - } - - if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok { - return p, nil - } - - return "", NewNotFoundError(subsystem) -} - func PathExists(path string) bool { if _, err := os.Stat(path); err != nil { return false @@ -514,8 +264,8 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { } // GetPids returns all pids, that were added to cgroup at path. 
-func GetPids(path string) ([]int, error) { - return readProcsFile(path) +func GetPids(dir string) ([]int, error) { + return readProcsFile(filepath.Join(dir, CgroupProcesses)) } // GetAllPids returns all pids, that were added to cgroup at path and to all its @@ -524,14 +274,13 @@ func GetAllPids(path string) ([]int, error) { var pids []int // collect pids from all sub-cgroups err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error { - dir, file := filepath.Split(p) - if file != CgroupProcesses { - return nil - } if iErr != nil { return iErr } - cPids, err := readProcsFile(dir) + if info.IsDir() || info.Name() != CgroupProcesses { + return nil + } + cPids, err := readProcsFile(p) if err != nil { return err } @@ -568,7 +317,7 @@ func WriteCgroupProc(dir string, pid int) error { // EINVAL might mean that the task being added to cgroup.procs is in state // TASK_NEW. We should attempt to do so again. - if isEINVAL(err) { + if errors.Is(err, unix.EINVAL) { time.Sleep(30 * time.Millisecond) continue } @@ -578,11 +327,53 @@ func WriteCgroupProc(dir string, pid int) error { return err } -func isEINVAL(err error) bool { - switch err := err.(type) { - case *os.PathError: - return err.Err == unix.EINVAL - default: - return false +// Since the OCI spec is designed for cgroup v1, in some cases +// there is need to convert from the cgroup v1 configuration to cgroup v2 +// the formula for BlkIOWeight is y = (1 + (x - 10) * 9999 / 990) +// convert linearly from [10-1000] to [1-10000] +func ConvertBlkIOToCgroupV2Value(blkIoWeight uint16) uint64 { + if blkIoWeight == 0 { + return 0 + } + return uint64(1 + (uint64(blkIoWeight)-10)*9999/990) +} + +// Since the OCI spec is designed for cgroup v1, in some cases +// there is need to convert from the cgroup v1 configuration to cgroup v2 +// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142) +// convert from [2-262144] to [1-10000] +// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)" +func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 { + if cpuShares == 0 { + return 0 } + return (1 + ((cpuShares-2)*9999)/262142) +} + +// ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec +// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap +// is defined as memory+swap combined, while in cgroup v2 swap is a separate value. +func ConvertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) { + // for compatibility with cgroup1 controller, set swap to unlimited in + // case the memory is set to unlimited, and swap is not explicitly set, + // treating the request as "set both memory and swap to unlimited". 
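+	// (e.g. memory=-1 with memorySwap=0 results in a swap limit of -1, i.e. unlimited)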
+ if memory == -1 && memorySwap == 0 { + return -1, nil + } + if memorySwap == -1 || memorySwap == 0 { + // -1 is "max", 0 is "unset", so treat as is + return memorySwap, nil + } + // sanity checks + if memory == 0 || memory == -1 { + return 0, errors.New("unable to set swap limit without memory limit") + } + if memory < 0 { + return 0, fmt.Errorf("invalid memory value: %d", memory) + } + if memorySwap < memory { + return 0, errors.New("memory+swap limit should be >= memory limit") + } + + return memorySwap - memory, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go new file mode 100644 index 000000000..f8487b0a9 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go @@ -0,0 +1,250 @@ +package cgroups + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +// Code in this source file are specific to cgroup v1, +// and must not be used from any cgroup v2 code. + +const ( + CgroupNamePrefix = "name=" +) + +var ( + errUnified = errors.New("not implemented for cgroup v2 unified hierarchy") +) + +type NotFoundError struct { + Subsystem string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) +} + +func NewNotFoundError(sub string) error { + return &NotFoundError{ + Subsystem: sub, + } +} + +func IsNotFound(err error) bool { + if err == nil { + return false + } + _, ok := err.(*NotFoundError) + return ok +} + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt +func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { + if IsCgroup2UnifiedMode() { + return "", errUnified + } + mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) + return mnt, err +} + +func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) { + if IsCgroup2UnifiedMode() { + return "", "", errUnified + } + + // We are not using mount.GetMounts() because it's super-inefficient, + // parsing it directly sped up x10 times because of not using Sscanf. + // It was one of two major performance drawbacks in container start. 
+ if !isSubsystemAvailable(subsystem) { + return "", "", NewNotFoundError(subsystem) + } + + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", "", err + } + defer f.Close() + + return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem) +} + +func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + txt := scanner.Text() + fields := strings.Fields(txt) + if len(fields) < 9 { + continue + } + if strings.HasPrefix(fields[4], cgroupPath) { + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + if opt == subsystem { + return fields[4], fields[3], nil + } + } + } + } + if err := scanner.Err(); err != nil { + return "", "", err + } + + return "", "", NewNotFoundError(subsystem) +} + +func isSubsystemAvailable(subsystem string) bool { + if IsCgroup2UnifiedMode() { + panic("don't call isSubsystemAvailable from cgroupv2 code") + } + + cgroups, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return false + } + _, avail := cgroups[subsystem] + return avail +} + +func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { + if len(m.Subsystems) == 0 { + return "", fmt.Errorf("no subsystem for mount") + } + + return getControllerPath(m.Subsystems[0], cgroups) +} + +func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) { + res := make([]Mount, 0, len(ss)) + scanner := bufio.NewScanner(mi) + numFound := 0 + for scanner.Scan() && numFound < len(ss) { + txt := scanner.Text() + sepIdx := strings.Index(txt, " - ") + if sepIdx == -1 { + return nil, fmt.Errorf("invalid mountinfo format") + } + if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" { + continue + } + fields := strings.Split(txt, " ") + m := Mount{ + Mountpoint: fields[4], + Root: fields[3], + } + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + seen, known := ss[opt] + if !known || (!all && seen) { + continue + } + ss[opt] = true + opt = strings.TrimPrefix(opt, CgroupNamePrefix) + m.Subsystems = append(m.Subsystems, opt) + numFound++ + } + if len(m.Subsystems) > 0 || all { + res = append(res, m) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return res, nil +} + +func getCgroupMountsV1(all bool) ([]Mount, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + allSubsystems, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return nil, err + } + + allMap := make(map[string]bool) + for s := range allSubsystems { + allMap[s] = false + } + return getCgroupMountsHelper(allMap, f, all) +} + +// GetOwnCgroup returns the relative path to the cgroup docker is running in. 
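+// It is cgroup v1 only; on a unified (v2) hierarchy it returns errUnified.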
+func GetOwnCgroup(subsystem string) (string, error) { + if IsCgroup2UnifiedMode() { + return "", errUnified + } + cgroups, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return "", err + } + + return getControllerPath(subsystem, cgroups) +} + +func GetOwnCgroupPath(subsystem string) (string, error) { + cgroup, err := GetOwnCgroup(subsystem) + if err != nil { + return "", err + } + + return getCgroupPathHelper(subsystem, cgroup) +} + +func GetInitCgroup(subsystem string) (string, error) { + if IsCgroup2UnifiedMode() { + return "", errUnified + } + cgroups, err := ParseCgroupFile("/proc/1/cgroup") + if err != nil { + return "", err + } + + return getControllerPath(subsystem, cgroups) +} + +func GetInitCgroupPath(subsystem string) (string, error) { + cgroup, err := GetInitCgroup(subsystem) + if err != nil { + return "", err + } + + return getCgroupPathHelper(subsystem, cgroup) +} + +func getCgroupPathHelper(subsystem, cgroup string) (string, error) { + mnt, root, err := FindCgroupMountpointAndRoot("", subsystem) + if err != nil { + return "", err + } + + // This is needed for nested containers, because in /proc/self/cgroup we + // see paths from host, which don't exist in container. + relCgroup, err := filepath.Rel(root, cgroup) + if err != nil { + return "", err + } + + return filepath.Join(mnt, relCgroup), nil +} + +func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { + if IsCgroup2UnifiedMode() { + return "", errUnified + } + + if p, ok := cgroups[subsystem]; ok { + return p, nil + } + + if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok { + return p, nil + } + + return "", NewNotFoundError(subsystem) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index 58ed19c9e..6e90ae16b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -1,5 +1,9 @@ package configs +import ( + systemdDbus "github.com/coreos/go-systemd/v22/dbus" +) + type FreezerState string const ( @@ -29,18 +33,16 @@ type Cgroup struct { // Resources contains various cgroups settings to apply *Resources + + // SystemdProps are any additional properties for systemd, + // derived from org.systemd.property.xxx annotations. + // Ignored unless systemd is used for managing cgroups. + SystemdProps []systemdDbus.Property `json:"-"` } type Resources struct { - // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. - // Deprecated - AllowAllDevices *bool `json:"allow_all_devices,omitempty"` - // Deprecated - AllowedDevices []*Device `json:"allowed_devices,omitempty"` - // Deprecated - DeniedDevices []*Device `json:"denied_devices,omitempty"` - - Devices []*Device `json:"devices"` + // Devices is the set of access rules for devices in the container. + Devices []*DeviceRule `json:"devices"` // Memory limit (in bytes) Memory int64 `json:"memory"` @@ -125,6 +127,10 @@ type Resources struct { // CpuWeight sets a proportional bandwidth limit. CpuWeight uint64 `json:"cpu_weight"` - // CpuMax sets she maximum bandwidth limit (format: max period). - CpuMax string `json:"cpu_max"` + // SkipDevices allows to skip configuring device permissions. + // Used by e.g. kubelet while creating a parent cgroup (kubepods) + // common for many containers. 
+ // + // NOTE it is impossible to start a container which has this flag set. + SkipDevices bool `json:"skip_devices"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 24989e9f5..ac523b417 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -8,7 +8,7 @@ import ( "time" "github.com/opencontainers/runtime-spec/specs-go" - + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -70,9 +70,10 @@ type Arg struct { // Syscall is a rule to match a syscall in Seccomp type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` + Name string `json:"name"` + Action Action `json:"action"` + ErrnoRet *uint `json:"errnoRet"` + Args []*Arg `json:"args"` } // TODO Windows. Many of these fields should be factored out into those parts @@ -175,7 +176,7 @@ type Config struct { // Hooks are a collection of actions to perform at various container lifecycle events. // CommandHooks are serialized to JSON, but other hooks are not. - Hooks *Hooks + Hooks Hooks // Version is the version of opencontainer specification that is supported. Version string `json:"version"` @@ -202,17 +203,50 @@ type Config struct { RootlessCgroups bool `json:"rootless_cgroups,omitempty"` } -type Hooks struct { +type HookName string +type HookList []Hook +type Hooks map[HookName]HookList + +const ( // Prestart commands are executed after the container namespaces are created, // but before the user supplied command is executed from init. - Prestart []Hook + // Note: This hook is now deprecated + // Prestart commands are called in the Runtime namespace. + Prestart HookName = "prestart" + + // CreateRuntime commands MUST be called as part of the create operation after + // the runtime environment has been created but before the pivot_root has been executed. + // CreateRuntime is called immediately after the deprecated Prestart hook. + // CreateRuntime commands are called in the Runtime Namespace. + CreateRuntime = "createRuntime" + + // CreateContainer commands MUST be called as part of the create operation after + // the runtime environment has been created but before the pivot_root has been executed. + // CreateContainer commands are called in the Container namespace. + CreateContainer = "createContainer" + + // StartContainer commands MUST be called as part of the start operation and before + // the container process is started. + // StartContainer commands are called in the Container namespace. + StartContainer = "startContainer" // Poststart commands are executed after the container init process starts. - Poststart []Hook + // Poststart commands are called in the Runtime Namespace. + Poststart = "poststart" // Poststop commands are executed after the container init process exits. - Poststop []Hook -} + // Poststop commands are called in the Runtime Namespace. + Poststop = "poststop" +) + +// TODO move this to runtime-spec +// See: https://github.com/opencontainers/runtime-spec/pull/1046 +const ( + Creating = "creating" + Created = "created" + Running = "running" + Stopped = "stopped" +) type Capabilities struct { // Bounding is the set of capabilities checked by the kernel. 
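As an illustration of the map-based Hooks type introduced in the hunk above, here is a minimal, hypothetical sketch (the command paths are made up; Command, CommandHook and NewCommandHook are pre-existing helpers in the same configs package, not part of this diff):

```
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// The former Hooks struct fields become map entries keyed by HookName.
	hooks := configs.Hooks{
		configs.CreateRuntime: configs.HookList{
			configs.NewCommandHook(configs.Command{Path: "/usr/local/bin/netns-setup"}),
		},
		configs.Poststop: configs.HookList{
			configs.NewCommandHook(configs.Command{Path: "/usr/local/bin/cleanup"}),
		},
	}
	// Phases without hooks simply have no entry; indexing a missing key yields an empty HookList.
	fmt.Println(len(hooks[configs.CreateRuntime]), len(hooks[configs.Prestart]))
}
```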
@@ -227,32 +261,39 @@ type Capabilities struct { Ambient []string } -func (hooks *Hooks) UnmarshalJSON(b []byte) error { - var state struct { - Prestart []CommandHook - Poststart []CommandHook - Poststop []CommandHook +func (hooks HookList) RunHooks(state *specs.State) error { + for i, h := range hooks { + if err := h.Run(state); err != nil { + return errors.Wrapf(err, "Running hook #%d:", i) + } } + return nil +} + +func (hooks *Hooks) UnmarshalJSON(b []byte) error { + var state map[HookName][]CommandHook + if err := json.Unmarshal(b, &state); err != nil { return err } - deserialize := func(shooks []CommandHook) (hooks []Hook) { - for _, shook := range shooks { - hooks = append(hooks, shook) + *hooks = Hooks{} + for n, commandHooks := range state { + if len(commandHooks) == 0 { + continue } - return hooks + (*hooks)[n] = HookList{} + for _, h := range commandHooks { + (*hooks)[n] = append((*hooks)[n], h) + } } - hooks.Prestart = deserialize(state.Prestart) - hooks.Poststart = deserialize(state.Poststart) - hooks.Poststop = deserialize(state.Poststop) return nil } -func (hooks Hooks) MarshalJSON() ([]byte, error) { +func (hooks *Hooks) MarshalJSON() ([]byte, error) { serialize := func(hooks []Hook) (serializableHooks []CommandHook) { for _, hook := range hooks { switch chook := hook.(type) { @@ -267,9 +308,12 @@ func (hooks Hooks) MarshalJSON() ([]byte, error) { } return json.Marshal(map[string]interface{}{ - "prestart": serialize(hooks.Prestart), - "poststart": serialize(hooks.Poststart), - "poststop": serialize(hooks.Poststop), + "prestart": serialize((*hooks)[Prestart]), + "createRuntime": serialize((*hooks)[CreateRuntime]), + "createContainer": serialize((*hooks)[CreateContainer]), + "startContainer": serialize((*hooks)[StartContainer]), + "poststart": serialize((*hooks)[Poststart]), + "poststop": serialize((*hooks)[Poststop]), }) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go index 8701bb212..632bf6ac4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go @@ -3,30 +3,19 @@ package configs import ( "fmt" "os" + "strconv" ) const ( Wildcard = -1 ) -// TODO Windows: This can be factored out in the future - type Device struct { - // Device type, block, char, etc. - Type rune `json:"type"` + DeviceRule // Path to the device. Path string `json:"path"` - // Major is the device's major number. - Major int64 `json:"major"` - - // Minor is the device's minor number. - Minor int64 `json:"minor"` - - // Cgroup permissions format, rwm. - Permissions string `json:"permissions"` - // FileMode permission bits for the device. FileMode os.FileMode `json:"file_mode"` @@ -35,23 +24,147 @@ type Device struct { // Gid of the device. Gid uint32 `json:"gid"` +} - // Write the file to the allowed list - Allow bool `json:"allow"` +// DevicePermissions is a cgroupv1-style string to represent device access. It +// has to be a string for backward compatibility reasons, hence why it has +// methods to do set operations. 
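+// For example, DevicePermissions("rw").Union("m") is "rwm", and DevicePermissions("rwm").Difference("w") is "rm".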
+type DevicePermissions string + +const ( + deviceRead uint = (1 << iota) + deviceWrite + deviceMknod +) + +func (p DevicePermissions) toSet() uint { + var set uint + for _, perm := range p { + switch perm { + case 'r': + set |= deviceRead + case 'w': + set |= deviceWrite + case 'm': + set |= deviceMknod + } + } + return set +} + +func fromSet(set uint) DevicePermissions { + var perm string + if set&deviceRead == deviceRead { + perm += "r" + } + if set&deviceWrite == deviceWrite { + perm += "w" + } + if set&deviceMknod == deviceMknod { + perm += "m" + } + return DevicePermissions(perm) +} + +// Union returns the union of the two sets of DevicePermissions. +func (p DevicePermissions) Union(o DevicePermissions) DevicePermissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs | rhs) +} + +// Difference returns the set difference of the two sets of DevicePermissions. +// In set notation, A.Difference(B) gives you A\B. +func (p DevicePermissions) Difference(o DevicePermissions) DevicePermissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs &^ rhs) +} + +// Intersection computes the intersection of the two sets of DevicePermissions. +func (p DevicePermissions) Intersection(o DevicePermissions) DevicePermissions { + lhs := p.toSet() + rhs := o.toSet() + return fromSet(lhs & rhs) } -func (d *Device) CgroupString() string { - return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions) +// IsEmpty returns whether the set of permissions in a DevicePermissions is +// empty. +func (p DevicePermissions) IsEmpty() bool { + return p == DevicePermissions("") } -func (d *Device) Mkdev() int { - return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12)) +// IsValid returns whether the set of permissions is a subset of valid +// permissions (namely, {r,w,m}). +func (p DevicePermissions) IsValid() bool { + return p == fromSet(p.toSet()) } -// deviceNumberString converts the device number to a string return result. -func deviceNumberString(number int64) string { - if number == Wildcard { - return "*" +type DeviceType rune + +const ( + WildcardDevice DeviceType = 'a' + BlockDevice DeviceType = 'b' + CharDevice DeviceType = 'c' // or 'u' + FifoDevice DeviceType = 'p' +) + +func (t DeviceType) IsValid() bool { + switch t { + case WildcardDevice, BlockDevice, CharDevice, FifoDevice: + return true + default: + return false + } +} + +func (t DeviceType) CanMknod() bool { + switch t { + case BlockDevice, CharDevice, FifoDevice: + return true + default: + return false + } +} + +func (t DeviceType) CanCgroup() bool { + switch t { + case WildcardDevice, BlockDevice, CharDevice: + return true + default: + return false + } +} + +type DeviceRule struct { + // Type of device ('c' for char, 'b' for block). If set to 'a', this rule + // acts as a wildcard and all fields other than Allow are ignored. + Type DeviceType `json:"type"` + + // Major is the device's major number. + Major int64 `json:"major"` + + // Minor is the device's minor number. + Minor int64 `json:"minor"` + + // Permissions is the set of permissions that this rule applies to (in the + // cgroupv1 format -- any combination of "rwm"). + Permissions DevicePermissions `json:"permissions"` + + // Allow specifies whether this rule is allowed. 
+ Allow bool `json:"allow"` +} + +func (d *DeviceRule) CgroupString() string { + var ( + major = strconv.FormatInt(d.Major, 10) + minor = strconv.FormatInt(d.Minor, 10) + ) + if d.Major == Wildcard { + major = "*" + } + if d.Minor == Wildcard { + minor = "*" } - return fmt.Sprint(number) + return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go deleted file mode 100644 index e4f423c52..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ /dev/null @@ -1,111 +0,0 @@ -// +build linux - -package configs - -var ( - // DefaultSimpleDevices are devices that are to be both allowed and created. - DefaultSimpleDevices = []*Device{ - // /dev/null and zero - { - Path: "/dev/null", - Type: 'c', - Major: 1, - Minor: 3, - Permissions: "rwm", - FileMode: 0666, - }, - { - Path: "/dev/zero", - Type: 'c', - Major: 1, - Minor: 5, - Permissions: "rwm", - FileMode: 0666, - }, - - { - Path: "/dev/full", - Type: 'c', - Major: 1, - Minor: 7, - Permissions: "rwm", - FileMode: 0666, - }, - - // consoles and ttys - { - Path: "/dev/tty", - Type: 'c', - Major: 5, - Minor: 0, - Permissions: "rwm", - FileMode: 0666, - }, - - // /dev/urandom,/dev/random - { - Path: "/dev/urandom", - Type: 'c', - Major: 1, - Minor: 9, - Permissions: "rwm", - FileMode: 0666, - }, - { - Path: "/dev/random", - Type: 'c', - Major: 1, - Minor: 8, - Permissions: "rwm", - FileMode: 0666, - }, - } - DefaultAllowedDevices = append([]*Device{ - // allow mknod for any device - { - Type: 'c', - Major: Wildcard, - Minor: Wildcard, - Permissions: "m", - }, - { - Type: 'b', - Major: Wildcard, - Minor: Wildcard, - Permissions: "m", - }, - - { - Path: "/dev/console", - Type: 'c', - Major: 5, - Minor: 1, - Permissions: "rwm", - }, - // /dev/pts/ - pts namespaces are "coming soon" - { - Path: "", - Type: 'c', - Major: 136, - Minor: Wildcard, - Permissions: "rwm", - }, - { - Path: "", - Type: 'c', - Major: 5, - Minor: 2, - Permissions: "rwm", - }, - - // tuntap - { - Path: "", - Type: 'c', - Major: 10, - Minor: 200, - Permissions: "rwm", - }, - }, DefaultSimpleDevices...) - DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...) 
-) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go new file mode 100644 index 000000000..650c46848 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go @@ -0,0 +1,16 @@ +// +build !windows + +package configs + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +func (d *DeviceRule) Mkdev() (uint64, error) { + if d.Major == Wildcard || d.Minor == Wildcard { + return 0, errors.New("cannot mkdev() device with wildcards") + } + return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go new file mode 100644 index 000000000..729289393 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go @@ -0,0 +1,5 @@ +package configs + +func (d *DeviceRule) Mkdev() (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go index 5dabe06ce..702f913ec 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go @@ -31,33 +31,33 @@ func DeviceFromPath(path, permissions string) (*configs.Device, error) { } var ( + devType configs.DeviceType + mode = stat.Mode devNumber = uint64(stat.Rdev) major = unix.Major(devNumber) minor = unix.Minor(devNumber) ) - if major == 0 { - return nil, ErrNotADevice - } - - var ( - devType rune - mode = stat.Mode - ) switch { case mode&unix.S_IFBLK == unix.S_IFBLK: - devType = 'b' + devType = configs.BlockDevice case mode&unix.S_IFCHR == unix.S_IFCHR: - devType = 'c' + devType = configs.CharDevice + case mode&unix.S_IFIFO == unix.S_IFIFO: + devType = configs.FifoDevice + default: + return nil, ErrNotADevice } return &configs.Device{ - Type: devType, - Path: path, - Major: int64(major), - Minor: int64(minor), - Permissions: permissions, - FileMode: os.FileMode(mode), - Uid: stat.Uid, - Gid: stat.Gid, + DeviceRule: configs.DeviceRule{ + Type: devType, + Major: int64(major), + Minor: int64(minor), + Permissions: configs.DevicePermissions(permissions), + }, + Path: path, + FileMode: os.FileMode(mode), + Uid: stat.Uid, + Gid: stat.Gid, }, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index a4ae8901a..49471960b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -5,26 +5,13 @@ package system import ( "os" "os/exec" - "syscall" // only for exec + "sync" "unsafe" "github.com/opencontainers/runc/libcontainer/user" "golang.org/x/sys/unix" ) -// If arg2 is nonzero, set the "child subreaper" attribute of the -// calling process; if arg2 is zero, unset the attribute. When a -// process is marked as a child subreaper, all of the children -// that it creates, and their descendants, will be marked as -// having a subreaper. In effect, a subreaper fulfills the role -// of init(1) for its descendant processes. 
Upon termination of -// a process that is orphaned (i.e., its immediate parent has -// already terminated) and marked as having a subreaper, the -// nearest still living ancestor subreaper will receive a SIGCHLD -// signal and be able to wait(2) on the process to discover its -// termination status. -const PR_SET_CHILD_SUBREAPER = 36 - type ParentDeathSignal int func (p ParentDeathSignal) Restore() error { @@ -51,7 +38,7 @@ func Execv(cmd string, args []string, env []string) error { return err } - return syscall.Exec(name, args, env) + return unix.Exec(name, args, env) } func Prlimit(pid, resource int, limit unix.Rlimit) error { @@ -100,15 +87,23 @@ func Setctty() error { return nil } +var ( + inUserNS bool + nsOnce sync.Once +) + // RunningInUserNS detects whether we are currently running in a user namespace. // Originally copied from github.com/lxc/lxd/shared/util.go func RunningInUserNS() bool { - uidmap, err := user.CurrentProcessUIDMap() - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return false - } - return UIDMapInUserNS(uidmap) + nsOnce.Do(func() { + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return + } + inUserNS = UIDMapInUserNS(uidmap) + }) + return inUserNS } func UIDMapInUserNS(uidmap []user.IDMap) bool { @@ -140,7 +135,7 @@ func GetParentNSeuid() int64 { // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { - return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) + return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } // GetSubreaper returns the subreaper setting for the calling process diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go deleted file mode 100644 index b8434f105..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build cgo,linux - -package system - -/* -#include <unistd.h> -*/ -import "C" - -func GetClockTicks() int { - return int(C.sysconf(C._SC_CLK_TCK)) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go deleted file mode 100644 index d93b5d5fd..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !cgo windows - -package system - -func GetClockTicks() int { - // TODO figure out a better alternative for platforms where we're missing cgo - // - // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx - // - // An example of its usage can be found here. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx - - return 100 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 7b912bbf8..4b89dad73 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -60,7 +60,7 @@ type Group struct { // groupFromOS converts an os/user.(*Group) to local Group // -// (This does not include Pass, Shell or Gecos) +// (This does not include Pass or List) func groupFromOS(g *user.Group) (Group, error) { newGroup := Group{ Name: g.Name, @@ -162,10 +162,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -183,6 +179,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -221,10 +220,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - text := s.Text() if text == "" { continue @@ -242,6 +237,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -532,10 +530,6 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -549,6 +543,9 @@ func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } @@ -586,10 +583,6 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { ) for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - line := strings.TrimSpace(s.Text()) if line == "" { continue @@ -603,6 +596,9 @@ func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { out = append(out, p) } } + if err := s.Err(); err != nil { + return nil, err + } return out, nil } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go index 6d3268902..c757c20e0 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go @@ -29,6 +29,9 @@ var ( type Generator struct { Config *rspec.Spec HostSpecific bool + // This is used to keep a cache of the ENVs added to improve + // performance when adding a huge number of ENV variables + envMap map[string]int } // ExportOptions have toggles for exporting only certain parts of the specification @@ -236,7 +239,12 @@ func New(os string) (generator Generator, err error) { } } - return Generator{Config: &config}, nil + envCache := map[string]int{} + if config.Process != nil { + envCache = createEnvCacheMap(config.Process.Env) + } + + return Generator{Config: &config, envMap: envCache}, nil } // NewFromSpec creates a configuration Generator from a given @@ -246,8 +254,14 @@ func New(os string) (generator Generator, err error) { // // generator := Generator{Config: config} func 
NewFromSpec(config *rspec.Spec) Generator { + envCache := map[string]int{} + if config != nil && config.Process != nil { + envCache = createEnvCacheMap(config.Process.Env) + } + return Generator{ Config: config, + envMap: envCache, } } @@ -273,11 +287,27 @@ func NewFromTemplate(r io.Reader) (Generator, error) { if err := json.NewDecoder(r).Decode(&config); err != nil { return Generator{}, err } + + envCache := map[string]int{} + if config.Process != nil { + envCache = createEnvCacheMap(config.Process.Env) + } + return Generator{ Config: &config, + envMap: envCache, }, nil } +// createEnvCacheMap creates a hash map with the ENV variables given by the config +func createEnvCacheMap(env []string) map[string]int { + envMap := make(map[string]int, len(env)) + for i, val := range env { + envMap[val] = i + } + return envMap +} + // SetSpec sets the configuration in the Generator g. // // Deprecated: Replace with: @@ -414,6 +444,12 @@ func (g *Generator) SetProcessUsername(username string) { g.Config.Process.User.Username = username } +// SetProcessUmask sets g.Config.Process.User.Umask. +func (g *Generator) SetProcessUmask(umask uint32) { + g.initConfigProcess() + g.Config.Process.User.Umask = umask +} + // SetProcessGID sets g.Config.Process.User.GID. func (g *Generator) SetProcessGID(gid uint32) { g.initConfigProcess() @@ -456,21 +492,44 @@ func (g *Generator) ClearProcessEnv() { return } g.Config.Process.Env = []string{} + // Clear out the env cache map as well + g.envMap = map[string]int{} } // AddProcessEnv adds name=value into g.Config.Process.Env, or replaces an // existing entry with the given name. func (g *Generator) AddProcessEnv(name, value string) { + if name == "" { + return + } + g.initConfigProcess() + g.addEnv(fmt.Sprintf("%s=%s", name, value), name) +} - env := fmt.Sprintf("%s=%s", name, value) - for idx := range g.Config.Process.Env { - if strings.HasPrefix(g.Config.Process.Env[idx], name+"=") { - g.Config.Process.Env[idx] = env - return - } +// AddMultipleProcessEnv adds multiple name=value into g.Config.Process.Env, or replaces +// existing entries with the given name. +func (g *Generator) AddMultipleProcessEnv(envs []string) { + g.initConfigProcess() + + for _, val := range envs { + split := strings.SplitN(val, "=", 2) + g.addEnv(val, split[0]) + } +} + +// addEnv looks through adds ENV to the Process and checks envMap for +// any duplicates +// This is called by both AddMultipleProcessEnv and AddProcessEnv +func (g *Generator) addEnv(env, key string) { + if idx, ok := g.envMap[key]; ok { + // The ENV exists in the cache, so change its value in g.Config.Process.Env + g.Config.Process.Env[idx] = env + } else { + // else the env doesn't exist, so add it and add it's index to g.envMap + g.Config.Process.Env = append(g.Config.Process.Env, env) + g.envMap[key] = len(g.Config.Process.Env) - 1 } - g.Config.Process.Env = append(g.Config.Process.Env, env) } // AddProcessRlimits adds rlimit into g.Config.Process.Rlimits. 
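A minimal sketch of how the new environment cache behaves (values are arbitrary; the Generator methods used are the ones shown in the hunks above):

```
package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	g, err := generate.New("linux")
	if err != nil {
		panic(err)
	}
	// Both helpers consult the internal envMap cache, so replacing an existing
	// variable is a map lookup instead of a linear scan over Process.Env.
	g.AddProcessEnv("FOO", "bar") // appended and cached
	g.AddProcessEnv("FOO", "baz") // same name: replaced in place via the cache
	g.AddMultipleProcessEnv([]string{"A=1", "B=2", "A=3"}) // "A" ends up as "A=3"
	fmt.Println(g.Config.Process.Env)
}
```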
@@ -1443,7 +1502,7 @@ func (g *Generator) AddDevice(device rspec.LinuxDevice) { return } if dev.Type == device.Type && dev.Major == device.Major && dev.Minor == device.Minor { - fmt.Fprintln(os.Stderr, "WARNING: The same type, major and minor should not be used for multiple devices.") + fmt.Fprintf(os.Stderr, "WARNING: Creating device %q with same type, major and minor as existing %q.\n", device.Path, dev.Path) } } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go index 5fee5a3b2..8a8dc3970 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go @@ -566,6 +566,20 @@ func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp { }, }...) /* Flags parameter of the clone syscall is the 2nd on s390 */ + syscalls = append(syscalls, []rspec.LinuxSyscall{ + { + Names: []string{"clone"}, + Action: rspec.ActAllow, + Args: []rspec.LinuxSeccompArg{ + { + Index: 1, + Value: 2080505856, + ValueTwo: 0, + Op: rspec.OpMaskedEqual, + }, + }, + }, + }...) } return &rspec.LinuxSeccomp{ diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go new file mode 100644 index 000000000..79a8e6446 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -0,0 +1,21 @@ +/* +Package selinux provides a high-level interface for interacting with selinux. + +This package uses a selinux build tag to enable the selinux functionality. This +allows non-linux and linux users who do not have selinux support to still use +tools that rely on this library. + +To compile with full selinux support use the -tags=selinux option in your build +and test commands. + +Usage: + + import "github.com/opencontainers/selinux/go-selinux" + + // Ensure that selinux is enforcing mode. 
+ if selinux.EnforceMode() != selinux.Enforcing { + selinux.SetEnforceMode(selinux.Enforcing) + } + +*/ +package selinux diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go index 779e2e3a8..10ac15a85 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go @@ -73,9 +73,9 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) { selinux.ReleaseLabel(processLabel) } processLabel = pcon.Get() - mountLabel = mcon.Get() selinux.ReserveLabel(processLabel) } + mountLabel = mcon.Get() } return processLabel, mountLabel, nil } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go index c2bdd35d7..a7d2d5e34 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go @@ -30,7 +30,6 @@ func Relabel(path string, fileLabel string, shared bool) error { // DisableSecOpt returns a security opt that can disable labeling // support for future container processes func DisableSecOpt() []string { - // TODO the selinux.DisableSecOpt stub returns []string{"disable"} instead of "nil" return nil } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go new file mode 100644 index 000000000..50760dc93 --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -0,0 +1,249 @@ +package selinux + +import ( + "github.com/pkg/errors" +) + +const ( + // Enforcing constant indicate SELinux is in enforcing mode + Enforcing = 1 + // Permissive constant to indicate SELinux is in permissive mode + Permissive = 0 + // Disabled constant to indicate SELinux is disabled + Disabled = -1 + + // DefaultCategoryRange is the upper bound on the category range + DefaultCategoryRange = uint32(1024) +) + +var ( + // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. + ErrMCSAlreadyExists = errors.New("MCS label already exists") + // ErrEmptyPath is returned when an empty path has been specified. + ErrEmptyPath = errors.New("empty path") + + // InvalidLabel is returned when an invalid label is specified. + InvalidLabel = errors.New("Invalid Label") + + // ErrIncomparable is returned two levels are not comparable + ErrIncomparable = errors.New("incomparable levels") + // ErrLevelSyntax is returned when a sensitivity or category do not have correct syntax in a level + ErrLevelSyntax = errors.New("invalid level syntax") + + // CategoryRange allows the upper bound on the category range to be adjusted + CategoryRange = DefaultCategoryRange +) + +// Context is a representation of the SELinux label broken into 4 parts +type Context map[string]string + +// SetDisabled disables SELinux support for the package +func SetDisabled() { + setDisabled() +} + +// GetEnabled returns whether SELinux is currently enabled. +func GetEnabled() bool { + return getEnabled() +} + +// ClassIndex returns the int index for an object class in the loaded policy, +// or -1 and an error +func ClassIndex(class string) (int, error) { + return classIndex(class) +} + +// SetFileLabel sets the SELinux label for this path or returns an error. 
+func SetFileLabel(fpath string, label string) error { + return setFileLabel(fpath, label) +} + +// FileLabel returns the SELinux label for this path or returns an error. +func FileLabel(fpath string) (string, error) { + return fileLabel(fpath) +} + +// SetFSCreateLabel tells kernel the label to create all file system objects +// created by this task. Setting label="" to return to default. +func SetFSCreateLabel(label string) error { + return setFSCreateLabel(label) +} + +// FSCreateLabel returns the default label the kernel which the kernel is using +// for file system objects created by this task. "" indicates default. +func FSCreateLabel() (string, error) { + return fsCreateLabel() +} + +// CurrentLabel returns the SELinux label of the current process thread, or an error. +func CurrentLabel() (string, error) { + return currentLabel() +} + +// PidLabel returns the SELinux label of the given pid, or an error. +func PidLabel(pid int) (string, error) { + return pidLabel(pid) +} + +// ExecLabel returns the SELinux label that the kernel will use for any programs +// that are executed by the current process thread, or an error. +func ExecLabel() (string, error) { + return execLabel() +} + +// CanonicalizeContext takes a context string and writes it to the kernel +// the function then returns the context that the kernel will use. Use this +// function to check if two contexts are equivalent +func CanonicalizeContext(val string) (string, error) { + return canonicalizeContext(val) +} + +// ComputeCreateContext requests the type transition from source to target for +// class from the kernel. +func ComputeCreateContext(source string, target string, class string) (string, error) { + return computeCreateContext(source, target, class) +} + +// CalculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound) +// of a source and target range. +// The glblub is calculated as the greater of the low sensitivities and +// the lower of the high sensitivities and the and of each category bitset. +func CalculateGlbLub(sourceRange, targetRange string) (string, error) { + return calculateGlbLub(sourceRange, targetRange) +} + +// SetExecLabel sets the SELinux label that the kernel will use for any programs +// that are executed by the current process thread, or an error. +func SetExecLabel(label string) error { + return setExecLabel(label) +} + +// SetTaskLabel sets the SELinux label for the current thread, or an error. +// This requires the dyntransition permission. 
+func SetTaskLabel(label string) error { + return setTaskLabel(label) +} + +// SetSocketLabel takes a process label and tells the kernel to assign the +// label to the next socket that gets created +func SetSocketLabel(label string) error { + return setSocketLabel(label) +} + +// SocketLabel retrieves the current socket label setting +func SocketLabel() (string, error) { + return socketLabel() +} + +// PeerLabel retrieves the label of the client on the other side of a socket +func PeerLabel(fd uintptr) (string, error) { + return peerLabel(fd) +} + +// SetKeyLabel takes a process label and tells the kernel to assign the +// label to the next kernel keyring that gets created +func SetKeyLabel(label string) error { + return setKeyLabel(label) +} + +// KeyLabel retrieves the current kernel keyring label setting +func KeyLabel() (string, error) { + return keyLabel() +} + +// Get returns the Context as a string +func (c Context) Get() string { + return c.get() +} + +// NewContext creates a new Context struct from the specified label +func NewContext(label string) (Context, error) { + return newContext(label) +} + +// ClearLabels clears all reserved labels +func ClearLabels() { + clearLabels() +} + +// ReserveLabel reserves the MLS/MCS level component of the specified label +func ReserveLabel(label string) { + reserveLabel(label) +} + +// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func EnforceMode() int { + return enforceMode() +} + +// SetEnforceMode sets the current SELinux mode Enforcing, Permissive. +// Disabled is not valid, since this needs to be set at boot time. +func SetEnforceMode(mode int) error { + return setEnforceMode(mode) +} + +// DefaultEnforceMode returns the systems default SELinux mode Enforcing, +// Permissive or Disabled. Note this is is just the default at boot time. +// EnforceMode tells you the systems current mode. +func DefaultEnforceMode() int { + return defaultEnforceMode() +} + +// ReleaseLabel un-reserves the MLS/MCS Level field of the specified label, +// allowing it to be used by another process. +func ReleaseLabel(label string) { + releaseLabel(label) +} + +// ROFileLabel returns the specified SELinux readonly file label +func ROFileLabel() string { + return roFileLabel() +} + +// KVMContainerLabels returns the default processLabel and mountLabel to be used +// for kvm containers by the calling process. +func KVMContainerLabels() (string, string) { + return kvmContainerLabels() +} + +// InitContainerLabels returns the default processLabel and file labels to be +// used for containers running an init system like systemd by the calling process. +func InitContainerLabels() (string, string) { + return initContainerLabels() +} + +// ContainerLabels returns an allocated processLabel and fileLabel to be used for +// container labeling by the calling process. +func ContainerLabels() (processLabel string, fileLabel string) { + return containerLabels() +} + +// SecurityCheckContext validates that the SELinux label is understood by the kernel +func SecurityCheckContext(val string) error { + return securityCheckContext(val) +} + +// CopyLevel returns a label with the MLS/MCS level from src label replaced on +// the dest label. +func CopyLevel(src, dest string) (string, error) { + return copyLevel(src, dest) +} + +// Chcon changes the fpath file object to the SELinux label label. +// If fpath is a directory and recurse is true, then Chcon walks the +// directory tree setting the label. 
+func Chcon(fpath string, label string, recurse bool) error { + return chcon(fpath, label, recurse) +} + +// DupSecOpt takes an SELinux process label and returns security options that +// can be used to set the SELinux Type and Level for future container processes. +func DupSecOpt(src string) ([]string, error) { + return dupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can be used to disable SELinux +// labeling support for future container processes. +func DisableSecOpt() []string { + return disableSecOpt() +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 9c979e5e2..d6b0d49db 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -20,17 +20,12 @@ import ( "github.com/opencontainers/selinux/pkg/pwalk" "github.com/pkg/errors" + "github.com/willf/bitset" "golang.org/x/sys/unix" ) const ( - // Enforcing constant indicate SELinux is in enforcing mode - Enforcing = 1 - // Permissive constant to indicate SELinux is in permissive mode - Permissive = 0 - // Disabled constant to indicate SELinux is disabled - Disabled = -1 - + minSensLen = 2 contextFile = "/usr/share/containers/selinux/contexts" selinuxDir = "/etc/selinux/" selinuxConfig = selinuxDir + "config" @@ -49,17 +44,27 @@ type selinuxState struct { sync.Mutex } +type level struct { + sens uint + cats *bitset.BitSet +} + +type mlsRange struct { + low *level + high *level +} + +type levelItem byte + +const ( + sensitivity levelItem = 's' + category levelItem = 'c' +) + var ( - // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. - ErrMCSAlreadyExists = errors.New("MCS label already exists") - // ErrEmptyPath is returned when an empty path has been specified. - ErrEmptyPath = errors.New("empty path") - // InvalidLabel is returned when an invalid label is specified. - InvalidLabel = errors.New("Invalid Label") - - assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) - roFileLabel string - state = selinuxState{ + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + readOnlyFileLabel string + state = selinuxState{ mcsList: make(map[string]bool), } @@ -68,9 +73,6 @@ var ( haveThreadSelf bool ) -// Context is a representation of the SELinux label broken into 4 parts -type Context map[string]string - func (s *selinuxState) setEnable(enabled bool) bool { s.Lock() defer s.Unlock() @@ -97,8 +99,8 @@ func (s *selinuxState) getEnabled() bool { return s.setEnable(enabled) } -// SetDisabled disables selinux support for the package -func SetDisabled() { +// setDisabled disables SELinux support for the package +func setDisabled() { state.setEnable(false) } @@ -190,15 +192,15 @@ func (s *selinuxState) getSELinuxfs() string { // getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs // filesystem or an empty string if no mountpoint is found. Selinuxfs is -// a proc-like pseudo-filesystem that exposes the selinux policy API to +// a proc-like pseudo-filesystem that exposes the SELinux policy API to // processes. The existence of an selinuxfs mount is used to determine -// whether selinux is currently enabled or not. +// whether SELinux is currently enabled or not. func getSelinuxMountPoint() string { return state.getSELinuxfs() } -// GetEnabled returns whether selinux is currently enabled. -func GetEnabled() bool { +// getEnabled returns whether SELinux is currently enabled. 
+func getEnabled() bool { return state.getEnabled() } @@ -282,8 +284,9 @@ func readCon(fpath string) (string, error) { return strings.Trim(retval, "\x00"), nil } -// ClassIndex returns the int index for an object class in the loaded policy, or -1 and an error -func ClassIndex(class string) (int, error) { +// classIndex returns the int index for an object class in the loaded policy, +// or -1 and an error +func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) @@ -299,8 +302,8 @@ func ClassIndex(class string) (int, error) { return index, nil } -// SetFileLabel sets the SELinux label for this path or returns an error. -func SetFileLabel(fpath string, label string) error { +// setFileLabel sets the SELinux label for this path or returns an error. +func setFileLabel(fpath string, label string) error { if fpath == "" { return ErrEmptyPath } @@ -310,8 +313,8 @@ func SetFileLabel(fpath string, label string) error { return nil } -// FileLabel returns the SELinux label for this path or returns an error. -func FileLabel(fpath string) (string, error) { +// fileLabel returns the SELinux label for this path or returns an error. +func fileLabel(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } @@ -327,37 +330,31 @@ func FileLabel(fpath string) (string, error) { return string(label), nil } -/* -SetFSCreateLabel tells kernel the label to create all file system objects -created by this task. Setting label="" to return to default. -*/ -func SetFSCreateLabel(label string) error { +// setFSCreateLabel tells kernel the label to create all file system objects +// created by this task. Setting label="" to return to default. +func setFSCreateLabel(label string) error { return writeAttr("fscreate", label) } -/* -FSCreateLabel returns the default label the kernel which the kernel is using -for file system objects created by this task. "" indicates default. -*/ -func FSCreateLabel() (string, error) { +// fsCreateLabel returns the default label the kernel which the kernel is using +// for file system objects created by this task. "" indicates default. +func fsCreateLabel() (string, error) { return readAttr("fscreate") } -// CurrentLabel returns the SELinux label of the current process thread, or an error. -func CurrentLabel() (string, error) { +// currentLabel returns the SELinux label of the current process thread, or an error. +func currentLabel() (string, error) { return readAttr("current") } -// PidLabel returns the SELinux label of the given pid, or an error. -func PidLabel(pid int) (string, error) { +// pidLabel returns the SELinux label of the given pid, or an error. +func pidLabel(pid int) (string, error) { return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) } -/* -ExecLabel returns the SELinux label that the kernel will use for any programs -that are executed by the current process thread, or an error. -*/ -func ExecLabel() (string, error) { +// ExecLabel returns the SELinux label that the kernel will use for any programs +// that are executed by the current process thread, or an error. 
+func execLabel() (string, error) { return readAttr("exec") } @@ -366,7 +363,7 @@ func writeCon(fpath, val string) error { return ErrEmptyPath } if val == "" { - if !GetEnabled() { + if !getEnabled() { return nil } } @@ -418,20 +415,17 @@ func writeAttr(attr, val string) error { return writeCon(attrPath(attr), val) } -/* -CanonicalizeContext takes a context string and writes it to the kernel -the function then returns the context that the kernel will use. This function -can be used to see if two contexts are equivalent -*/ -func CanonicalizeContext(val string) (string, error) { +// canonicalizeContext takes a context string and writes it to the kernel +// the function then returns the context that the kernel will use. Use this +// function to check if two contexts are equivalent +func canonicalizeContext(val string) (string, error) { return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) } -/* -ComputeCreateContext requests the type transition from source to target for class from the kernel. -*/ -func ComputeCreateContext(source string, target string, class string) (string, error) { - classidx, err := ClassIndex(class) +// computeCreateContext requests the type transition from source to target for +// class from the kernel. +func computeCreateContext(source string, target string, class string) (string, error) { + classidx, err := classIndex(class) if err != nil { return "", err } @@ -439,6 +433,217 @@ func ComputeCreateContext(source string, target string, class string) (string, e return readWriteCon(filepath.Join(getSelinuxMountPoint(), "create"), fmt.Sprintf("%s %s %d", source, target, classidx)) } +// catsToBitset stores categories in a bitset. +func catsToBitset(cats string) (*bitset.BitSet, error) { + bitset := &bitset.BitSet{} + + catlist := strings.Split(cats, ",") + for _, r := range catlist { + ranges := strings.SplitN(r, ".", 2) + if len(ranges) > 1 { + catstart, err := parseLevelItem(ranges[0], category) + if err != nil { + return nil, err + } + catend, err := parseLevelItem(ranges[1], category) + if err != nil { + return nil, err + } + for i := catstart; i <= catend; i++ { + bitset.Set(i) + } + } else { + cat, err := parseLevelItem(ranges[0], category) + if err != nil { + return nil, err + } + bitset.Set(cat) + } + } + + return bitset, nil +} + +// parseLevelItem parses and verifies that a sensitivity or category are valid +func parseLevelItem(s string, sep levelItem) (uint, error) { + if len(s) < minSensLen || levelItem(s[0]) != sep { + return 0, ErrLevelSyntax + } + val, err := strconv.ParseUint(s[1:], 10, 32) + if err != nil { + return 0, err + } + + return uint(val), nil +} + +// parseLevel fills a level from a string that contains +// a sensitivity and categories +func (l *level) parseLevel(levelStr string) error { + lvl := strings.SplitN(levelStr, ":", 2) + sens, err := parseLevelItem(lvl[0], sensitivity) + if err != nil { + return errors.Wrap(err, "failed to parse sensitivity") + } + l.sens = sens + if len(lvl) > 1 { + cats, err := catsToBitset(lvl[1]) + if err != nil { + return errors.Wrap(err, "failed to parse categories") + } + l.cats = cats + } + + return nil +} + +// rangeStrToMLSRange marshals a string representation of a range. +func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { + mlsRange := &mlsRange{} + levelSlice := strings.SplitN(rangeStr, "-", 2) + + switch len(levelSlice) { + // rangeStr that has a low and a high level, e.g. 
s4:c0.c1023-s6:c0.c1023 + case 2: + mlsRange.high = &level{} + if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { + return nil, errors.Wrapf(err, "failed to parse high level %q", levelSlice[1]) + } + fallthrough + // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 + case 1: + mlsRange.low = &level{} + if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { + return nil, errors.Wrapf(err, "failed to parse low level %q", levelSlice[0]) + } + } + + if mlsRange.high == nil { + mlsRange.high = mlsRange.low + } + + return mlsRange, nil +} + +// bitsetToStr takes a category bitset and returns it in the +// canonical selinux syntax +func bitsetToStr(c *bitset.BitSet) string { + var str string + i, e := c.NextSet(0) + len := 0 + for e { + if len == 0 { + if str != "" { + str += "," + } + str += "c" + strconv.Itoa(int(i)) + } + + next, e := c.NextSet(i + 1) + if e { + // consecutive cats + if next == i+1 { + len++ + i = next + continue + } + } + if len == 1 { + str += ",c" + strconv.Itoa(int(i)) + } else if len > 1 { + str += ".c" + strconv.Itoa(int(i)) + } + if !e { + break + } + len = 0 + i = next + } + + return str +} + +func (l1 *level) equal(l2 *level) bool { + if l2 == nil || l1 == nil { + return l1 == l2 + } + if l1.sens != l2.sens { + return false + } + return l1.cats.Equal(l2.cats) +} + +// String returns an mlsRange as a string. +func (m mlsRange) String() string { + low := "s" + strconv.Itoa(int(m.low.sens)) + if m.low.cats != nil && m.low.cats.Count() > 0 { + low += ":" + bitsetToStr(m.low.cats) + } + + if m.low.equal(m.high) { + return low + } + + high := "s" + strconv.Itoa(int(m.high.sens)) + if m.high.cats != nil && m.high.cats.Count() > 0 { + high += ":" + bitsetToStr(m.high.cats) + } + + return low + "-" + high +} + +func max(a, b uint) uint { + if a > b { + return a + } + return b +} + +func min(a, b uint) uint { + if a < b { + return a + } + return b +} + +// calculateGlbLub computes the glb (greatest lower bound) and lub (least upper bound) +// of a source and target range. +// The glblub is calculated as the greater of the low sensitivities and +// the lower of the high sensitivities and the and of each category bitset. +func calculateGlbLub(sourceRange, targetRange string) (string, error) { + s, err := rangeStrToMLSRange(sourceRange) + if err != nil { + return "", err + } + t, err := rangeStrToMLSRange(targetRange) + if err != nil { + return "", err + } + + if s.high.sens < t.low.sens || t.high.sens < s.low.sens { + /* these ranges have no common sensitivities */ + return "", ErrIncomparable + } + + outrange := &mlsRange{low: &level{}, high: &level{}} + + /* take the greatest of the low */ + outrange.low.sens = max(s.low.sens, t.low.sens) + + /* take the least of the high */ + outrange.high.sens = min(s.high.sens, t.high.sens) + + /* find the intersecting categories */ + if s.low.cats != nil && t.low.cats != nil { + outrange.low.cats = s.low.cats.Intersection(t.low.cats) + } + if s.high.cats != nil && t.high.cats != nil { + outrange.high.cats = s.high.cats.Intersection(t.high.cats) + } + + return outrange.String(), nil +} + func readWriteCon(fpath string, val string) (string, error) { if fpath == "" { return "", ErrEmptyPath @@ -461,41 +666,37 @@ func readWriteCon(fpath string, val string) (string, error) { return strings.Trim(retval, "\x00"), nil } -/* -SetExecLabel sets the SELinux label that the kernel will use for any programs -that are executed by the current process thread, or an error. 
-*/ -func SetExecLabel(label string) error { +// setExecLabel sets the SELinux label that the kernel will use for any programs +// that are executed by the current process thread, or an error. +func setExecLabel(label string) error { return writeAttr("exec", label) } -/* -SetTaskLabel sets the SELinux label for the current thread, or an error. -This requires the dyntransition permission. -*/ -func SetTaskLabel(label string) error { +// setTaskLabel sets the SELinux label for the current thread, or an error. +// This requires the dyntransition permission. +func setTaskLabel(label string) error { return writeAttr("current", label) } -// SetSocketLabel takes a process label and tells the kernel to assign the +// setSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created -func SetSocketLabel(label string) error { +func setSocketLabel(label string) error { return writeAttr("sockcreate", label) } -// SocketLabel retrieves the current socket label setting -func SocketLabel() (string, error) { +// socketLabel retrieves the current socket label setting +func socketLabel() (string, error) { return readAttr("sockcreate") } -// PeerLabel retrieves the label of the client on the other side of a socket -func PeerLabel(fd uintptr) (string, error) { +// peerLabel retrieves the label of the client on the other side of a socket +func peerLabel(fd uintptr) (string, error) { return unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) } -// SetKeyLabel takes a process label and tells the kernel to assign the +// setKeyLabel takes a process label and tells the kernel to assign the // label to the next kernel keyring that gets created -func SetKeyLabel(label string) error { +func setKeyLabel(label string) error { err := writeCon("/proc/self/attr/keycreate", label) if os.IsNotExist(errors.Cause(err)) { return nil @@ -506,21 +707,21 @@ func SetKeyLabel(label string) error { return err } -// KeyLabel retrieves the current kernel keyring label setting -func KeyLabel() (string, error) { +// keyLabel retrieves the current kernel keyring label setting +func keyLabel() (string, error) { return readCon("/proc/self/attr/keycreate") } -// Get returns the Context as a string -func (c Context) Get() string { +// get returns the Context as a string +func (c Context) get() string { if c["level"] != "" { return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) } return fmt.Sprintf("%s:%s:%s", c["user"], c["role"], c["type"]) } -// NewContext creates a new Context struct from the specified label -func NewContext(label string) (Context, error) { +// newContext creates a new Context struct from the specified label +func newContext(label string) (Context, error) { c := make(Context) if len(label) != 0 { @@ -538,15 +739,15 @@ func NewContext(label string) (Context, error) { return c, nil } -// ClearLabels clears all reserved labels -func ClearLabels() { +// clearLabels clears all reserved labels +func clearLabels() { state.Lock() state.mcsList = make(map[string]bool) state.Unlock() } -// ReserveLabel reserves the MLS/MCS level component of the specified label -func ReserveLabel(label string) { +// reserveLabel reserves the MLS/MCS level component of the specified label +func reserveLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 3 { @@ -559,8 +760,8 @@ func selinuxEnforcePath() string { return path.Join(getSelinuxMountPoint(), "enforce") } -// EnforceMode returns the current SELinux mode 
Enforcing, Permissive, Disabled -func EnforceMode() int { +// enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func enforceMode() int { var enforce int enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) @@ -574,20 +775,16 @@ func EnforceMode() int { return enforce } -/* -SetEnforceMode sets the current SELinux mode Enforcing, Permissive. -Disabled is not valid, since this needs to be set at boot time. -*/ -func SetEnforceMode(mode int) error { +// setEnforceMode sets the current SELinux mode Enforcing, Permissive. +// Disabled is not valid, since this needs to be set at boot time. +func setEnforceMode(mode int) error { return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0644) } -/* -DefaultEnforceMode returns the systems default SELinux mode Enforcing, -Permissive or Disabled. Note this is is just the default at boot time. -EnforceMode tells you the systems current mode. -*/ -func DefaultEnforceMode() int { +// defaultEnforceMode returns the systems default SELinux mode Enforcing, +// Permissive or Disabled. Note this is is just the default at boot time. +// EnforceMode tells you the systems current mode. +func defaultEnforceMode() int { switch readConfig(selinuxTag) { case "enforcing": return Enforcing @@ -667,11 +864,9 @@ func uniqMcs(catRange uint32) string { return mcs } -/* -ReleaseLabel will unreserve the MLS/MCS Level field of the specified label. -Allowing it to be used by another process. -*/ -func ReleaseLabel(label string) { +// releaseLabel un-reserves the MLS/MCS Level field of the specified label, +// allowing it to be used by another process. +func releaseLabel(label string) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) > 3 { @@ -680,9 +875,9 @@ func ReleaseLabel(label string) { } } -// ROFileLabel returns the specified SELinux readonly file label -func ROFileLabel() string { - return roFileLabel +// roFileLabel returns the specified SELinux readonly file label +func roFileLabel() string { + return readOnlyFileLabel } func openContextFile() (*os.File, error) { @@ -737,11 +932,9 @@ func loadLabels() map[string]string { return labels } -/* -KVMContainerLabels returns the default processLabel and mountLabel to be used -for kvm containers by the calling process. -*/ -func KVMContainerLabels() (string, string) { +// kvmContainerLabels returns the default processLabel and mountLabel to be used +// for kvm containers by the calling process. +func kvmContainerLabels() (string, string) { processLabel := labels["kvm_process"] if processLabel == "" { processLabel = labels["process"] @@ -750,11 +943,9 @@ func KVMContainerLabels() (string, string) { return addMcs(processLabel, labels["file"]) } -/* -InitContainerLabels returns the default processLabel and file labels to be -used for containers running an init system like systemd by the calling process. -*/ -func InitContainerLabels() (string, string) { +// initContainerLabels returns the default processLabel and file labels to be +// used for containers running an init system like systemd by the calling process. +func initContainerLabels() (string, string) { processLabel := labels["init_process"] if processLabel == "" { processLabel = labels["process"] @@ -763,25 +954,23 @@ func InitContainerLabels() (string, string) { return addMcs(processLabel, labels["file"]) } -/* -ContainerLabels returns an allocated processLabel and fileLabel to be used for -container labeling by the calling process. 
-*/ -func ContainerLabels() (processLabel string, fileLabel string) { - if !GetEnabled() { +// containerLabels returns an allocated processLabel and fileLabel to be used for +// container labeling by the calling process. +func containerLabels() (processLabel string, fileLabel string) { + if !getEnabled() { return "", "" } processLabel = labels["process"] fileLabel = labels["file"] - roFileLabel = labels["ro_file"] + readOnlyFileLabel = labels["ro_file"] if processLabel == "" || fileLabel == "" { return "", fileLabel } - if roFileLabel == "" { - roFileLabel = fileLabel + if readOnlyFileLabel == "" { + readOnlyFileLabel = fileLabel } return addMcs(processLabel, fileLabel) @@ -790,7 +979,7 @@ func ContainerLabels() (processLabel string, fileLabel string) { func addMcs(processLabel, fileLabel string) (string, string) { scon, _ := NewContext(processLabel) if scon["level"] != "" { - mcs := uniqMcs(1024) + mcs := uniqMcs(CategoryRange) scon["level"] = mcs processLabel = scon.Get() scon, _ = NewContext(fileLabel) @@ -800,16 +989,14 @@ func addMcs(processLabel, fileLabel string) (string, string) { return processLabel, fileLabel } -// SecurityCheckContext validates that the SELinux label is understood by the kernel -func SecurityCheckContext(val string) error { +// securityCheckContext validates that the SELinux label is understood by the kernel +func securityCheckContext(val string) error { return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0644) } -/* -CopyLevel returns a label with the MLS/MCS level from src label replaced on -the dest label. -*/ -func CopyLevel(src, dest string) (string, error) { +// copyLevel returns a label with the MLS/MCS level from src label replaced on +// the dest label. +func copyLevel(src, dest string) (string, error) { if src == "" { return "", nil } @@ -833,7 +1020,7 @@ func CopyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// Prevent users from relabing system files +// Prevent users from relabeling system files func badPrefix(fpath string) error { if fpath == "" { return ErrEmptyPath @@ -848,10 +1035,10 @@ func badPrefix(fpath string) error { return nil } -// Chcon changes the fpath file object to the SELinux label label. -// If fpath is a directory and recurse is true, Chcon will walk the +// chcon changes the fpath file object to the SELinux label label. +// If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. -func Chcon(fpath string, label string, recurse bool) error { +func chcon(fpath string, label string, recurse bool) error { if fpath == "" { return ErrEmptyPath } @@ -876,9 +1063,9 @@ func Chcon(fpath string, label string, recurse bool) error { }) } -// DupSecOpt takes an SELinux process label and returns security options that +// dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. -func DupSecOpt(src string) ([]string, error) { +func dupSecOpt(src string) ([]string, error) { if src == "" { return nil, nil } @@ -903,8 +1090,8 @@ func DupSecOpt(src string) ([]string, error) { return dup, nil } -// DisableSecOpt returns a security opt that can be used to disable SELinux +// disableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. 
-func DisableSecOpt() []string { +func disableSecOpt() []string { return []string{"disable"} } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index f9f5e2061..c526b210f 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -2,253 +2,147 @@ package selinux -import ( - "errors" -) - -const ( - // Enforcing constant indicate SELinux is in enforcing mode - Enforcing = 1 - // Permissive constant to indicate SELinux is in permissive mode - Permissive = 0 - // Disabled constant to indicate SELinux is disabled - Disabled = -1 -) - -var ( - // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. - ErrMCSAlreadyExists = errors.New("MCS label already exists") - // ErrEmptyPath is returned when an empty path has been specified. - ErrEmptyPath = errors.New("empty path") -) - -// Context is a representation of the SELinux label broken into 4 parts -type Context map[string]string - -// SetDisabled disables selinux support for the package -func SetDisabled() { - return -} - -// GetEnabled returns whether selinux is currently enabled. -func GetEnabled() bool { +func setDisabled() { +} + +func getEnabled() bool { return false } -// ClassIndex returns the int index for an object class in the loaded policy, or -1 and an error -func ClassIndex(class string) (int, error) { +func classIndex(class string) (int, error) { return -1, nil } -// SetFileLabel sets the SELinux label for this path or returns an error. -func SetFileLabel(fpath string, label string) error { +func setFileLabel(fpath string, label string) error { return nil } -// FileLabel returns the SELinux label for this path or returns an error. -func FileLabel(fpath string) (string, error) { +func fileLabel(fpath string) (string, error) { return "", nil } -/* -SetFSCreateLabel tells kernel the label to create all file system objects -created by this task. Setting label="" to return to default. -*/ -func SetFSCreateLabel(label string) error { +func setFSCreateLabel(label string) error { return nil } -/* -FSCreateLabel returns the default label the kernel which the kernel is using -for file system objects created by this task. "" indicates default. -*/ -func FSCreateLabel() (string, error) { +func fsCreateLabel() (string, error) { + return "", nil +} + +func currentLabel() (string, error) { return "", nil } -// CurrentLabel returns the SELinux label of the current process thread, or an error. -func CurrentLabel() (string, error) { +func pidLabel(pid int) (string, error) { return "", nil } -// PidLabel returns the SELinux label of the given pid, or an error. -func PidLabel(pid int) (string, error) { +func execLabel() (string, error) { return "", nil } -/* -ExecLabel returns the SELinux label that the kernel will use for any programs -that are executed by the current process thread, or an error. -*/ -func ExecLabel() (string, error) { +func canonicalizeContext(val string) (string, error) { return "", nil } -/* -CanonicalizeContext takes a context string and writes it to the kernel -the function then returns the context that the kernel will use. 
This function -can be used to see if two contexts are equivalent -*/ -func CanonicalizeContext(val string) (string, error) { +func computeCreateContext(source string, target string, class string) (string, error) { return "", nil } -/* -ComputeCreateContext requests the type transition from source to target for class from the kernel. -*/ -func ComputeCreateContext(source string, target string, class string) (string, error) { +func calculateGlbLub(sourceRange, targetRange string) (string, error) { return "", nil } -/* -SetExecLabel sets the SELinux label that the kernel will use for any programs -that are executed by the current process thread, or an error. -*/ -func SetExecLabel(label string) error { +func setExecLabel(label string) error { return nil } -/* -SetTaskLabel sets the SELinux label for the current thread, or an error. -This requires the dyntransition permission. -*/ -func SetTaskLabel(label string) error { +func setTaskLabel(label string) error { return nil } -/* -SetSocketLabel sets the SELinux label that the kernel will use for any programs -that are executed by the current process thread, or an error. -*/ -func SetSocketLabel(label string) error { +func setSocketLabel(label string) error { return nil } -// SocketLabel retrieves the current socket label setting -func SocketLabel() (string, error) { +func socketLabel() (string, error) { return "", nil } -// PeerLabel retrieves the label of the client on the other side of a socket -func PeerLabel(fd uintptr) (string, error) { +func peerLabel(fd uintptr) (string, error) { return "", nil } -// SetKeyLabel takes a process label and tells the kernel to assign the -// label to the next kernel keyring that gets created -func SetKeyLabel(label string) error { +func setKeyLabel(label string) error { return nil } -// KeyLabel retrieves the current kernel keyring label setting -func KeyLabel() (string, error) { +func keyLabel() (string, error) { return "", nil } -// Get returns the Context as a string -func (c Context) Get() string { +func (c Context) get() string { return "" } -// NewContext creates a new Context struct from the specified label -func NewContext(label string) (Context, error) { +func newContext(label string) (Context, error) { c := make(Context) return c, nil } -// ClearLabels clears all reserved MLS/MCS levels -func ClearLabels() { - return +func clearLabels() { } -// ReserveLabel reserves the MLS/MCS level component of the specified label -func ReserveLabel(label string) { - return +func reserveLabel(label string) { } -// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled -func EnforceMode() int { +func enforceMode() int { return Disabled } -/* -SetEnforceMode sets the current SELinux mode Enforcing, Permissive. -Disabled is not valid, since this needs to be set at boot time. -*/ -func SetEnforceMode(mode int) error { +func setEnforceMode(mode int) error { return nil } -/* -DefaultEnforceMode returns the systems default SELinux mode Enforcing, -Permissive or Disabled. Note this is is just the default at boot time. -EnforceMode tells you the systems current mode. -*/ -func DefaultEnforceMode() int { +func defaultEnforceMode() int { return Disabled } -/* -ReleaseLabel will unreserve the MLS/MCS Level field of the specified label. -Allowing it to be used by another process. 
-*/ -func ReleaseLabel(label string) { - return +func releaseLabel(label string) { } -// ROFileLabel returns the specified SELinux readonly file label -func ROFileLabel() string { +func roFileLabel() string { return "" } -// KVMContainerLabels returns the default processLabel and mountLabel to be used -// for kvm containers by the calling process. -func KVMContainerLabels() (string, string) { +func kvmContainerLabels() (string, string) { return "", "" } -// InitContainerLabels returns the default processLabel and file labels to be -// used for containers running an init system like systemd by the calling -func InitContainerLabels() (string, string) { +func initContainerLabels() (string, string) { return "", "" } -/* -ContainerLabels returns an allocated processLabel and fileLabel to be used for -container labeling by the calling process. -*/ -func ContainerLabels() (processLabel string, fileLabel string) { +func containerLabels() (processLabel string, fileLabel string) { return "", "" } -// SecurityCheckContext validates that the SELinux label is understood by the kernel -func SecurityCheckContext(val string) error { +func securityCheckContext(val string) error { return nil } -/* -CopyLevel returns a label with the MLS/MCS level from src label replaced on -the dest label. -*/ -func CopyLevel(src, dest string) (string, error) { +func copyLevel(src, dest string) (string, error) { return "", nil } -// Chcon changes the `fpath` file object to the SELinux label `label`. -// If `fpath` is a directory and `recurse`` is true, Chcon will walk the -// directory tree setting the label. -func Chcon(fpath string, label string, recurse bool) error { +func chcon(fpath string, label string, recurse bool) error { return nil } -// DupSecOpt takes an SELinux process label and returns security options that -// can be used to set the SELinux Type and Level for future container processes. -func DupSecOpt(src string) ([]string, error) { +func dupSecOpt(src string) ([]string, error) { return nil, nil } -// DisableSecOpt returns a security opt that can be used to disable SELinux -// labeling support for future container processes. 
-func DisableSecOpt() []string { +func disableSecOpt() []string { return []string{"disable"} } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go index 2ee0d0150..63fde1842 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go @@ -48,7 +48,11 @@ func WalkN(root string, walkFn WalkFunc, num int) error { errCh := make(chan error, 1) // get the first error, ignore others // Start walking a tree asap - var err error + var ( + err error + wg sync.WaitGroup + ) + wg.Add(1) go func() { err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { if err != nil { @@ -68,9 +72,9 @@ func WalkN(root string, walkFn WalkFunc, num int) error { if err == nil { close(files) } + wg.Done() }() - var wg sync.WaitGroup wg.Add(num) for i := 0; i < num; i++ { go func() { diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index 6a7e3c5ca..cab87e9d6 100644 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,6 +1,19 @@ Changes by Version ================== +2.25.0 (2020-07-13) +------------------- +## Breaking changes +- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster + + The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments. + The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct, + or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable. + +## Bug fixes +- Do not add invalid context to references (#521) -- Yuri Shkuro + + 2.24.0 (2020-06-14) ------------------- - Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock index 2a5215a50..387958b12 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -142,10 +142,19 @@ version = "v0.0.5" [[projects]] - digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a" + digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" + name = "github.com/stretchr/objx" + packages = ["."] + pruneopts = "UT" + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + digest = "1:d88ba57c4e8f5db6ce9ab6605a89f4542ee751b576884ba5271c2ba3d4b6f2d2" name = "github.com/stretchr/testify" packages = [ "assert", + "mock", "require", "suite", ] @@ -154,6 +163,42 @@ version = "v1.4.0" [[projects]] + digest = "1:5b98956718573850caf7e0fd00b571a6657c4ef1f345ddf0c96b43ce355fe862" + name = "github.com/uber/jaeger-client-go" + packages = [ + ".", + "config", + "crossdock/client", + "crossdock/common", + "crossdock/endtoend", + "crossdock/log", + "crossdock/server", + "crossdock/thrift/tracetest", + "internal/baggage", + "internal/baggage/remote", + "internal/reporterstats", + "internal/spanlog", + "internal/throttler", + "internal/throttler/remote", + "log", + "log/zap/mock_opentracing", + "rpcmetrics", + "testutils", + "thrift", + "thrift-gen/agent", + "thrift-gen/baggage", + "thrift-gen/jaeger", + "thrift-gen/sampling", + "thrift-gen/zipkincore", + "transport", + "transport/zipkin", + "utils", + ] + pruneopts = "UT" 
+ revision = "66c008c3d6ad856cac92a0af53186efbffa8e6a5" + version = "v2.24.0" + +[[projects]] digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3" name = "github.com/uber/jaeger-lib" packages = [ @@ -314,8 +359,36 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", "github.com/stretchr/testify/require", "github.com/stretchr/testify/suite", + "github.com/uber/jaeger-client-go", + "github.com/uber/jaeger-client-go/config", + "github.com/uber/jaeger-client-go/crossdock/client", + "github.com/uber/jaeger-client-go/crossdock/common", + "github.com/uber/jaeger-client-go/crossdock/endtoend", + "github.com/uber/jaeger-client-go/crossdock/log", + "github.com/uber/jaeger-client-go/crossdock/server", + "github.com/uber/jaeger-client-go/crossdock/thrift/tracetest", + "github.com/uber/jaeger-client-go/internal/baggage", + "github.com/uber/jaeger-client-go/internal/baggage/remote", + "github.com/uber/jaeger-client-go/internal/reporterstats", + "github.com/uber/jaeger-client-go/internal/spanlog", + "github.com/uber/jaeger-client-go/internal/throttler", + "github.com/uber/jaeger-client-go/internal/throttler/remote", + "github.com/uber/jaeger-client-go/log", + "github.com/uber/jaeger-client-go/log/zap/mock_opentracing", + "github.com/uber/jaeger-client-go/rpcmetrics", + "github.com/uber/jaeger-client-go/testutils", + "github.com/uber/jaeger-client-go/thrift", + "github.com/uber/jaeger-client-go/thrift-gen/agent", + "github.com/uber/jaeger-client-go/thrift-gen/baggage", + "github.com/uber/jaeger-client-go/thrift-gen/jaeger", + "github.com/uber/jaeger-client-go/thrift-gen/sampling", + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore", + "github.com/uber/jaeger-client-go/transport", + "github.com/uber/jaeger-client-go/transport/zipkin", + "github.com/uber/jaeger-client-go/utils", "github.com/uber/jaeger-lib/metrics", "github.com/uber/jaeger-lib/metrics/metricstest", "github.com/uber/jaeger-lib/metrics/prometheus", diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md index e7b13b1c2..687f5780c 100644 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -61,6 +61,8 @@ JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the coll JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans" `true` or `false` (default `false`). JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`). JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`). +JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables udp connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`). +JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`). JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/. JAEGER_SAMPLER_PARAM | The sampler parameter (number). JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler. 
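The README rows added above correspond to the new reporter fields introduced in the config.go change that follows: periodic re-resolution of the agent's UDP address is enabled by default and can be turned off per tracer or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED. A hedged example of setting the same options programmatically; the service name and agent address are placeholders, not values from this diff:

    package main

    import (
    	"log"
    	"time"

    	"github.com/uber/jaeger-client-go/config"
    )

    func main() {
    	cfg := config.Configuration{
    		ServiceName: "example-service", // placeholder
    		Reporter: &config.ReporterConfig{
    			LocalAgentHostPort: "jaeger-agent.observability:6831", // placeholder
    			// Keep the new default (periodic re-resolution) but poll for
    			// address changes every 10s instead of the default 30s.
    			AttemptReconnectInterval: 10 * time.Second,
    			// Set to true to restore the old resolve-once behaviour.
    			DisableAttemptReconnecting: false,
    		},
    	}
    	tracer, closer, err := cfg.NewTracer()
    	if err != nil {
    		log.Fatalf("failed to initialize tracer: %v", err)
    	}
    	defer closer.Close()
    	_ = tracer
    }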
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go index e6ffb987d..bb1228294 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -22,6 +22,7 @@ import ( "time" "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go/utils" "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/internal/baggage/remote" @@ -124,6 +125,17 @@ type ReporterConfig struct { // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT LocalAgentHostPort string `yaml:"localAgentHostPort"` + // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves + // the agent's hostname and reconnects if there was a change. This option only + // applies if LocalAgentHostPort is specified. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED + DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"` + + // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname + // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL + AttemptReconnectInterval time.Duration + // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL. // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT CollectorEndpoint string `yaml:"collectorEndpoint"` @@ -384,7 +396,7 @@ func (rc *ReporterConfig) NewReporter( metrics *jaeger.Metrics, logger jaeger.Logger, ) (jaeger.Reporter, error) { - sender, err := rc.newTransport() + sender, err := rc.newTransport(logger) if err != nil { return nil, err } @@ -401,7 +413,7 @@ func (rc *ReporterConfig) NewReporter( return reporter, err } -func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { +func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) { switch { case rc.CollectorEndpoint != "": httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)} @@ -410,6 +422,13 @@ func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { } return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil default: - return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) + return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: rc.LocalAgentHostPort, + Logger: logger, + DisableAttemptReconnecting: rc.DisableAttemptReconnecting, + AttemptReconnectInterval: rc.AttemptReconnectInterval, + }, + }) } } diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go index f38eb9d93..92d60cd59 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -24,30 +24,31 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" - "github.com/uber/jaeger-client-go" ) const ( // environment variable names - envServiceName = "JAEGER_SERVICE_NAME" - envDisabled = "JAEGER_DISABLED" - envRPCMetrics = "JAEGER_RPC_METRICS" - envTags = "JAEGER_TAGS" - envSamplerType = "JAEGER_SAMPLER_TYPE" - 
envSamplerParam = "JAEGER_SAMPLER_PARAM" - envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint - envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT" - envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" - envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" - envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" - envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" - envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" - envEndpoint = "JAEGER_ENDPOINT" - envUser = "JAEGER_USER" - envPassword = "JAEGER_PASSWORD" - envAgentHost = "JAEGER_AGENT_HOST" - envAgentPort = "JAEGER_AGENT_PORT" + envServiceName = "JAEGER_SERVICE_NAME" + envDisabled = "JAEGER_DISABLED" + envRPCMetrics = "JAEGER_RPC_METRICS" + envTags = "JAEGER_TAGS" + envSamplerType = "JAEGER_SAMPLER_TYPE" + envSamplerParam = "JAEGER_SAMPLER_PARAM" + envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint + envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT" + envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" + envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" + envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" + envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" + envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" + envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED" + envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL" + envEndpoint = "JAEGER_ENDPOINT" + envUser = "JAEGER_USER" + envPassword = "JAEGER_PASSWORD" + envAgentHost = "JAEGER_AGENT_HOST" + envAgentPort = "JAEGER_AGENT_PORT" ) // FromEnv uses environment variables to set the tracer's Configuration @@ -206,6 +207,24 @@ func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { if useEnv || rc.LocalAgentHostPort == "" { rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) } + + if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + rc.DisableAttemptReconnecting = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e) + } + } + + if !rc.DisableAttemptReconnecting { + if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + rc.AttemptReconnectInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e) + } + } + } } return rc, nil diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go index feaf344ad..2f63d5909 100644 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -22,7 +22,7 @@ import ( const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.24.0" + JaegerClientVersion = "Go-2.25.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. 
JaegerClientVersionTagKey = "jaeger.version" diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go index 1b44f3f8c..ae9d94a9a 100644 --- a/vendor/github.com/uber/jaeger-client-go/span_context.go +++ b/vendor/github.com/uber/jaeger-client-go/span_context.go @@ -212,10 +212,14 @@ func (c SpanContext) SetFirehose() { } func (c SpanContext) String() string { + var flags int32 + if c.samplingState != nil { + flags = c.samplingState.stateFlags.Load() + } if c.traceID.High == 0 { - return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) + return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) } - return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) + return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) } // ContextFromString reconstructs the Context encoded in a string diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go index 8a3fc97ab..477c6eae3 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -216,10 +216,10 @@ func (t *Tracer) startSpanWithOptions( options.StartTime = t.timeNow() } - // Predicate whether the given span context is a valid reference - // which may be used as parent / debug ID / baggage items source - isValidReference := func(ctx SpanContext) bool { - return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0 + // Predicate whether the given span context is an empty reference + // or may be used as parent / debug ID / baggage items source + isEmptyReference := func(ctx SpanContext) bool { + return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0 } var references []Reference @@ -235,7 +235,7 @@ func (t *Tracer) startSpanWithOptions( reflect.ValueOf(ref.ReferencedContext))) continue } - if !isValidReference(ctxRef) { + if isEmptyReference(ctxRef) { continue } @@ -245,14 +245,17 @@ func (t *Tracer) startSpanWithOptions( continue } - references = append(references, Reference{Type: ref.Type, Context: ctxRef}) + if ctxRef.IsValid() { + // we don't want empty context that contains only debug-id or baggage + references = append(references, Reference{Type: ref.Type, Context: ctxRef}) + } if !hasParent { parent = ctxRef hasParent = ref.Type == opentracing.ChildOfRef } } - if !hasParent && isValidReference(parent) { + if !hasParent && !isEmptyReference(parent) { // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from // the FollowFromRef as the parent hasParent = true diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go index 7370d8007..5734819ab 100644 --- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/uber/jaeger-client-go/internal/reporterstats" + "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift" j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" "github.com/uber/jaeger-client-go/utils" @@ -57,35 +58,57 @@ type udpSender struct { failedToEmitSpans int64 } -// NewUDPTransport creates a reporter that 
submits spans to jaeger-agent. +// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should +// be passed to NewUDPTransportWithParams. +type UDPTransportParams struct { + utils.AgentClientUDPParams +} + +// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent. // TODO: (breaking change) move to transport/ package. -func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { - if len(hostPort) == 0 { - hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) +func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) { + if len(params.HostPort) == 0 { + params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) } - if maxPacketSize == 0 { - maxPacketSize = utils.UDPPacketMaxLength + + if params.Logger == nil { + params.Logger = log.StdLogger + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = utils.UDPPacketMaxLength } protocolFactory := thrift.NewTCompactProtocolFactory() // Each span is first written to thriftBuffer to determine its size in bytes. - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize) + client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams) if err != nil { return nil, err } return &udpSender{ client: client, - maxSpanBytes: maxPacketSize - emitBatchOverhead, + maxSpanBytes: params.MaxPacketSize - emitBatchOverhead, thriftBuffer: thriftBuffer, thriftProtocol: thriftProtocol, }, nil } +// NewUDPTransport creates a reporter that submits spans to jaeger-agent. +// TODO: (breaking change) move to transport/ package. +func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { + return NewUDPTransportWithParams(UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }, + }) +} + // SetReporterStats implements reporterstats.Receiver. func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) { s.reporterStats = rs diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go new file mode 100644 index 000000000..0dffc7fa2 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go @@ -0,0 +1,189 @@ +// Copyright (c) 2020 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/log" +) + +// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. 
+type reconnectingUDPConn struct { + hostPort string + resolveFunc resolveFunc + dialFunc dialFunc + logger log.Logger + bufferBytes int64 + + connMtx sync.RWMutex + conn *net.UDPConn + destAddr *net.UDPAddr + closeChan chan struct{} +} + +type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) +type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) + +// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) { + conn := &reconnectingUDPConn{ + hostPort: hostPort, + resolveFunc: resolveFunc, + dialFunc: dialFunc, + logger: logger, + closeChan: make(chan struct{}), + } + + if err := conn.attemptResolveAndDial(); err != nil { + logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout)) + } + + go conn.reconnectLoop(resolveTimeout) + + return conn, nil +} + +func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { + ticker := time.NewTicker(resolveTimeout) + defer ticker.Stop() + + for { + select { + case <-c.closeChan: + return + case <-ticker.C: + if err := c.attemptResolveAndDial(); err != nil { + c.logger.Error(err.Error()) + } + } + } +} + +func (c *reconnectingUDPConn) attemptResolveAndDial() error { + newAddr, err := c.resolveFunc("udp", c.hostPort) + if err != nil { + return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) + } + + c.connMtx.RLock() + curAddr := c.destAddr + c.connMtx.RUnlock() + + // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn + if curAddr != nil && newAddr.String() == curAddr.String() { + return nil + } + + if err := c.attemptDialNewAddr(newAddr); err != nil { + return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) + } + + return nil +} + +func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { + connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) + if err != nil { + return err + } + + if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { + if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { + return err + } + } + + c.connMtx.Lock() + c.destAddr = newAddr + // store prev to close later + prevConn := c.conn + c.conn = connUDP + c.connMtx.Unlock() + + if prevConn != nil { + return prevConn.Close() + } + + return nil +} + +// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning +func (c *reconnectingUDPConn) Write(b []byte) (int, error) { + var bytesWritten int + var err error + + c.connMtx.RLock() + if c.conn == nil { + // if connection is not initialized indicate this with err in order to hook into retry logic + err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") + } else { + bytesWritten, err = c.conn.Write(b) + } + c.connMtx.RUnlock() + + if err == nil { + return bytesWritten, nil + } + + // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again + if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { + 
c.connMtx.RLock() + defer c.connMtx.RUnlock() + return c.conn.Write(b) + } + + // return original error if reconn fails + return bytesWritten, err +} + +// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation +func (c *reconnectingUDPConn) Close() error { + close(c.closeChan) + + // acquire rw lock before closing conn to ensure calls to Write drain + c.connMtx.Lock() + defer c.connMtx.Unlock() + + if c.conn != nil { + return c.conn.Close() + } + + return nil +} + +// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held +// and SetWriteBuffer is called store bufferBytes to be set for new conns +func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { + var err error + + c.connMtx.RLock() + if c.conn != nil { + err = c.conn.SetWriteBuffer(bytes) + } + c.connMtx.RUnlock() + + if err == nil { + atomic.StoreInt64(&c.bufferBytes, int64(bytes)) + } + + return err +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go index fadd73e49..2352643ce 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go @@ -19,7 +19,9 @@ import ( "fmt" "io" "net" + "time" + "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift" "github.com/uber/jaeger-client-go/thrift-gen/agent" @@ -35,41 +37,90 @@ type AgentClientUDP struct { agent.Agent io.Closer - connUDP *net.UDPConn + connUDP udpConn client *agent.AgentClient maxPacketSize int // max size of datagram in bytes thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span } -// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. -func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { - if maxPacketSize == 0 { - maxPacketSize = UDPPacketMaxLength +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should +// be passed to NewAgentClientUDPWithParams. +type AgentClientUDPParams struct { + HostPort string + MaxPacketSize int + Logger log.Logger + DisableAttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP. 
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) { + // validate hostport + if _, _, err := net.SplitHostPort(params.HostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = UDPPacketMaxLength + } + + if params.Logger == nil { + params.Logger = log.StdLogger } - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) protocolFactory := thrift.NewTCompactProtocolFactory() client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } + var connUDP udpConn + var err error - connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err + if params.DisableAttemptReconnecting { + destAddr, err := net.ResolveUDPAddr("udp", params.HostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } else { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } } - if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { return nil, err } - clientUDP := &AgentClientUDP{ + return &AgentClientUDP{ connUDP: connUDP, client: client, - maxPacketSize: maxPacketSize, - thriftBuffer: thriftBuffer} - return clientUDP, nil + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + }, nil +} + +// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { + return NewAgentClientUDPWithParams(AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }) } // EmitZipkinBatch implements EmitZipkinBatch() of Agent interface diff --git a/vendor/github.com/willf/bitset/.gitignore b/vendor/github.com/willf/bitset/.gitignore new file mode 100644 index 000000000..5c204d28b --- /dev/null +++ b/vendor/github.com/willf/bitset/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +target diff --git a/vendor/github.com/willf/bitset/.travis.yml b/vendor/github.com/willf/bitset/.travis.yml new file mode 100644 index 000000000..094aa5ce0 --- /dev/null +++ b/vendor/github.com/willf/bitset/.travis.yml @@ -0,0 +1,37 @@ +language: go + +sudo: false + +branches: + except: + - release + +branches: + only: + - master + - travis + +go: + - "1.11.x" + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; + - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; + - go get github.com/mattn/goveralls + +before_script: + - make deps + +script: + - make qa + +after_failure: + - cat ./target/test/report.xml + +after_success: + - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/willf/bitset/LICENSE b/vendor/github.com/willf/bitset/LICENSE new file mode 100644 index 000000000..59cab8a93 --- /dev/null +++ b/vendor/github.com/willf/bitset/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Will Fitzgerald. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
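For context on the jaeger-client-go changes above: the new `DisableAttemptReconnecting` and `AttemptReconnectInterval` fields on `ReporterConfig` flow through `newTransport` into `NewUDPTransportWithParams` and `NewAgentClientUDPWithParams`, so leaving the disable flag at its zero value selects the re-resolving UDP client added in `reconnecting_udp_conn.go`. A minimal sketch of how a consumer of this vendored client might opt in; the service name and agent address are placeholders, not values from this repository.

```go
package main

import (
	"log"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := config.Configuration{
		ServiceName: "example-service", // placeholder
		Reporter: &config.ReporterConfig{
			// A hostname (rather than an IP) is where the periodic re-resolve helps,
			// e.g. an agent behind a DNS name whose address can change.
			LocalAgentHostPort: "jaeger-agent.example.svc:6831", // placeholder
			// DisableAttemptReconnecting is left false (its zero value), so the
			// reconnecting UDP client is used; re-resolve every minute instead of
			// the 30-second default applied in NewAgentClientUDPWithParams.
			AttemptReconnectInterval: time.Minute,
		},
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()
	opentracing.SetGlobalTracer(tracer)
}
```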
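The same options are also readable from the environment via `FromEnv()`, per the parsing added in `config_env.go`. A hedged sketch using the two new variables; the values are illustrative only.

```go
package main

import (
	"log"
	"os"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	// Illustrative values; in practice these come from the deployment environment.
	os.Setenv("JAEGER_SERVICE_NAME", "example-service")
	os.Setenv("JAEGER_AGENT_HOST", "jaeger-agent.example.svc")
	os.Setenv("JAEGER_AGENT_PORT", "6831")
	// Parsed with time.ParseDuration; only consulted while reconnecting is enabled.
	os.Setenv("JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL", "45s")
	// Setting JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true would restore the
	// old resolve-once behaviour and make the interval variable irrelevant.

	cfg, err := config.FromEnv()
	if err != nil {
		log.Fatalf("could not read Jaeger config from env: %v", err)
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}
```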
diff --git a/vendor/github.com/willf/bitset/Makefile b/vendor/github.com/willf/bitset/Makefile new file mode 100644 index 000000000..db8377106 --- /dev/null +++ b/vendor/github.com/willf/bitset/Makefile @@ -0,0 +1,191 @@ +# MAKEFILE +# +# @author Nicola Asuni <info@tecnick.com> +# @link https://github.com/willf/bitset +# ------------------------------------------------------------------------------ + +# List special make targets that are not associated with files +.PHONY: help all test format fmtcheck vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan qa deps clean nuke + +# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS). +SHELL=/bin/bash + +# CVS path (path to the parent dir containing the project) +CVSPATH=github.com/willf + +# Project owner +OWNER=willf + +# Project vendor +VENDOR=willf + +# Project name +PROJECT=bitset + +# Project version +VERSION=$(shell cat VERSION) + +# Name of RPM or DEB package +PKGNAME=${VENDOR}-${PROJECT} + +# Current directory +CURRENTDIR=$(shell pwd) + +# GO lang path +ifneq ($(GOPATH),) + ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),) + # the defined GOPATH is not valid + GOPATH= + endif +endif +ifeq ($(GOPATH),) + # extract the GOPATH + GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR))) +endif + +# --- MAKE TARGETS --- + +# Display general help about this command +help: + @echo "" + @echo "$(PROJECT) Makefile." + @echo "GOPATH=$(GOPATH)" + @echo "The following commands are available:" + @echo "" + @echo " make qa : Run all the tests" + @echo " make test : Run the unit tests" + @echo "" + @echo " make format : Format the source code" + @echo " make fmtcheck : Check if the source code has been formatted" + @echo " make vet : Check for suspicious constructs" + @echo " make lint : Check for style errors" + @echo " make coverage : Generate the coverage report" + @echo " make cyclo : Generate the cyclomatic complexity report" + @echo " make ineffassign : Detect ineffectual assignments" + @echo " make misspell : Detect commonly misspelled words in source files" + @echo " make structcheck : Find unused struct fields" + @echo " make varcheck : Find unused global variables and constants" + @echo " make errcheck : Check that error return values are used" + @echo " make gosimple : Suggest code simplifications" + @echo " make astscan : GO AST scanner" + @echo "" + @echo " make docs : Generate source code documentation" + @echo "" + @echo " make deps : Get the dependencies" + @echo " make clean : Remove any build artifact" + @echo " make nuke : Deletes any intermediate file" + @echo "" + +# Alias for help target +all: help + +# Run the unit tests +test: + @mkdir -p target/test + @mkdir -p target/report + GOPATH=$(GOPATH) \ + go test \ + -covermode=atomic \ + -bench=. \ + -race \ + -cpuprofile=target/report/cpu.out \ + -memprofile=target/report/mem.out \ + -mutexprofile=target/report/mutex.out \ + -coverprofile=target/report/coverage.out \ + -v ./... | \ + tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \ + test $${PIPESTATUS[0]} -eq 0 + +# Format the source code +format: + @find . -type f -name "*.go" -exec gofmt -s -w {} \; + +# Check if the source code has been formatted +fmtcheck: + @mkdir -p target + @find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff + @test ! 
-s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } + +# Check for syntax errors +vet: + GOPATH=$(GOPATH) go vet . + +# Check for style errors +lint: + GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint . + +# Generate the coverage report +coverage: + @mkdir -p target/report + GOPATH=$(GOPATH) \ + go tool cover -html=target/report/coverage.out -o target/report/coverage.html + +# Report cyclomatic complexity +cyclo: + @mkdir -p target/report + GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Detect ineffectual assignments +ineffassign: + @mkdir -p target/report + GOPATH=$(GOPATH) ineffassign ./ | tee target/report/ineffassign.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Detect commonly misspelled words in source files +misspell: + @mkdir -p target/report + GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 + +# Find unused struct fields +structcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) structcheck -a ./ | tee target/report/structcheck.txt + +# Find unused global variables and constants +varcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) varcheck -e ./ | tee target/report/varcheck.txt + +# Check that error return values are used +errcheck: + @mkdir -p target/report + GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt + +# AST scanner +astscan: + @mkdir -p target/report + GOPATH=$(GOPATH) gosec . | tee target/report/astscan.txt ; test $${PIPESTATUS[0]} -eq 0 || true + +# Generate source docs +docs: + @mkdir -p target/docs + nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & + wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` + @echo '<html><head><meta http-equiv="refresh" content="0;./127.0.0.1:6060/pkg/'${CVSPATH}'/'${PROJECT}'/index.html"/></head><a href="./127.0.0.1:6060/pkg/'${CVSPATH}'/'${PROJECT}'/index.html">'${PKGNAME}' Documentation ...</a></html>' > target/docs/index.html + +# Alias to run all quality-assurance checks +qa: fmtcheck test vet lint coverage cyclo ineffassign misspell structcheck varcheck errcheck gosimple astscan + +# --- INSTALL --- + +# Get the dependencies +deps: + GOPATH=$(GOPATH) go get ./... + GOPATH=$(GOPATH) go get golang.org/x/lint/golint + GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report + GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov + GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo + GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign + GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell + GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck + GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck + GOPATH=$(GOPATH) go get github.com/kisielk/errcheck + GOPATH=$(GOPATH) go get github.com/securego/gosec/cmd/gosec/... + +# Remove any build artifact +clean: + GOPATH=$(GOPATH) go clean ./... + +# Deletes any intermediate file +nuke: + rm -rf ./target + GOPATH=$(GOPATH) go clean -i ./... 
diff --git a/vendor/github.com/willf/bitset/README.md b/vendor/github.com/willf/bitset/README.md new file mode 100644 index 000000000..6c62b20c6 --- /dev/null +++ b/vendor/github.com/willf/bitset/README.md @@ -0,0 +1,96 @@ +# bitset + +*Go language library to map between non-negative integers and boolean values* + +[![Master Build Status](https://secure.travis-ci.org/willf/bitset.png?branch=master)](https://travis-ci.org/willf/bitset?branch=master) +[![Master Coverage Status](https://coveralls.io/repos/willf/bitset/badge.svg?branch=master&service=github)](https://coveralls.io/github/willf/bitset?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) +[![GoDoc](https://godoc.org/github.com/willf/bitset?status.svg)](http://godoc.org/github.com/willf/bitset) + + +## Description + +Package bitset implements bitsets, a mapping between non-negative integers and boolean values. +It should be more efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing individual integers. + +But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. + +Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. + +### Example use: + +```go +package main + +import ( + "fmt" + "math/rand" + + "github.com/willf/bitset" +) + +func main() { + fmt.Printf("Hello from BitSet!\n") + var b bitset.BitSet + // play some Go Fish + for i := 0; i < 100; i++ { + card1 := uint(rand.Intn(52)) + card2 := uint(rand.Intn(52)) + b.Set(card1) + if b.Test(card2) { + fmt.Println("Go Fish!") + } + b.Clear(card1) + } + + // Chaining + b.Set(10).Set(11) + + for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { + fmt.Println("The following bit is set:", i) + } + if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { + fmt.Println("Intersection works.") + } else { + fmt.Println("Intersection doesn't work???") + } +} +``` + +As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. + +Godoc documentation is at: https://godoc.org/github.com/willf/bitset + + +## Implementation Note + +Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. + +It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. + +## Installation + +```bash +go get github.com/willf/bitset +``` + +## Contributing + +If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") + +This project include a Makefile that allows you to test and build the project with simple commands. 
+To see all available options: +```bash +make help +``` + +## Running all tests + +Before committing the code, please check if it passes all tests using (note: this will install some dependencies): +```bash +make qa +``` diff --git a/vendor/github.com/willf/bitset/azure-pipelines.yml b/vendor/github.com/willf/bitset/azure-pipelines.yml new file mode 100644 index 000000000..f9b295918 --- /dev/null +++ b/vendor/github.com/willf/bitset/azure-pipelines.yml @@ -0,0 +1,39 @@ +# Go +# Build your Go project. +# Add steps that test, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/languages/go + +trigger: +- master + +pool: + vmImage: 'Ubuntu-16.04' + +variables: + GOBIN: '$(GOPATH)/bin' # Go binaries path + GOROOT: '/usr/local/go1.11' # Go installation path + GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path + modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code + +steps: +- script: | + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + shopt -s extglob + shopt -s dotglob + mv !(gopath) '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + displayName: 'Set up the Go workspace' + +- script: | + go version + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + go build -v . + workingDirectory: '$(modulePath)' + displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/willf/bitset/bitset.go b/vendor/github.com/willf/bitset/bitset.go new file mode 100644 index 000000000..22e5d42e5 --- /dev/null +++ b/vendor/github.com/willf/bitset/bitset.go @@ -0,0 +1,879 @@ +/* +Package bitset implements bitsets, a mapping +between non-negative integers and boolean values. It should be more +efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing +individual integers. + +But it also provides set intersection, union, difference, +complement, and symmetric operations, as well as tests to +check whether any, all, or no bits are set, and querying a +bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the +memory allocation is approximately Max bits, where Max is +the largest set bit. BitSets are never shrunk. On creation, +a hint can be given for the number of bits that will be used. + +Many of the methods, including Set,Clear, and Flip, return +a BitSet pointer, which allows for chaining. + +Example use: + + import "bitset" + var b BitSet + b.Set(10).Set(11) + if b.Test(1000) { + b.Clear(1000) + } + if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { + fmt.Println("Intersection works.") + } + +As an alternative to BitSets, one should check out the 'big' package, +which provides a (less set-theoretical) view of bitsets. 
+ +*/ +package bitset + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// the wordSize of a bit set +const wordSize = uint(64) + +// log2WordSize is lg(wordSize) +const log2WordSize = uint(6) + +// allBits has every bit set +const allBits uint64 = 0xffffffffffffffff + +// default binary BigEndian +var binaryOrder binary.ByteOrder = binary.BigEndian + +// default json encoding base64.URLEncoding +var base64Encoding = base64.URLEncoding + +// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) +func Base64StdEncoding() { base64Encoding = base64.StdEncoding } + +// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) +func LittleEndian() { binaryOrder = binary.LittleEndian } + +// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. +type BitSet struct { + length uint + set []uint64 +} + +// Error is used to distinguish errors (panics) generated in this package. +type Error string + +// safeSet will fixup b.set to be non-nil and return the field value +func (b *BitSet) safeSet() []uint64 { + if b.set == nil { + b.set = make([]uint64, wordsNeeded(0)) + } + return b.set +} + +// From is a constructor used to create a BitSet from an array of integers +func From(buf []uint64) *BitSet { + return &BitSet{uint(len(buf)) * 64, buf} +} + +// Bytes returns the bitset as array of integers +func (b *BitSet) Bytes() []uint64 { + return b.set +} + +// wordsNeeded calculates the number of words needed for i bits +func wordsNeeded(i uint) int { + if i > (Cap() - wordSize + 1) { + return int(Cap() >> log2WordSize) + } + return int((i + (wordSize - 1)) >> log2WordSize) +} + +// New creates a new BitSet with a hint that length bits will be required +func New(length uint) (bset *BitSet) { + defer func() { + if r := recover(); r != nil { + bset = &BitSet{ + 0, + make([]uint64, 0), + } + } + }() + + bset = &BitSet{ + length, + make([]uint64, wordsNeeded(length)), + } + + return bset +} + +// Cap returns the total possible capacity, or number of bits +func Cap() uint { + return ^uint(0) +} + +// Len returns the number of bits in the BitSet. +// Note the difference to method Count, see example. +func (b *BitSet) Len() uint { + return b.length +} + +// extendSetMaybe adds additional words to incorporate new bits if needed +func (b *BitSet) extendSetMaybe(i uint) { + if i >= b.length { // if we need more bits, make 'em + nsize := wordsNeeded(i + 1) + if b.set == nil { + b.set = make([]uint64, nsize) + } else if cap(b.set) >= nsize { + b.set = b.set[:nsize] // fast resize + } else if len(b.set) < nsize { + newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x + copy(newset, b.set) + b.set = newset + } + b.length = i + 1 + } +} + +// Test whether bit i is set. 
+func (b *BitSet) Test(i uint) bool { + if i >= b.length { + return false + } + return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 +} + +// Set bit i to 1 +func (b *BitSet) Set(i uint) *BitSet { + b.extendSetMaybe(i) + b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) + return b +} + +// Clear bit i to 0 +func (b *BitSet) Clear(i uint) *BitSet { + if i >= b.length { + return b + } + b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) + return b +} + +// SetTo sets bit i to value +func (b *BitSet) SetTo(i uint, value bool) *BitSet { + if value { + return b.Set(i) + } + return b.Clear(i) +} + +// Flip bit at i +func (b *BitSet) Flip(i uint) *BitSet { + if i >= b.length { + return b.Set(i) + } + b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) + return b +} + +// Shrink shrinks BitSet to desired length in bits. It clears all bits > length +// and reduces the size and length of the set. +// +// A new slice is allocated to store the new bits, so you may see an increase in +// memory usage until the GC runs. Normally this should not be a problem, but if you +// have an extremely large BitSet its important to understand that the old BitSet will +// remain in memory until the GC frees it. +func (b *BitSet) Shrink(length uint) *BitSet { + idx := wordsNeeded(length + 1) + if idx > len(b.set) { + return b + } + shrunk := make([]uint64, idx) + copy(shrunk, b.set[:idx]) + b.set = shrunk + b.length = length + 1 + b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)) - 1)) + return b +} + +// InsertAt takes an index which indicates where a bit should be +// inserted. Then it shifts all the bits in the set to the left by 1, starting +// from the given index position, and sets the index position to 0. +// +// Depending on the size of your BitSet, and where you are inserting the new entry, +// this method could be extremely slow and in some cases might cause the entire BitSet +// to be recopied. 
+func (b *BitSet) InsertAt(idx uint) *BitSet { + insertAtElement := (idx >> log2WordSize) + + // if length of set is a multiple of wordSize we need to allocate more space first + if b.isLenExactMultiple() { + b.set = append(b.set, uint64(0)) + } + + var i uint + for i = uint(len(b.set) - 1); i > insertAtElement; i-- { + // all elements above the position where we want to insert can simply by shifted + b.set[i] <<= 1 + + // we take the most significant bit of the previous element and set it as + // the least significant bit of the current element + b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 + } + + // generate a mask to extract the data that we need to shift left + // within the element where we insert a bit + dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1) + + // extract that data that we'll shift + data := b.set[i] & dataMask + + // set the positions of the data mask to 0 in the element where we insert + b.set[i] &= ^dataMask + + // shift data mask to the left and insert its data to the slice element + b.set[i] |= data << 1 + + // add 1 to length of BitSet + b.length++ + + return b +} + +// String creates a string representation of the Bitmap +func (b *BitSet) String() string { + // follows code from https://github.com/RoaringBitmap/roaring + var buffer bytes.Buffer + start := []byte("{") + buffer.Write(start) + counter := 0 + i, e := b.NextSet(0) + for e { + counter = counter + 1 + // to avoid exhausting the memory + if counter > 0x40000 { + buffer.WriteString("...") + break + } + buffer.WriteString(strconv.FormatInt(int64(i), 10)) + i, e = b.NextSet(i + 1) + if e { + buffer.WriteString(",") + } + } + buffer.WriteString("}") + return buffer.String() +} + +// DeleteAt deletes the bit at the given index position from +// within the bitset +// All the bits residing on the left of the deleted bit get +// shifted right by 1 +// The running time of this operation may potentially be +// relatively slow, O(length) +func (b *BitSet) DeleteAt(i uint) *BitSet { + // the index of the slice element where we'll delete a bit + deleteAtElement := i >> log2WordSize + + // generate a mask for the data that needs to be shifted right + // within that slice element that gets modified + dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) + + // extract the data that we'll shift right from the slice element + data := b.set[deleteAtElement] & dataMask + + // set the masked area to 0 while leaving the rest as it is + b.set[deleteAtElement] &= ^dataMask + + // shift the previously extracted data to the right and then + // set it in the previously masked area + b.set[deleteAtElement] |= (data >> 1) & dataMask + + // loop over all the consecutive slice elements to copy each + // lowest bit into the highest position of the previous element, + // then shift the entire content to the right by 1 + for i := int(deleteAtElement) + 1; i < len(b.set); i++ { + b.set[i-1] |= (b.set[i] & 1) << 63 + b.set[i] >>= 1 + } + + b.length = b.length - 1 + + return b +} + +// NextSet returns the next bit set from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no set bit found) +// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} +func (b *BitSet) NextSet(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + if w != 0 { + return i + trailingZeroes64(w), true + } + x = x + 1 + for x < len(b.set) { + if b.set[x] != 0 { + return uint(x)*wordSize + 
trailingZeroes64(b.set[x]), true + } + x = x + 1 + + } + return 0, false +} + +// NextSetMany returns many next bit sets from the specified index, +// including possibly the current index and up to cap(buffer). +// If the returned slice has len zero, then no more set bits were found +// +// buffer := make([]uint, 256) // this should be reused +// j := uint(0) +// j, buffer = bitmap.NextSetMany(j, buffer) +// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { +// for k := range buffer { +// do something with buffer[k] +// } +// j += 1 +// } +// +func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { + myanswer := buffer + capacity := cap(buffer) + x := int(i >> log2WordSize) + if x >= len(b.set) || capacity == 0 { + return 0, myanswer[:0] + } + skip := i & (wordSize - 1) + word := b.set[x] >> skip + myanswer = myanswer[:capacity] + size := int(0) + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + i + size++ + if size == capacity { + goto End + } + word = word ^ t + } + x++ + for idx, word := range b.set[x:] { + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + (uint(x+idx) << 6) + size++ + if size == capacity { + goto End + } + word = word ^ t + } + } +End: + if size > 0 { + return myanswer[size-1], myanswer[:size] + } + return 0, myanswer[:0] +} + +// NextClear returns the next clear bit from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no bit found i.e. all bits are set) +func (b *BitSet) NextClear(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + wA := allBits >> (i & (wordSize - 1)) + index := i + trailingZeroes64(^w) + if w != wA && index < b.length { + return index, true + } + x++ + for x < len(b.set) { + index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) + if b.set[x] != allBits && index < b.length { + return index, true + } + x++ + } + return 0, false +} + +// ClearAll clears the entire BitSet +func (b *BitSet) ClearAll() *BitSet { + if b != nil && b.set != nil { + for i := range b.set { + b.set[i] = 0 + } + } + return b +} + +// wordCount returns the number of words used in a bit set +func (b *BitSet) wordCount() int { + return len(b.set) +} + +// Clone this BitSet +func (b *BitSet) Clone() *BitSet { + c := New(b.length) + if b.set != nil { // Clone should not modify current object + copy(c.set, b.set) + } + return c +} + +// Copy into a destination BitSet +// Returning the size of the destination BitSet +// like array copy +func (b *BitSet) Copy(c *BitSet) (count uint) { + if c == nil { + return + } + if b.set != nil { // Copy should not modify current object + copy(c.set, b.set) + } + count = c.length + if b.length < c.length { + count = b.length + } + return +} + +// Count (number of set bits). +// Also known as "popcount" or "popularity count". +func (b *BitSet) Count() uint { + if b != nil && b.set != nil { + return uint(popcntSlice(b.set)) + } + return 0 +} + +// Equal tests the equivalence of two BitSets. 
+// False if they are of different sizes, otherwise true +// only if all the same bits are set +func (b *BitSet) Equal(c *BitSet) bool { + if c == nil || b == nil { + return c == b + } + if b.length != c.length { + return false + } + if b.length == 0 { // if they have both length == 0, then could have nil set + return true + } + // testing for equality shoud not transform the bitset (no call to safeSet) + + for p, v := range b.set { + if c.set[p] != v { + return false + } + } + return true +} + +func panicIfNull(b *BitSet) { + if b == nil { + panic(Error("BitSet must not be null")) + } +} + +// Difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + result = b.Clone() // clone b (in case b is bigger than compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + result.set[i] = b.set[i] &^ compare.set[i] + } + return +} + +// DifferenceCardinality computes the cardinality of the differnce +func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + cnt := uint64(0) + cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) + cnt += popcntSlice(b.set[l:]) + return uint(cnt) +} + +// InPlaceDifference computes the difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) InPlaceDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &^= compare.set[i] + } +} + +// Convenience function: return two bitsets ordered by +// increasing length. Note: neither can be nil +func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { + if a.length <= b.length { + ap, bp = a, b + } else { + ap, bp = b, a + } + return +} + +// Intersection of base set and other set +// This is the BitSet equivalent of & (and) +func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = New(b.length) + for i, word := range b.set { + result.set[i] = word & compare.set[i] + } + return +} + +// IntersectionCardinality computes the cardinality of the union +func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntAndSlice(b.set, compare.set) + return uint(cnt) +} + +// InPlaceIntersection destructively computes the intersection of +// base set and the compare set. 
+// This is the BitSet equivalent of & (and) +func (b *BitSet) InPlaceIntersection(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &= compare.set[i] + } + for i := l; i < len(b.set); i++ { + b.set[i] = 0 + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } +} + +// Union of base set and other set +// This is the BitSet equivalent of | (or) +func (b *BitSet) Union(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word | compare.set[i] + } + return +} + +// UnionCardinality computes the cardinality of the uniton of the base set +// and the compare set. +func (b *BitSet) UnionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntOrSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceUnion creates the destructive union of base set and compare set. +// This is the BitSet equivalent of | (or). +func (b *BitSet) InPlaceUnion(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] |= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + // compare is bigger, so clone it + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word ^ compare.set[i] + } + return +} + +// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference +func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntXorSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] ^= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// Is the length an exact multiple of word sizes? 
+func (b *BitSet) isLenExactMultiple() bool { + return b.length%wordSize == 0 +} + +// Clean last word by setting unused bits to 0 +func (b *BitSet) cleanLastWord() { + if !b.isLenExactMultiple() { + b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) + } +} + +// Complement computes the (local) complement of a biset (up to length bits) +func (b *BitSet) Complement() (result *BitSet) { + panicIfNull(b) + result = New(b.length) + for i, word := range b.set { + result.set[i] = ^word + } + result.cleanLastWord() + return +} + +// All returns true if all bits are set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) All() bool { + panicIfNull(b) + return b.Count() == b.length +} + +// None returns true if no bit is set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) None() bool { + panicIfNull(b) + if b != nil && b.set != nil { + for _, word := range b.set { + if word > 0 { + return false + } + } + return true + } + return true +} + +// Any returns true if any bit is set, false otherwise +func (b *BitSet) Any() bool { + panicIfNull(b) + return !b.None() +} + +// IsSuperSet returns true if this is a superset of the other set +func (b *BitSet) IsSuperSet(other *BitSet) bool { + for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { + if !b.Test(i) { + return false + } + } + return true +} + +// IsStrictSuperSet returns true if this is a strict superset of the other set +func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { + return b.Count() > other.Count() && b.IsSuperSet(other) +} + +// DumpAsBits dumps a bit set as a string of bits +func (b *BitSet) DumpAsBits() string { + if b.set == nil { + return "." + } + buffer := bytes.NewBufferString("") + i := len(b.set) - 1 + for ; i >= 0; i-- { + fmt.Fprintf(buffer, "%064b.", b.set[i]) + } + return buffer.String() +} + +// BinaryStorageSize returns the binary storage requirements +func (b *BitSet) BinaryStorageSize() int { + return binary.Size(uint64(0)) + binary.Size(b.set) +} + +// WriteTo writes a BitSet to a stream +func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { + length := uint64(b.length) + + // Write length + err := binary.Write(stream, binaryOrder, length) + if err != nil { + return 0, err + } + + // Write set + err = binary.Write(stream, binaryOrder, b.set) + return int64(b.BinaryStorageSize()), err +} + +// ReadFrom reads a BitSet from a stream written using WriteTo +func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { + var length uint64 + + // Read length first + err := binary.Read(stream, binaryOrder, &length) + if err != nil { + return 0, err + } + newset := New(uint(length)) + + if uint64(newset.length) != length { + return 0, errors.New("Unmarshalling error: type mismatch") + } + + // Read remaining bytes as set + err = binary.Read(stream, binaryOrder, newset.set) + if err != nil { + return 0, err + } + + *b = *newset + return int64(b.BinaryStorageSize()), nil +} + +// MarshalBinary encodes a BitSet into a binary form and returns the result. +func (b *BitSet) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + _, err := b.WriteTo(writer) + if err != nil { + return []byte{}, err + } + + err = writer.Flush() + + return buf.Bytes(), err +} + +// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
+func (b *BitSet) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + reader := bufio.NewReader(buf) + + _, err := b.ReadFrom(reader) + + return err +} + +// MarshalJSON marshals a BitSet as a JSON structure +func (b *BitSet) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) + _, err := b.WriteTo(buffer) + if err != nil { + return nil, err + } + + // URLEncode all bytes + return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) +} + +// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON +func (b *BitSet) UnmarshalJSON(data []byte) error { + // Unmarshal as string + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + // URLDecode string + buf, err := base64Encoding.DecodeString(s) + if err != nil { + return err + } + + _, err = b.ReadFrom(bytes.NewReader(buf)) + return err +} diff --git a/vendor/github.com/willf/bitset/popcnt.go b/vendor/github.com/willf/bitset/popcnt.go new file mode 100644 index 000000000..76577a838 --- /dev/null +++ b/vendor/github.com/willf/bitset/popcnt.go @@ -0,0 +1,53 @@ +package bitset + +// bit population count, take from +// https://code.google.com/p/go/issues/detail?id=4988#c11 +// credit: https://code.google.com/u/arnehormann/ +func popcount(x uint64) (n uint64) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return x >> 56 +} + +func popcntSliceGo(s []uint64) uint64 { + cnt := uint64(0) + for _, x := range s { + cnt += popcount(x) + } + return cnt +} + +func popcntMaskSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] &^ m[i]) + } + return cnt +} + +func popcntAndSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] & m[i]) + } + return cnt +} + +func popcntOrSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] | m[i]) + } + return cnt +} + +func popcntXorSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] ^ m[i]) + } + return cnt +} diff --git a/vendor/github.com/willf/bitset/popcnt_19.go b/vendor/github.com/willf/bitset/popcnt_19.go new file mode 100644 index 000000000..fc8ff4f36 --- /dev/null +++ b/vendor/github.com/willf/bitset/popcnt_19.go @@ -0,0 +1,45 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func popcntSlice(s []uint64) uint64 { + var cnt int + for _, x := range s { + cnt += bits.OnesCount64(x) + } + return uint64(cnt) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] &^ m[i]) + } + return uint64(cnt) +} + +func popcntAndSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] & m[i]) + } + return uint64(cnt) +} + +func popcntOrSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] | m[i]) + } + return uint64(cnt) +} + +func popcntXorSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] ^ m[i]) + } + return uint64(cnt) +} diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.go b/vendor/github.com/willf/bitset/popcnt_amd64.go new file mode 100644 index 000000000..4cf64f24a --- /dev/null +++ b/vendor/github.com/willf/bitset/popcnt_amd64.go @@ -0,0 +1,68 @@ +// +build !go1.9 +// +build amd64,!appengine + +package bitset + +// *** the 
following functions are defined in popcnt_amd64.s + +//go:noescape + +func hasAsm() bool + +// useAsm is a flag used to select the GO or ASM implementation of the popcnt function +var useAsm = hasAsm() + +//go:noescape + +func popcntSliceAsm(s []uint64) uint64 + +//go:noescape + +func popcntMaskSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntAndSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntOrSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntXorSliceAsm(s, m []uint64) uint64 + +func popcntSlice(s []uint64) uint64 { + if useAsm { + return popcntSliceAsm(s) + } + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + if useAsm { + return popcntMaskSliceAsm(s, m) + } + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + if useAsm { + return popcntAndSliceAsm(s, m) + } + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + if useAsm { + return popcntOrSliceAsm(s, m) + } + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + if useAsm { + return popcntXorSliceAsm(s, m) + } + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/willf/bitset/popcnt_amd64.s b/vendor/github.com/willf/bitset/popcnt_amd64.s new file mode 100644 index 000000000..666c0dcc1 --- /dev/null +++ b/vendor/github.com/willf/bitset/popcnt_amd64.s @@ -0,0 +1,104 @@ +// +build !go1.9 +// +build amd64,!appengine + +TEXT ·hasAsm(SB),4,$0-1 +MOVQ $1, AX +CPUID +SHRQ $23, CX +ANDQ $1, CX +MOVB CX, ret+0(FP) +RET + +#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 + +TEXT ·popcntSliceAsm(SB),4,$0-32 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntSliceEnd +popcntSliceLoop: +BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX +ADDQ DX, AX +ADDQ $8, SI +LOOP popcntSliceLoop +popcntSliceEnd: +MOVQ AX, ret+24(FP) +RET + +TEXT ·popcntMaskSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntMaskSliceEnd +MOVQ m+24(FP), DI +popcntMaskSliceLoop: +MOVQ (DI), DX +NOTQ DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntMaskSliceLoop +popcntMaskSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntAndSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntAndSliceEnd +MOVQ m+24(FP), DI +popcntAndSliceLoop: +MOVQ (DI), DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntAndSliceLoop +popcntAndSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntOrSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntOrSliceEnd +MOVQ m+24(FP), DI +popcntOrSliceLoop: +MOVQ (DI), DX +ORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntOrSliceLoop +popcntOrSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntXorSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntXorSliceEnd +MOVQ m+24(FP), DI +popcntXorSliceLoop: +MOVQ (DI), DX +XORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntXorSliceLoop +popcntXorSliceEnd: +MOVQ AX, ret+48(FP) +RET diff --git a/vendor/github.com/willf/bitset/popcnt_generic.go b/vendor/github.com/willf/bitset/popcnt_generic.go new file mode 100644 index 000000000..21e0ff7b4 --- /dev/null +++ b/vendor/github.com/willf/bitset/popcnt_generic.go @@ -0,0 +1,24 @@ +// +build !go1.9 +// +build !amd64 appengine + +package 
bitset + +func popcntSlice(s []uint64) uint64 { + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_18.go b/vendor/github.com/willf/bitset/trailing_zeros_18.go new file mode 100644 index 000000000..c52b61be9 --- /dev/null +++ b/vendor/github.com/willf/bitset/trailing_zeros_18.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package bitset + +var deBruijn = [...]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +func trailingZeroes64(v uint64) uint { + return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) +} diff --git a/vendor/github.com/willf/bitset/trailing_zeros_19.go b/vendor/github.com/willf/bitset/trailing_zeros_19.go new file mode 100644 index 000000000..36a988e71 --- /dev/null +++ b/vendor/github.com/willf/bitset/trailing_zeros_19.go @@ -0,0 +1,9 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func trailingZeroes64(v uint64) uint { + return uint(bits.TrailingZeros64(v)) +} |
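The newly vendored `willf/bitset` tree is self-describing, but a short sketch of the API it brings in may help when reviewing the import: this uses only methods visible in the diff (New, Set, Test, Count, NextSet, Intersection, Equal and the JSON round trip) and is not part of the vendored code itself.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/willf/bitset"
)

func main() {
	// New takes a capacity hint; Set returns the BitSet pointer, so calls chain
	// as described in the package documentation.
	b := bitset.New(64).Set(3).Set(10).Set(42)

	fmt.Println(b.Test(10), b.Test(11)) // true false
	fmt.Println(b.Count())              // 3

	// Iterate set bits with NextSet, following the loop shape in its doc comment.
	for i, ok := b.NextSet(0); ok; i, ok = b.NextSet(i + 1) {
		fmt.Println("bit set:", i)
	}

	// Set operations return new BitSets rather than mutating the receiver.
	other := bitset.New(64).Set(10).Set(11)
	fmt.Println(b.Intersection(other).Count()) // 1

	// MarshalJSON/UnmarshalJSON encode the set as base64 (URL encoding by default).
	data, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	var restored bitset.BitSet
	if err := json.Unmarshal(data, &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.Equal(b)) // true
}
```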
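The pre-Go-1.9 fallbacks above rely on a SWAR popcount and a de Bruijn multiply for trailing zeros, while Go 1.9+ builds defer to `math/bits`. A small standalone check, with the two fallback routines copied from the diff and renamed locally (they are unexported in the package), showing they agree with `math/bits` for non-zero inputs:

```go
package main

import (
	"fmt"
	"math/bits"
)

// swarPopcount mirrors the package's popcount fallback: pairwise bit sums,
// then a multiply that accumulates the per-byte counts into the top byte.
func swarPopcount(x uint64) uint64 {
	x -= (x >> 1) & 0x5555555555555555
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f
	x *= 0x0101010101010101
	return x >> 56
}

// deBruijn table copied from trailing_zeros_18.go.
var deBruijn = [...]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

// deBruijnTrailingZeros mirrors trailingZeroes64: isolate the lowest set bit
// with v&-v, multiply by the de Bruijn constant, and index the lookup table.
func deBruijnTrailingZeros(v uint64) uint {
	return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}

func main() {
	for _, v := range []uint64{1, 0x80, 0xdeadbeef, 1 << 63} {
		fmt.Println(swarPopcount(v) == uint64(bits.OnesCount64(v)),
			deBruijnTrailingZeros(v) == uint(bits.TrailingZeros64(v)))
	}
}
```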