aboutsummaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
authorDaniel J Walsh <dwalsh@redhat.com>2017-11-03 19:44:23 +0000
committerDaniel J Walsh <dwalsh@redhat.com>2017-11-04 09:07:47 +0000
commit619637a9197877f3bda54648f9fabc4af90cf9c2 (patch)
tree87c2b0e722100c8068333b686b3636d046bd5dfa /vendor
parent098389dc3e7bbba7c266ad24c909f3a5422e2908 (diff)
downloadpodman-619637a9197877f3bda54648f9fabc4af90cf9c2.tar.gz
podman-619637a9197877f3bda54648f9fabc4af90cf9c2.tar.bz2
podman-619637a9197877f3bda54648f9fabc4af90cf9c2.zip
Handle Linux Capabilities from command line
Had to revendor in docker/docker again, which dropped a bunch of packages. Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/docker/docker/daemon/caps/utils_unix.go131
-rw-r--r--vendor/github.com/docker/docker/hack/README.md60
-rw-r--r--vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md69
-rw-r--r--vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf2
-rw-r--r--vendor/github.com/docker/docker/pkg/pools/pools.go137
5 files changed, 262 insertions, 137 deletions
diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go
new file mode 100644
index 000000000..c99485f51
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go
@@ -0,0 +1,131 @@
+// +build !windows
+
+package caps
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/pkg/stringutils"
+ "github.com/syndtr/gocapability/capability"
+)
+
+var capabilityList Capabilities
+
+func init() {
+ last := capability.CAP_LAST_CAP
+ // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+ if last == capability.Cap(63) {
+ last = capability.CAP_BLOCK_SUSPEND
+ }
+ for _, cap := range capability.List() {
+ if cap > last {
+ continue
+ }
+ capabilityList = append(capabilityList,
+ &CapabilityMapping{
+ Key: "CAP_" + strings.ToUpper(cap.String()),
+ Value: cap,
+ },
+ )
+ }
+}
+
+type (
+ // CapabilityMapping maps linux capability name to its value of capability.Cap type
+ // Capabilities is one of the security systems in Linux Security Module (LSM)
+ // framework provided by the kernel.
+ // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
+ CapabilityMapping struct {
+ Key string `json:"key,omitempty"`
+ Value capability.Cap `json:"value,omitempty"`
+ }
+ // Capabilities contains all CapabilityMapping
+ Capabilities []*CapabilityMapping
+)
+
+// String returns <key> of CapabilityMapping
+func (c *CapabilityMapping) String() string {
+ return c.Key
+}
+
+// GetCapability returns CapabilityMapping which contains specific key
+func GetCapability(key string) *CapabilityMapping {
+ for _, capp := range capabilityList {
+ if capp.Key == key {
+ cpy := *capp
+ return &cpy
+ }
+ }
+ return nil
+}
+
+// GetAllCapabilities returns all of the capabilities
+func GetAllCapabilities() []string {
+ output := make([]string, len(capabilityList))
+ for i, capability := range capabilityList {
+ output[i] = capability.String()
+ }
+ return output
+}
+
+// TweakCapabilities can tweak capabilities by adding or dropping capabilities
+// based on the basics capabilities.
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+ var (
+ newCaps []string
+ allCaps = GetAllCapabilities()
+ )
+
+ // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
+ // Currently they are mixed in here. We should do conversion in one place.
+
+ // look for invalid cap in the drop list
+ for _, cap := range drops {
+ if strings.ToLower(cap) == "all" {
+ continue
+ }
+
+ if !stringutils.InSlice(allCaps, "CAP_"+cap) {
+ return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+ }
+ }
+
+ // handle --cap-add=all
+ if stringutils.InSlice(adds, "all") {
+ basics = allCaps
+ }
+
+ if !stringutils.InSlice(drops, "all") {
+ for _, cap := range basics {
+ // skip `all` already handled above
+ if strings.ToLower(cap) == "all" {
+ continue
+ }
+
+ // if we don't drop `all`, add back all the non-dropped caps
+ if !stringutils.InSlice(drops, cap[4:]) {
+ newCaps = append(newCaps, strings.ToUpper(cap))
+ }
+ }
+ }
+
+ for _, cap := range adds {
+ // skip `all` already handled above
+ if strings.ToLower(cap) == "all" {
+ continue
+ }
+
+ cap = "CAP_" + cap
+
+ if !stringutils.InSlice(allCaps, cap) {
+ return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+ }
+
+ // add cap if not already in the list
+ if !stringutils.InSlice(newCaps, cap) {
+ newCaps = append(newCaps, strings.ToUpper(cap))
+ }
+ }
+ return newCaps, nil
+}
diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md
new file mode 100644
index 000000000..802395d53
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/README.md
@@ -0,0 +1,60 @@
+## About
+
+This directory contains a collection of scripts used to build and manage this
+repository. If there are any issues regarding the intention of a particular
+script (or even part of a certain script), please reach out to us.
+It may help us either refine our current scripts, or add on new ones
+that are appropriate for a given use case.
+
+## DinD (dind.sh)
+
+DinD is a wrapper script which allows Docker to be run inside a Docker
+container. DinD requires the container to
+be run with privileged mode enabled.
+
+## Generate Authors (generate-authors.sh)
+
+Generates AUTHORS; a file with all the names and corresponding emails of
+individual contributors. AUTHORS can be found in the home directory of
+this repository.
+
+## Make
+
+There are two make files, each with different extensions. Neither are supposed
+to be called directly; only invoke `make`. Both scripts run inside a Docker
+container.
+
+### make.ps1
+
+- The Windows native build script that uses PowerShell semantics; it is limited
+unlike `hack\make.sh` since it does not provide support for the full set of
+operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
+does provide support for local Windows development and Windows to Windows CI.
+More information is found within `make.ps1` by the author, @jhowardmsft
+
+### make.sh
+
+- Referenced via `make test` when running tests on a local machine,
+or directly referenced when running tests inside a Docker development container.
+- When running on a local machine, `make test` to run all tests found in
+`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on
+your local machine. The default timeout is set in `make.sh` to 60 minutes
+(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
+all of the tests.
+- When running inside a Docker development container, `hack/make.sh` does
+not have a single target that runs all the tests. You need to provide a
+single command line with multiple targets that performs the same thing.
+An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py`
+- For more information related to testing outside the scope of this README,
+refer to
+[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
+
+## Release (release.sh)
+
+Releases any bundles built by `make` on a public AWS S3 bucket.
+For information regarding configuration, please view `release.sh`.
+
+## Vendor (vendor.sh)
+
+A shell script that is a wrapper around Vndr. For information on how to use
+this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
new file mode 100644
index 000000000..1cea52526
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
@@ -0,0 +1,69 @@
+# Integration Testing on Swarm
+
+IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster
+
+## Architecture
+
+### Master service
+
+ - Works as a funker caller
+ - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
+
+### Worker service
+
+ - Works as a funker callee
+ - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+
+### Client
+
+ - Controls master and workers via `docker stack`
+ - No need to have a local daemon
+
+Typically, the master and workers are supposed to be running on a cloud environment,
+while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
+
+## Requirement
+
+ - Docker daemon 1.13 or later
+ - Private registry for distributed execution with multiple nodes
+
+## Usage
+
+### Step 1: Prepare images
+
+ $ make build-integration-cli-on-swarm
+
+Following environment variables are known to work in this step:
+
+ - `BUILDFLAGS`
+ - `DOCKER_INCREMENTAL_BINARY`
+
+Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
+
+### Step 2: Execute tests
+
+ $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
+
+Following environment variables are known to work in this step:
+
+ - `DOCKER_GRAPHDRIVER`
+ - `DOCKER_EXPERIMENTAL`
+
+#### Flags
+
+Basic flags:
+
+ - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
+
+Experimental flags for mitigating makespan nonuniformity:
+
+ - `-shuffle`: Shuffle the test filter strings
+
+Flags for debugging IT on Swarm itself:
+
+ - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
+ - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
+ - `-dry-run`: skip the actual workload
+ - `-keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
new file mode 100644
index 000000000..efd6d6d04
--- /dev/null
+++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
@@ -0,0 +1,2 @@
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
deleted file mode 100644
index 6a111a3ba..000000000
--- a/vendor/github.com/docker/docker/pkg/pools/pools.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Package pools provides a collection of pools which provide various
-// data types with buffers. These can be used to lower the number of
-// memory allocations and reuse buffers.
-//
-// New pools should be added to this package to allow them to be
-// shared across packages.
-//
-// Utility functions which operate on pools should be added to this
-// package to allow them to be reused.
-package pools
-
-import (
- "bufio"
- "io"
- "sync"
-
- "github.com/docker/docker/pkg/ioutils"
-)
-
-const buffer32K = 32 * 1024
-
-var (
- // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
- BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
- // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
- BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
- buffer32KPool = newBufferPoolWithSize(buffer32K)
-)
-
-// BufioReaderPool is a bufio reader that uses sync.Pool.
-type BufioReaderPool struct {
- pool sync.Pool
-}
-
-// newBufioReaderPoolWithSize is unexported because new pools should be
-// added here to be shared where required.
-func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
- return &BufioReaderPool{
- pool: sync.Pool{
- New: func() interface{} { return bufio.NewReaderSize(nil, size) },
- },
- }
-}
-
-// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
-func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
- buf := bufPool.pool.Get().(*bufio.Reader)
- buf.Reset(r)
- return buf
-}
-
-// Put puts the bufio.Reader back into the pool.
-func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
- b.Reset(nil)
- bufPool.pool.Put(b)
-}
-
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPoolWithSize(size int) *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() interface{} { return make([]byte, size) },
- },
- }
-}
-
-func (bp *bufferPool) Get() []byte {
- return bp.pool.Get().([]byte)
-}
-
-func (bp *bufferPool) Put(b []byte) {
- bp.pool.Put(b)
-}
-
-// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
-func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
- buf := buffer32KPool.Get()
- written, err = io.CopyBuffer(dst, src, buf)
- buffer32KPool.Put(buf)
- return
-}
-
-// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
-// into the pool and closes the reader if it's an io.ReadCloser.
-func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
- return ioutils.NewReadCloserWrapper(r, func() error {
- if readCloser, ok := r.(io.ReadCloser); ok {
- readCloser.Close()
- }
- bufPool.Put(buf)
- return nil
- })
-}
-
-// BufioWriterPool is a bufio writer that uses sync.Pool.
-type BufioWriterPool struct {
- pool sync.Pool
-}
-
-// newBufioWriterPoolWithSize is unexported because new pools should be
-// added here to be shared where required.
-func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
- return &BufioWriterPool{
- pool: sync.Pool{
- New: func() interface{} { return bufio.NewWriterSize(nil, size) },
- },
- }
-}
-
-// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
-func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
- buf := bufPool.pool.Get().(*bufio.Writer)
- buf.Reset(w)
- return buf
-}
-
-// Put puts the bufio.Writer back into the pool.
-func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
- b.Reset(nil)
- bufPool.pool.Put(b)
-}
-
-// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
-// into the pool and closes the writer if it's an io.Writecloser.
-func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
- return ioutils.NewWriteCloserWrapper(w, func() error {
- buf.Flush()
- if writeCloser, ok := w.(io.WriteCloser); ok {
- writeCloser.Close()
- }
- bufPool.Put(buf)
- return nil
- })
-}