-rw-r--r--  .cirrus.yml | 36
-rw-r--r--  .papr.yml | 79
-rw-r--r--  .papr_prepare.sh | 2
-rw-r--r--  Dockerfile.Fedora | 1
-rw-r--r--  cmd/podman/checkpoint.go | 25
-rw-r--r--  cmd/podman/cleanup.go | 40
-rw-r--r--  cmd/podman/common.go | 75
-rw-r--r--  cmd/podman/create_cli_test.go | 2
-rw-r--r--  cmd/podman/kill.go | 48
-rw-r--r--  cmd/podman/libpodruntime/runtime.go | 54
-rw-r--r--  cmd/podman/login.go | 9
-rw-r--r--  cmd/podman/logout.go | 6
-rw-r--r--  cmd/podman/parse.go | 10
-rw-r--r--  cmd/podman/port.go | 3
-rw-r--r--  cmd/podman/ps.go | 377
-rw-r--r--  cmd/podman/restore.go | 28
-rw-r--r--  cmd/podman/rm.go | 64
-rw-r--r--  cmd/podman/shared/container.go | 260
-rw-r--r--  cmd/podman/stats.go | 7
-rw-r--r--  cmd/podman/stop.go | 48
-rw-r--r--  cmd/podman/utils.go | 11
-rwxr-xr-x  contrib/cirrus/build_vm_images.sh | 2
-rw-r--r--  contrib/cirrus/lib.sh | 33
-rw-r--r--  contrib/cirrus/packer/centos_setup.sh | 72
-rw-r--r--  contrib/cirrus/packer/fedora_setup.sh | 16
-rw-r--r--  contrib/cirrus/packer/libpod_images.json | 18
-rw-r--r--  contrib/cirrus/packer/rhel_setup.sh | 76
-rw-r--r--  contrib/cirrus/packer/ubuntu_setup.sh | 11
-rwxr-xr-x  contrib/cirrus/podbot.py | 98
-rwxr-xr-x  contrib/cirrus/setup_environment.sh | 1
-rwxr-xr-x  contrib/cirrus/success.sh | 22
-rw-r--r--  contrib/python/podman/MANIFEST.in | 1
-rwxr-xr-x  contrib/python/podman/test/test_runner.sh | 12
-rw-r--r--  contrib/python/podman/tox.ini | 8
-rw-r--r--  contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py | 4
-rw-r--r--  contrib/python/pypodman/pypodman/lib/podman_parser.py | 4
-rw-r--r--  docs/podman.1.md | 8
-rw-r--r--  libpod/container.go | 2
-rw-r--r--  libpod/container_api.go | 8
-rw-r--r--  libpod/container_easyjson.go | 744
-rw-r--r--  libpod/container_inspect.go | 3
-rw-r--r--  libpod/container_internal.go | 16
-rw-r--r--  libpod/container_internal_linux.go | 9
-rw-r--r--  libpod/oci.go | 29
-rw-r--r--  libpod/options.go | 6
-rw-r--r--  libpod/runtime.go | 45
-rw-r--r--  libpod/runtime_ctr.go | 30
-rw-r--r--  libpod/storage.go | 30
-rw-r--r--  pkg/criu/criu.go | 19
-rw-r--r--  pkg/util/utils.go | 84
-rw-r--r--  test/e2e/checkpoint_test.go | 7
-rw-r--r--  test/e2e/exec_test.go | 1
-rw-r--r--  test/e2e/libpod_suite_test.go | 45
-rw-r--r--  test/e2e/logs_test.go | 12
-rw-r--r--  test/e2e/pod_create_test.go | 2
-rw-r--r--  test/e2e/pod_infra_container_test.go | 2
-rw-r--r--  test/e2e/pod_top_test.go | 4
-rw-r--r--  test/e2e/run_entrypoint_test.go | 8
-rw-r--r--  test/e2e/run_passwd_test.go | 8
-rw-r--r--  test/e2e/run_privileged_test.go | 2
-rw-r--r--  test/e2e/run_test.go | 4
-rw-r--r--  test/e2e/stats_test.go | 19
-rw-r--r--  test/e2e/version_test.go | 2
-rw-r--r--  vendor.conf | 7
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/LICENSE | 201
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/README.md | 27
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/main.go | 250
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/notify.go | 63
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go | 1203
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/test/main.go | 133
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go | 192
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/test/piggie.c | 57
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 16
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 11
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 27
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 17
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 6
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 8
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 39
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/containers/storage/store.go | 45
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf | 2
-rw-r--r--  vendor/github.com/golang/protobuf/LICENSE | 31
-rw-r--r--  vendor/github.com/golang/protobuf/README.md | 241
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go | 229
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go | 970
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 1355
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go | 300
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go | 586
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 898
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go | 311
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 484
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 270
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 872
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 854
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 895
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label.go | 5
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go | 14
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go | 7
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go | 5
105 files changed, 12222 insertions, 1237 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index c00f2d095..87842da74 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -16,11 +16,11 @@ gce_instance:
# Main collection of env. varss to set for all scripts. All others
# are cooked in by $SCRIPT_BASE/setup_environment.sh
env:
+ FEDORA_CNI_COMMIT: "412b6d31280682bb4fab4446f113c22ff1886554"
CNI_COMMIT: "7480240de9749f9a0a5c8614b17f1f03e0c06ab9"
CRIO_COMMIT: "662dbb31b5d4f5ed54511a47cde7190c61c28677"
+ CRIU_COMMIT: "584cbe4643c3fc7dc901ff08bf923ca0fe7326f9"
RUNC_COMMIT: "ad0f5255060d36872be04de22f8731f38ef2d7b1"
- # Enable debugging delay on test-failure if non-empty.
- FLAKE_DEBUG_DELAY: 1
# File to update in home-dir with task-specific env. var values
ENVLIB: ".bash_profile"
# Overrides default location (/tmp/cirrus) for repo clone
@@ -30,6 +30,7 @@ env:
# Save a little typing (path relative to $CIRRUS_WORKING_DIR)
SCRIPT_BASE: "./contrib/cirrus"
PACKER_BASE: "./contrib/cirrus/packer"
+ IRCID: ENCRYPTED[e87bba62a8e924dc70bdb2b66b16f6ab4a60d2870e6e5534ae9e2b0076f483c71c84091c655ca239101e6816c5ec0883]
# Every *_task runs in parallel in separate VMs. The name prefix only for reference
# in WebUI, and will be followed by matrix details. This task does all the
@@ -41,12 +42,11 @@ full_vm_testing_task:
# 'matrix' combinations. All run in parallel.
matrix:
# Images are generated separetly, from build_images_task (below)
- image_name: "ubuntu-1804-bionic-v20180911-libpod-5763563410948096"
+ image_name: "ubuntu-1804-bionic-v20180911-libpod-fce09afe"
# TODO: Make these work (also build_images_task below)
- #image_name: "rhel-server-ec2-7-5-165-1-libpod-5358668723781632"
- #image_name: "centos-7-v20180911-libpod-5358668723781632"
- #image_name: "fedora-cloud-base-28-1-1-7-libpod-5358668723781632"
-
+ #image_name: "rhel-server-ec2-7-5-165-1-libpod-fce09afe"
+ #image_name: "centos-7-v20180911-libpod-fce09afe"
+ #image_name: "fedora-cloud-base-28-1-1-7-libpod-fce09afe"
timeout_in: 120m
# Every *_script runs in sequence, for each task. The name prefix is for
@@ -62,6 +62,8 @@ full_vm_testing_task:
integration_test_script: $SCRIPT_BASE/integration_test.sh
+ success_script: $SCRIPT_BASE/success.sh
+
# This task build new images for future PR testing, but only after a PR merge.
# These images save needing to install/setup the same environment to test every
@@ -82,19 +84,21 @@ build_vm_images_task:
PACKER_BUILDS: "ubuntu-18"
# TODO: Make these work (also full_vm_testing_task above)
# PACKER_BUILDS: "rhel-7,centos-7,fedora-28,ubuntu-18"
- # Command to register a RHEL VM
- RHSM_COMMAND: ENCRYPTED[fec01433222af1ed0b8e40e89e7d18f6ee2fa9f49a1e721dc72f7eed3c740661215d1bd05cb54ac66a1a62116b92bdce]
- # Additional environment variables needed to build GCE images, within a GCE VM
- SERVICE_ACCOUNT: ENCRYPTED[02e03838b1156eb9516c7cc1e888e287910759842275f3c7bc2b4d56075cc6740e29ffa0ab71ebdbbd079673361dd8c9]
- GCE_SSH_USERNAME: ENCRYPTED[a19a4ec62423e3e0fe4e7d1a5c9f11eda8fde321b9047ab5ed5590c2b1d7a2d12091c2be1531f949eae927059c2ae531]
- GCP_PROJECT_ID: ENCRYPTED[77cb2d392bbc8d17412547d7d91f8d190089bf6e6b96eab9927994bbff6ab2c691ba0329ac7a650ba6182fbbab9fb68d]
- # Existing base values to use, output images get epoc stamped names
- PACKER_VER: "1.3.1"
- # low-level base VM image name inputs to packer
CENTOS_BASE_IMAGE: "centos-7-v20180911"
RHEL_BASE_IMAGE: "rhel-server-ec2-7-5-165-1"
FEDORA_BASE_IMAGE: "fedora-cloud-base-28-1-1-7"
UBUNTU_BASE_IMAGE: "ubuntu-1804-bionic-v20180911"
+ # low-level base VM image name inputs to packer
+
+ # Command to register a RHEL VM
+ RHSM_COMMAND: ENCRYPTED[5caa5ff8c5370c3d25c7a1a28168501ab0fa2e5e3b627926f6eaba02b3fed965a7638a6151657809661f8c905c7dc187]
+ # Additional environment variables needed to build GCE images, within a GCE VM
+ SERVICE_ACCOUNT: ENCRYPTED[99e9a0b1c23f8dd29e83dfdf164f064cfd17afd9b895ca3b5e4c41170bd4290a8366fe2ad8e7a210b9f751711d1d002a]
+ GCE_SSH_USERNAME: ENCRYPTED[a7706b9e4b8bbb47f76358df7407f4fffa2e8552531190cc0b3315180c4b50588f560c4f85731e99cb5f43a396778277]
+ GCP_PROJECT_ID: ENCRYPTED[7c80e728e046b1c76147afd156a32c1c57d4a1ac1eab93b7e68e718c61ca8564fc61fef815952b8ae0a64e7034b8fe4f]
+ # Version of packer to use
+ PACKER_VER: "1.3.1"
+
gce_instance:
image_name: "image-builder-image" # Simply CentOS 7 + packer dependencies
diff --git a/.papr.yml b/.papr.yml
index 71f9b1d2f..97068546d 100644
--- a/.papr.yml
+++ b/.papr.yml
@@ -12,6 +12,7 @@ required: true
timeout: 90m
tests:
+ - rpm-ostree usroverlay && rpm -Uvh https://kojipkgs.fedoraproject.org//packages/podman/0.10.1/1.gite4a1553.fc28/x86_64/podman-0.10.1-1.gite4a1553.fc28.x86_64.rpm
- CONTAINER_RUNTIME="podman" sh .papr_prepare.sh
artifacts:
@@ -132,42 +133,42 @@ required: false
timeout: 90m
context: "Fedora 28 Cloud"
-#---
-#
-#host:
-# distro: fedora/29/cloud/pungi
-# specs:
-# ram: 8192
-# cpus: 4
-#packages:
-# - btrfs-progs-devel
-# - glib2-devel
-# - glibc-devel
-# - glibc-static
-# - git
-# - go-md2man
-# - gpgme-devel
-# - libassuan-devel
-# - libgpg-error-devel
-# - libseccomp-devel
-# - libselinux-devel
-# - ostree-devel
-# - pkgconfig
-# - make
-# - nc
-# - go-compilers-golang-compiler
-# - podman
-# - python3-varlink
-# - python3-dateutil
-# - python3-psutil
-# - container-selinux
-#
-#tests:
-# - sed 's/^expand-check.*/expand-check=0/g' -i /etc/selinux/semanage.conf
-# - dnf -y reinstall container-selinux
-# - sh .papr.sh -b -i -t
-#
-#required: false
-#
-#timeout: 90m
-#context: "Fedora 29 Cloud"
+---
+
+host:
+ distro: fedora/29/cloud/pungi
+ specs:
+ ram: 8192
+ cpus: 4
+packages:
+ - btrfs-progs-devel
+ - glib2-devel
+ - glibc-devel
+ - glibc-static
+ - git
+ - go-md2man
+ - gpgme-devel
+ - libassuan-devel
+ - libgpg-error-devel
+ - libseccomp-devel
+ - libselinux-devel
+ - ostree-devel
+ - pkgconfig
+ - make
+ - nc
+ - go-compilers-golang-compiler
+ - podman
+ - python3-varlink
+ - python3-dateutil
+ - python3-psutil
+ - container-selinux
+
+tests:
+ - sed 's/^expand-check.*/expand-check=0/g' -i /etc/selinux/semanage.conf
+ - dnf -y reinstall container-selinux
+ - sh .papr.sh -b -i -t
+
+required: false
+
+timeout: 90m
+context: "Fedora 29 Cloud"
diff --git a/.papr_prepare.sh b/.papr_prepare.sh
index fbb3647ce..e0657dcd2 100644
--- a/.papr_prepare.sh
+++ b/.papr_prepare.sh
@@ -14,4 +14,4 @@ fi
${CONTAINER_RUNTIME} build -t ${IMAGE} -f Dockerfile.${DIST} . 2>build.log
# Run the tests
-${CONTAINER_RUNTIME} run --rm --cap-add all --security-opt seccomp=unconfined --security-opt label=disable --net=host -v $PWD:/go/src/github.com/containers/libpod:Z --workdir /go/src/github.com/containers/libpod -e CGROUP_MANAGER=cgroupfs -e PYTHON=$PYTHON -e STORAGE_OPTIONS="--storage-driver=vfs" -e CRIO_ROOT="/go/src/github.com/containers/libpod" -e PODMAN_BINARY="/usr/bin/podman" -e CONMON_BINARY="/usr/libexec/podman/conmon" -e DIST=$DIST -e CONTAINER_RUNTIME=$CONTAINER_RUNTIME $IMAGE sh ./.papr.sh -b -i -t
+${CONTAINER_RUNTIME} run --rm --privileged --net=host -v $PWD:/go/src/github.com/containers/libpod:Z --workdir /go/src/github.com/containers/libpod -e CGROUP_MANAGER=cgroupfs -e PYTHON=$PYTHON -e STORAGE_OPTIONS="--storage-driver=vfs" -e CRIO_ROOT="/go/src/github.com/containers/libpod" -e PODMAN_BINARY="/usr/bin/podman" -e CONMON_BINARY="/usr/libexec/podman/conmon" -e DIST=$DIST -e CONTAINER_RUNTIME=$CONTAINER_RUNTIME $IMAGE sh ./.papr.sh -b -i -t
diff --git a/Dockerfile.Fedora b/Dockerfile.Fedora
index 30653d06b..c83097227 100644
--- a/Dockerfile.Fedora
+++ b/Dockerfile.Fedora
@@ -31,6 +31,7 @@ RUN dnf -y install btrfs-progs-devel \
nmap-ncat \
xz \
slirp4netns \
+ container-selinux \
iptables && dnf clean all
# Install CNI plugins
diff --git a/cmd/podman/checkpoint.go b/cmd/podman/checkpoint.go
index 8582ce138..bf280920d 100644
--- a/cmd/podman/checkpoint.go
+++ b/cmd/podman/checkpoint.go
@@ -6,6 +6,7 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -22,6 +23,11 @@ var (
Name: "keep, k",
Usage: "keep all temporary checkpoint files",
},
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "checkpoint all running containers",
+ },
+ LatestFlag,
}
checkpointCommand = cli.Command{
Name: "checkpoint",
@@ -45,21 +51,14 @@ func checkpointCmd(c *cli.Context) error {
defer runtime.Shutdown(false)
keep := c.Bool("keep")
- args := c.Args()
- if len(args) < 1 {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- var lastError error
- for _, arg := range args {
- ctr, err := runtime.LookupContainer(arg)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "error looking up container %q", arg)
- continue
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
+
+ for _, ctr := range containers {
if err = ctr.Checkpoint(context.TODO(), keep); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
diff --git a/cmd/podman/cleanup.go b/cmd/podman/cleanup.go
index 3fd150783..bc4af9f50 100644
--- a/cmd/podman/cleanup.go
+++ b/cmd/podman/cleanup.go
@@ -5,7 +5,6 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
@@ -44,43 +43,14 @@ func cleanupCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- args := c.Args()
+ if err := checkAllAndLatest(c); err != nil {
+ return err
+ }
+
+ cleanupContainers, lastError := getAllOrLatestContainers(c, runtime, -1, "all")
ctx := getContext()
- var lastError error
- var cleanupContainers []*libpod.Container
- if c.Bool("all") {
- if c.Bool("lastest") {
- return errors.New("--all and --latest cannot be used together")
- }
- if len(args) != 0 {
- return errors.New("--all and explicit container IDs cannot be used together")
- }
- cleanupContainers, err = runtime.GetContainers()
- if err != nil {
- return errors.Wrapf(err, "unable to get container list")
- }
- } else if c.Bool("latest") {
- if len(args) != 0 {
- return errors.New("--latest and explicit container IDs cannot be used together")
- }
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get latest container")
- }
- cleanupContainers = append(cleanupContainers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- cleanupContainers = append(cleanupContainers, container)
- }
- }
for _, ctr := range cleanupContainers {
if err = ctr.Cleanup(ctx); err != nil {
if lastError != nil {
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index e342659ed..f9e746b28 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -89,6 +89,73 @@ func validateFlags(c *cli.Context, flags []cli.Flag) error {
return nil
}
+// checkAllAndLatest checks that --all and --latest are used correctly
+func checkAllAndLatest(c *cli.Context) error {
+ argLen := len(c.Args())
+ if (c.Bool("all") || c.Bool("latest")) && argLen > 0 {
+ return errors.Errorf("no arguments are needed with --all or --latest")
+ }
+ if c.Bool("all") && c.Bool("latest") {
+ return errors.Errorf("--all and --latest cannot be used together")
+ }
+ if argLen < 1 && !c.Bool("all") && !c.Bool("latest") {
+ return errors.Errorf("you must provide at least one pod name or id")
+ }
+ return nil
+}
+
+// getAllOrLatestContainers tries to return the correct list of containers
+// depending on whether --all, --latest or <container-id> is used.
+// It requires the Context (c) and the Runtime (runtime). As different
+// commands use a different container state for the --all option,
+// the desired state has to be specified in filterState. If no filter
+// is desired, -1 can be used to get all containers. For a better
+// error message when the filter fails, a corresponding verb can be
+// specified, which will then appear in the error message.
+func getAllOrLatestContainers(c *cli.Context, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
+ var containers []*libpod.Container
+ var lastError error
+ var err error
+ if c.Bool("all") {
+ if filterState != -1 {
+ var filterFuncs []libpod.ContainerFilter
+ filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == filterState
+ })
+ containers, err = runtime.GetContainers(filterFuncs...)
+ } else {
+ containers, err = runtime.GetContainers()
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get %s containers", verb)
+ }
+ } else if c.Bool("latest") {
+ lastCtr, err := runtime.GetLatestContainer()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get latest container")
+ }
+ containers = append(containers, lastCtr)
+ } else {
+ args := c.Args()
+ for _, i := range args {
+ container, err := runtime.LookupContainer(i)
+ if err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to find container %s", i)
+ }
+ if container != nil {
+ // This is here to make sure this does not return [<nil>] but only nil
+ containers = append(containers, container)
+ }
+ }
+ }
+
+ return containers, lastError
+}
+
// getContext returns a non-nil, empty context
func getContext() context.Context {
return context.TODO()
@@ -465,3 +532,11 @@ func getAuthFile(authfile string) string {
}
return os.Getenv("REGISTRY_AUTH_FILE")
}
+
+// scrubServer removes 'http://' or 'https://' from the front of the
+// server/registry string if either is there. This will be mostly used
+// for user input from 'podman login' and 'podman logout'.
+func scrubServer(server string) string {
+ server = strings.TrimPrefix(server, "https://")
+ return strings.TrimPrefix(server, "http://")
+}
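
Note: the checkAllAndLatest and getAllOrLatestContainers helpers above replace the per-command --all/--latest boilerplate that this change deletes from checkpoint, cleanup, kill, rm, stop and restore. A minimal sketch of the shared calling pattern follows; it assumes it lives in the cmd/podman package so the helpers and getContext resolve, and the command name and the Cleanup action are illustrative only, not part of the diff:

```go
// Hypothetical subcommand showing the calling pattern the refactored
// commands share. Not part of the diff.
package main

import (
	"fmt"
	"os"

	"github.com/containers/libpod/cmd/podman/libpodruntime"
	"github.com/pkg/errors"
	"github.com/urfave/cli"
)

func exampleCmd(c *cli.Context) error {
	// Reject bad --all/--latest/argument combinations before doing any work.
	if err := checkAllAndLatest(c); err != nil {
		return err
	}

	runtime, err := libpodruntime.GetRuntime(c)
	if err != nil {
		return errors.Wrapf(err, "could not get runtime")
	}
	defer runtime.Shutdown(false)

	// -1 means "no state filter"; passing libpod.ContainerStateRunning
	// instead would restrict --all to running containers.
	containers, lastError := getAllOrLatestContainers(c, runtime, -1, "all")

	for _, ctr := range containers {
		if err := ctr.Cleanup(getContext()); err != nil {
			if lastError != nil {
				fmt.Fprintln(os.Stderr, lastError)
			}
			lastError = errors.Wrapf(err, "failed to clean up container %v", ctr.ID())
		}
	}
	return lastError
}
```

Passing -1 as filterState skips state filtering; commands such as kill and stop pass libpod.ContainerStateRunning so --all only touches running containers.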
diff --git a/cmd/podman/create_cli_test.go b/cmd/podman/create_cli_test.go
index fa128c8e6..9db007ff3 100644
--- a/cmd/podman/create_cli_test.go
+++ b/cmd/podman/create_cli_test.go
@@ -47,7 +47,7 @@ func TestGetAllLabels(t *testing.T) {
}
func TestGetAllLabelsBadKeyValue(t *testing.T) {
- inLabels := []string{"ONE1", "TWO=2"}
+ inLabels := []string{"=badValue", "="}
fileLabels := []string{}
_, err := getAllLabels(fileLabels, inLabels)
assert.Error(t, err, assert.AnError)
diff --git a/cmd/podman/kill.go b/cmd/podman/kill.go
index 56dd170b5..7ca5bd7c5 100644
--- a/cmd/podman/kill.go
+++ b/cmd/podman/kill.go
@@ -41,19 +41,10 @@ var (
// killCmd kills one or more containers with a signal
func killCmd(c *cli.Context) error {
- args := c.Args()
- if (!c.Bool("all") && !c.Bool("latest")) && len(args) == 0 {
- return errors.Errorf("you must specify one or more containers to kill")
- }
- if (c.Bool("all") || c.Bool("latest")) && len(args) > 0 {
- return errors.Errorf("you cannot specify any containers to kill with --latest or --all")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if len(args) < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one container name or id")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
+
if err := validateFlags(c, killFlags); err != nil {
return err
}
@@ -76,38 +67,7 @@ func killCmd(c *cli.Context) error {
killSignal = uint(sysSignal)
}
- var filterFuncs []libpod.ContainerFilter
- var containers []*libpod.Container
- var lastError error
- if c.Bool("all") {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- return state == libpod.ContainerStateRunning
- })
- containers, err = runtime.GetContainers(filterFuncs...)
- if err != nil {
- return errors.Wrapf(err, "unable to get running containers")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get last created container")
- }
- containers = append(containers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- containers = append(containers, container)
- }
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
for _, ctr := range containers {
if err := ctr.Kill(killSignal); err != nil {
diff --git a/cmd/podman/libpodruntime/runtime.go b/cmd/podman/libpodruntime/runtime.go
index a0d497e8e..df422eb81 100644
--- a/cmd/podman/libpodruntime/runtime.go
+++ b/cmd/podman/libpodruntime/runtime.go
@@ -1,21 +1,16 @@
package libpodruntime
import (
- "fmt"
- "os"
- "path/filepath"
-
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
- "github.com/pkg/errors"
"github.com/urfave/cli"
)
// GetRuntime generates a new libpod runtime configured by command line options
func GetRuntime(c *cli.Context) (*libpod.Runtime, error) {
- storageOpts, err := GetDefaultStoreOptions()
+ storageOpts, err := util.GetDefaultStoreOptions()
if err != nil {
return nil, err
}
@@ -28,7 +23,7 @@ func GetContainerRuntime(c *cli.Context) (*libpod.Runtime, error) {
if err != nil {
return nil, err
}
- storageOpts, err := GetDefaultStoreOptions()
+ storageOpts, err := util.GetDefaultStoreOptions()
if err != nil {
return nil, err
}
@@ -37,51 +32,6 @@ func GetContainerRuntime(c *cli.Context) (*libpod.Runtime, error) {
return GetRuntimeWithStorageOpts(c, &storageOpts)
}
-func GetRootlessStorageOpts() (storage.StoreOptions, error) {
- var opts storage.StoreOptions
-
- rootlessRuntime, err := libpod.GetRootlessRuntimeDir()
- if err != nil {
- return opts, err
- }
- opts.RunRoot = filepath.Join(rootlessRuntime, "run")
-
- dataDir := os.Getenv("XDG_DATA_HOME")
- if dataDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return opts, fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
- }
- // runc doesn't like symlinks in the rootfs path, and at least
- // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return opts, errors.Wrapf(err, "cannot resolve %s", home)
- }
- dataDir = filepath.Join(resolvedHome, ".local", "share")
- }
- opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
- opts.GraphDriverName = "vfs"
- return opts, nil
-}
-
-func GetDefaultStoreOptions() (storage.StoreOptions, error) {
- storageOpts := storage.DefaultStoreOptions
- if rootless.IsRootless() {
- var err error
- storageOpts, err = GetRootlessStorageOpts()
- if err != nil {
- return storageOpts, err
- }
-
- storageConf := filepath.Join(os.Getenv("HOME"), ".config/containers/storage.conf")
- if _, err := os.Stat(storageConf); err == nil {
- storage.ReloadConfigurationFile(storageConf, &storageOpts)
- }
- }
- return storageOpts, nil
-}
-
// GetRuntime generates a new libpod runtime configured by command line options
func GetRuntimeWithStorageOpts(c *cli.Context, storageOpts *storage.StoreOptions) (*libpod.Runtime, error) {
options := []libpod.RuntimeOption{}
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index 76f0f50ff..aa26d1466 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -60,10 +60,7 @@ func loginCmd(c *cli.Context) error {
if len(args) == 0 {
return errors.Errorf("registry must be given")
}
- var server string
- if len(args) == 1 {
- server = args[0]
- }
+ server := scrubServer(args[0])
authfile := getAuthFile(c.String("authfile"))
sc := common.GetSystemContext("", authfile, false)
@@ -113,6 +110,10 @@ func getUserAndPass(username, password, userFromAuthFile string) (string, string
if err != nil {
return "", "", errors.Wrapf(err, "error reading username")
}
+ // If no username provided, use userFromAuthFile instead.
+ if strings.TrimSpace(username) == "" {
+ username = userFromAuthFile
+ }
}
if password == "" {
fmt.Print("Password: ")
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 099464e4f..3cdb606b5 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -44,7 +44,7 @@ func logoutCmd(c *cli.Context) error {
}
var server string
if len(args) == 1 {
- server = args[0]
+ server = scrubServer(args[0])
}
authfile := getAuthFile(c.String("authfile"))
@@ -54,14 +54,14 @@ func logoutCmd(c *cli.Context) error {
if err := config.RemoveAllAuthentication(sc); err != nil {
return err
}
- fmt.Println("Remove login credentials for all registries")
+ fmt.Println("Removed login credentials for all registries")
return nil
}
err := config.RemoveAuthentication(sc, server)
switch err {
case nil:
- fmt.Printf("Remove login credentials for %s\n", server)
+ fmt.Printf("Removed login credentials for %s\n", server)
return nil
case config.ErrNotLoggedIn:
return errors.Errorf("Not logged into %s\n", server)
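
Both login and logout now funnel user-supplied registry names through the scrubServer helper added in common.go above. A small, hypothetical illustration of its effect, assuming the cmd/podman package context where the helper is defined:

```go
// exampleScrub is a hypothetical demonstration, not part of the diff.
package main

import "fmt"

func exampleScrub() {
	fmt.Println(scrubServer("https://registry.example.com")) // "registry.example.com"
	fmt.Println(scrubServer("http://registry.example.com"))  // "registry.example.com"
	fmt.Println(scrubServer("registry.example.com"))         // unchanged
}
```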
diff --git a/cmd/podman/parse.go b/cmd/podman/parse.go
index ade592ddf..2e4959656 100644
--- a/cmd/podman/parse.go
+++ b/cmd/podman/parse.go
@@ -198,6 +198,11 @@ func readKVStrings(env map[string]string, files []string, override []string) err
func parseEnv(env map[string]string, line string) error {
data := strings.SplitN(line, "=", 2)
+ // catch invalid variables such as "=" or "=A"
+ if data[0] == "" {
+ return errors.Errorf("invalid environment variable: %q", line)
+ }
+
// trim the front of a variable, but nothing else
name := strings.TrimLeft(data[0], whiteSpaces)
if strings.ContainsAny(name, whiteSpaces) {
@@ -208,10 +213,7 @@ func parseEnv(env map[string]string, line string) error {
env[name] = data[1]
} else {
// if only a pass-through variable is given, clean it up.
- val, exists := os.LookupEnv(name)
- if !exists {
- return errors.Errorf("environment variable %q does not exist", name)
- }
+ val, _ := os.LookupEnv(name)
env[name] = val
}
return nil
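
The parseEnv change tightens one case and relaxes another: an empty variable name ("=" or "=A") is now rejected, while a bare pass-through name that is unset in the caller's environment is stored as an empty value instead of failing. A hypothetical test sketch of that behaviour, assuming it sits next to parse.go in the cmd/podman package and that SOME_UNSET_VARIABLE is not set in the test environment:

```go
// TestParseEnvBehaviour is a hypothetical test, not part of the diff.
package main

import "testing"

func TestParseEnvBehaviour(t *testing.T) {
	env := map[string]string{}

	// Empty names such as "=" or "=badValue" are rejected outright.
	if err := parseEnv(env, "=badValue"); err == nil {
		t.Fatal("expected an error for an empty variable name")
	}

	// A bare, unset pass-through variable now yields an empty value
	// instead of an error.
	if err := parseEnv(env, "SOME_UNSET_VARIABLE"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if env["SOME_UNSET_VARIABLE"] != "" {
		t.Fatal("expected an empty value for an unset pass-through variable")
	}
}
```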
diff --git a/cmd/podman/port.go b/cmd/podman/port.go
index d6497d450..3355e751b 100644
--- a/cmd/podman/port.go
+++ b/cmd/podman/port.go
@@ -104,6 +104,9 @@ func portCmd(c *cli.Context) error {
containers = append(containers, container)
} else if c.Bool("latest") {
container, err = runtime.GetLatestContainer()
+ if err != nil {
+ return errors.Wrapf(err, "unable to get last created container")
+ }
containers = append(containers, container)
} else {
containers, err = runtime.GetRunningContainers()
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 32b3a0574..a468f6121 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -1,11 +1,15 @@
package main
import (
+ "encoding/json"
"fmt"
+ "html/template"
+ "os"
"reflect"
"sort"
"strconv"
"strings"
+ "text/tabwriter"
"time"
"github.com/containers/libpod/cmd/podman/formats"
@@ -16,12 +20,31 @@ import (
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"k8s.io/apimachinery/pkg/fields"
)
-const mountTruncLength = 12
+const (
+ mountTruncLength = 12
+ hid = "CONTAINER ID"
+ himage = "IMAGE"
+ hcommand = "COMMAND"
+ hcreated = "CREATED"
+ hstatus = "STATUS"
+ hports = "PORTS"
+ hnames = "NAMES"
+ hsize = "SIZE"
+ hinfra = "IS INFRA"
+ hpod = "POD"
+ nspid = "PID"
+ nscgroup = "CGROUPNS"
+ nsipc = "IPC"
+ nsmnt = "MNT"
+ nsnet = "NET"
+ nspidns = "PIDNS"
+ nsuserns = "USERNS"
+ nsuts = "UTS"
+)
type psTemplateParams struct {
ID string
@@ -76,7 +99,7 @@ type psJSONParams struct {
}
// Type declaration and functions for sorting the PS output
-type psSorted []psJSONParams
+type psSorted []shared.PsContainerOutput
func (a psSorted) Len() int { return len(a) }
func (a psSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
@@ -84,7 +107,7 @@ func (a psSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type psSortedCommand struct{ psSorted }
func (a psSortedCommand) Less(i, j int) bool {
- return strings.Join(a.psSorted[i].Command, " ") < strings.Join(a.psSorted[j].Command, " ")
+ return a.psSorted[i].Command < a.psSorted[j].Command
}
type psSortedCreated struct{ psSorted }
@@ -201,6 +224,11 @@ var (
)
func psCmd(c *cli.Context) error {
+ var (
+ filterFuncs []libpod.ContainerFilter
+ outputContainers []*libpod.Container
+ )
+
if err := validateFlags(c, psFlags); err != nil {
return err
}
@@ -220,11 +248,9 @@ func psCmd(c *cli.Context) error {
return errors.Errorf("too many arguments, ps takes no arguments")
}
- format := genPsFormat(c.String("format"), c.Bool("quiet"), c.Bool("size"), c.Bool("namespace"), c.Bool("pod"), c.Bool("all"))
-
opts := shared.PsOptions{
All: c.Bool("all"),
- Format: format,
+ Format: c.String("format"),
Last: c.Int("last"),
Latest: c.Bool("latest"),
NoTrunc: c.Bool("no-trunc"),
@@ -235,18 +261,6 @@ func psCmd(c *cli.Context) error {
Sort: c.String("sort"),
}
- var filterFuncs []libpod.ContainerFilter
- // When we are dealing with latest or last=n, we need to
- // get all containers.
- if !opts.All && !opts.Latest && opts.Last < 1 {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- // Don't return infra containers
- return state == libpod.ContainerStateRunning && !c.IsInfra()
- })
- }
-
filters := c.StringSlice("filter")
if len(filters) > 0 {
for _, f := range filters {
@@ -262,8 +276,6 @@ func psCmd(c *cli.Context) error {
}
}
- var outputContainers []*libpod.Container
-
if !opts.Latest {
// Get all containers
containers, err := runtime.GetContainers(filterFuncs...)
@@ -288,7 +300,92 @@ func psCmd(c *cli.Context) error {
outputContainers = []*libpod.Container{latestCtr}
}
- return generatePsOutput(outputContainers, opts)
+ pss := shared.PBatch(outputContainers, 8, opts)
+ if opts.Sort != "" {
+ pss, err = sortPsOutput(opts.Sort, pss)
+ if err != nil {
+ return err
+ }
+ }
+
+ // If quiet, print only cids and return
+ if opts.Quiet {
+ return printQuiet(pss)
+ }
+
+ // If the user wants their own GO template format
+ if opts.Format != "" {
+ if opts.Format == "json" {
+ return dumpJSON(pss)
+ }
+ return printFormat(opts.Format, pss)
+ }
+
+ // Define a tab writer with stdout as the output
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ defer w.Flush()
+
+ // Output standard PS headers
+ if !opts.Namespace {
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s", hid, himage, hcommand, hcreated, hstatus, hports, hnames)
+ // If the user does not want size OR pod info, we print the isInfra bool
+ if !opts.Size && !opts.Pod {
+ fmt.Fprintf(w, "\t%s", hinfra)
+ }
+ // User wants pod info
+ if opts.Pod {
+ fmt.Fprintf(w, "\t%s", hpod)
+ }
+ //User wants size info
+ if opts.Size {
+ fmt.Fprintf(w, "\t%s", hsize)
+ }
+ } else {
+ // Output Namespace headers
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s", hid, hnames, nspid, nscgroup, nsipc, nsmnt, nsnet, nspidns, nsuserns, nsuts)
+ }
+
+ // Now iterate each container and output its information
+ for _, container := range pss {
+
+ // Standard PS output
+ if !opts.Namespace {
+ fmt.Fprintf(w, "\n%s\t%s\t%s\t%s\t%s\t%s\t%s", container.ID, container.Image, container.Command, container.Created, container.Status, container.Ports, container.Names)
+
+ // If not size and not pod info, do isInfra
+ if !opts.Size && !opts.Pod {
+ fmt.Fprintf(w, "\t%t", container.IsInfra)
+ }
+ // User wants pod info
+ if opts.Pod {
+ fmt.Fprintf(w, "\t%s", container.Pod)
+ }
+ //User wants size info
+ if opts.Size {
+ var size string
+ if container.Size == nil {
+ size = units.HumanSizeWithPrecision(0, 0)
+ } else {
+ size = units.HumanSizeWithPrecision(float64(container.Size.RwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(container.Size.RootFsSize), 3) + ")"
+ fmt.Fprintf(w, "\t%s", size)
+ }
+ }
+
+ } else {
+ // Print namespace information
+ ns := shared.GetNamespaces(container.Pid)
+ fmt.Fprintf(w, "\n%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", container.ID, container.Names, container.Pid, ns.Cgroup, ns.IPC, ns.MNT, ns.NET, ns.PIDNS, ns.User, ns.UTS)
+ }
+
+ }
+ return nil
+}
+
+func printQuiet(containers []shared.PsContainerOutput) error {
+ for _, c := range containers {
+ fmt.Println(c.ID)
+ }
+ return nil
}
// checkFlagsPassed checks if mutually exclusive flags are passed together
@@ -420,47 +517,6 @@ func generateContainerFilterFuncs(filter, filterValue string, runtime *libpod.Ru
return nil, errors.Errorf("%s is an invalid filter", filter)
}
-// generate the template based on conditions given
-func genPsFormat(format string, quiet, size, namespace, pod, infra bool) string {
- if format != "" {
- // "\t" from the command line is not being recognized as a tab
- // replacing the string "\t" to a tab character if the user passes in "\t"
- return strings.Replace(format, `\t`, "\t", -1)
- }
- if quiet {
- return formats.IDString
- }
- podappend := ""
- if pod {
- podappend = "{{.Pod}}\t"
- }
- if namespace {
- return fmt.Sprintf("table {{.ID}}\t{{.Names}}\t%s{{.PID}}\t{{.CGROUPNS}}\t{{.IPC}}\t{{.MNT}}\t{{.NET}}\t{{.PIDNS}}\t{{.USERNS}}\t{{.UTS}}", podappend)
- }
- format = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.Created}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}\t"
- format += podappend
- if size {
- format += "{{.Size}}\t"
- }
- if infra {
- format += "{{.IsInfra}}\t"
- }
- return format
-}
-
-func psToGeneric(templParams []psTemplateParams, JSONParams []psJSONParams) (genericParams []interface{}) {
- if len(templParams) > 0 {
- for _, v := range templParams {
- genericParams = append(genericParams, interface{}(v))
- }
- return
- }
- for _, v := range JSONParams {
- genericParams = append(genericParams, interface{}(v))
- }
- return
-}
-
// generate the accurate header based on template given
func (p *psTemplateParams) headerMap() map[string]string {
v := reflect.Indirect(reflect.ValueOf(p))
@@ -503,176 +559,6 @@ func sortPsOutput(sortBy string, psOutput psSorted) (psSorted, error) {
return psOutput, nil
}
-// getTemplateOutput returns the modified container information
-func getTemplateOutput(psParams []psJSONParams, opts shared.PsOptions) ([]psTemplateParams, error) {
- var (
- psOutput []psTemplateParams
- pod, status, size string
- ns *shared.Namespace
- )
- // If the user is trying to filter based on size, or opted to sort on size
- // the size bool must be set.
- if strings.Contains(opts.Format, ".Size") || opts.Sort == "size" {
- opts.Size = true
- }
- if strings.Contains(opts.Format, ".Pod") || opts.Sort == "pod" {
- opts.Pod = true
- }
-
- for _, psParam := range psParams {
- // do we need this?
- imageName := psParam.Image
- ctrID := psParam.ID
-
- if opts.Namespace {
- ns = psParam.Namespaces
- }
- if opts.Size {
- if psParam.Size == nil {
- size = units.HumanSizeWithPrecision(0, 0)
- } else {
- size = units.HumanSizeWithPrecision(float64(psParam.Size.RwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(psParam.Size.RootFsSize), 3) + ")"
- }
- }
- if opts.Pod {
- pod = psParam.Pod
- }
-
- command := strings.Join(psParam.Command, " ")
- if !opts.NoTrunc {
- if len(command) > 20 {
- command = command[:19] + "..."
- }
- }
- ports := portsToString(psParam.Ports)
- labels := formatLabels(psParam.Labels)
-
- switch psParam.Status {
- case libpod.ContainerStateExited.String():
- fallthrough
- case libpod.ContainerStateStopped.String():
- exitedSince := units.HumanDuration(time.Since(psParam.ExitedAt))
- status = fmt.Sprintf("Exited (%d) %s ago", psParam.ExitCode, exitedSince)
- case libpod.ContainerStateRunning.String():
- status = "Up " + units.HumanDuration(time.Since(psParam.StartedAt)) + " ago"
- case libpod.ContainerStatePaused.String():
- status = "Paused"
- case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String():
- status = "Created"
- default:
- status = "Error"
- }
-
- if !opts.NoTrunc {
- ctrID = shortID(psParam.ID)
- pod = shortID(psParam.Pod)
- }
- params := psTemplateParams{
- ID: ctrID,
- Image: imageName,
- Command: command,
- CreatedAtTime: psParam.CreatedAt,
- Created: units.HumanDuration(time.Since(psParam.CreatedAt)) + " ago",
- Status: status,
- Ports: ports,
- Size: size,
- Names: psParam.Names,
- Labels: labels,
- Mounts: getMounts(psParam.Mounts, opts.NoTrunc),
- PID: psParam.PID,
- Pod: pod,
- IsInfra: psParam.IsInfra,
- }
-
- if opts.Namespace {
- params.CGROUPNS = ns.Cgroup
- params.IPC = ns.IPC
- params.MNT = ns.MNT
- params.NET = ns.NET
- params.PIDNS = ns.PIDNS
- params.USERNS = ns.User
- params.UTS = ns.UTS
- }
- psOutput = append(psOutput, params)
- }
-
- return psOutput, nil
-}
-
-// getAndSortJSONOutput returns the container info in its raw, sorted form
-func getAndSortJSONParams(containers []*libpod.Container, opts shared.PsOptions) ([]psJSONParams, error) {
- var (
- psOutput psSorted
- ns *shared.Namespace
- )
- for _, ctr := range containers {
- batchInfo, err := shared.BatchContainerOp(ctr, opts)
- if err != nil {
- if errors.Cause(err) == libpod.ErrNoSuchCtr {
- logrus.Warn(err)
- continue
- }
- return nil, err
- }
-
- if opts.Namespace {
- ns = shared.GetNamespaces(batchInfo.Pid)
- }
- params := psJSONParams{
- ID: ctr.ID(),
- Image: batchInfo.ConConfig.RootfsImageName,
- ImageID: batchInfo.ConConfig.RootfsImageID,
- Command: batchInfo.ConConfig.Spec.Process.Args,
- ExitCode: batchInfo.ExitCode,
- Exited: batchInfo.Exited,
- CreatedAt: batchInfo.ConConfig.CreatedTime,
- StartedAt: batchInfo.StartedTime,
- ExitedAt: batchInfo.ExitedTime,
- Status: batchInfo.ConState.String(),
- PID: batchInfo.Pid,
- Ports: batchInfo.ConConfig.PortMappings,
- Size: batchInfo.Size,
- Names: batchInfo.ConConfig.Name,
- Labels: batchInfo.ConConfig.Labels,
- Mounts: batchInfo.ConConfig.UserVolumes,
- ContainerRunning: batchInfo.ConState == libpod.ContainerStateRunning,
- Namespaces: ns,
- Pod: ctr.PodID(),
- IsInfra: ctr.IsInfra(),
- }
-
- psOutput = append(psOutput, params)
- }
- return sortPsOutput(opts.Sort, psOutput)
-}
-
-func generatePsOutput(containers []*libpod.Container, opts shared.PsOptions) error {
- if len(containers) == 0 && opts.Format != formats.JSONString {
- return nil
- }
- psOutput, err := getAndSortJSONParams(containers, opts)
- if err != nil {
- return err
- }
- var out formats.Writer
-
- switch opts.Format {
- case formats.JSONString:
- if err != nil {
- return errors.Wrapf(err, "unable to create JSON for output")
- }
- out = formats.JSONStructArray{Output: psToGeneric([]psTemplateParams{}, psOutput)}
- default:
- psOutput, err := getTemplateOutput(psOutput, opts)
- if err != nil {
- return errors.Wrapf(err, "unable to create output")
- }
- out = formats.StdoutTemplateArray{Output: psToGeneric(psOutput, []psJSONParams{}), Template: opts.Format, Fields: psOutput[0].headerMap()}
- }
-
- return formats.Writer(out).Out()
-}
-
// getLabels converts the labels to a string of the form "key=value, key2=value2"
func formatLabels(labels map[string]string) string {
var arr []string
@@ -723,3 +609,28 @@ func portsToString(ports []ocicni.PortMapping) string {
}
return strings.Join(portDisplay, ", ")
}
+
+func printFormat(format string, containers []shared.PsContainerOutput) error {
+ out := template.New("output")
+ out, err := out.Parse(format + "\n")
+
+ if err != nil {
+ return err
+ }
+ for _, container := range containers {
+ if err := out.Execute(os.Stdout, container); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+func dumpJSON(containers []shared.PsContainerOutput) error {
+ b, err := json.MarshalIndent(containers, "", "\t")
+ if err != nil {
+ return err
+ }
+ os.Stdout.Write(b)
+ return nil
+}
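
The rewritten ps path drops the formats-based template machinery in favour of three direct output modes: a tabwriter for the default table, a Go template for --format, and MarshalIndent for JSON. A standalone sketch of the first two follows; it is not podman code, and it uses text/template and text/tabwriter from the standard library (the diff itself imports html/template):

```go
// Standalone illustration of the tabwriter and template output styles.
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
	"text/template"
)

type row struct {
	ID    string
	Names string
}

func main() {
	rows := []row{{ID: "1f0c4b7f6d3a", Names: "sleepy_meitner"}}

	// Default table: tab-separated cells, aligned when Flush is called.
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	fmt.Fprintf(w, "CONTAINER ID\tNAMES\n")
	for _, r := range rows {
		fmt.Fprintf(w, "%s\t%s\n", r.ID, r.Names)
	}
	w.Flush()

	// --format output: parse the user-supplied template once, then
	// execute it per row, as printFormat does above.
	tmpl := template.Must(template.New("output").Parse("{{.ID}} {{.Names}}\n"))
	for _, r := range rows {
		if err := tmpl.Execute(os.Stdout, r); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
```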
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 623c4936e..067a2b5d4 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -6,6 +6,7 @@ import (
"os"
"github.com/containers/libpod/cmd/podman/libpodruntime"
+ "github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/urfave/cli"
@@ -22,6 +23,14 @@ var (
Name: "keep, k",
Usage: "keep all temporary checkpoint files",
},
+ // restore --all would make more sense if there were a
+ // dedicated state for containers that are checkpointed.
+ // TODO: add ContainerStateCheckpointed
+ cli.BoolFlag{
+ Name: "all, a",
+ Usage: "restore all checkpointed containers",
+ },
+ LatestFlag,
}
restoreCommand = cli.Command{
Name: "restore",
@@ -45,21 +54,14 @@ func restoreCmd(c *cli.Context) error {
defer runtime.Shutdown(false)
keep := c.Bool("keep")
- args := c.Args()
- if len(args) < 1 {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- var lastError error
- for _, arg := range args {
- ctr, err := runtime.LookupContainer(arg)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "error looking up container %q", arg)
- continue
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "checkpointed")
+
+ for _, ctr := range containers {
if err = ctr.Restore(context.TODO(), keep); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index f64eca6f4..c6641e879 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -2,7 +2,7 @@ package main
import (
"fmt"
- "os"
+ rt "runtime"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
@@ -45,6 +45,12 @@ Running containers will not be removed without the -f option.
// saveCmd saves the image to either docker-archive or oci
func rmCmd(c *cli.Context) error {
+ var (
+ delContainers []*libpod.Container
+ lastError error
+ deleteFuncs []workerInput
+ )
+
ctx := getContext()
if err := validateFlags(c, rmFlags); err != nil {
return err
@@ -56,49 +62,31 @@ func rmCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- args := c.Args()
- if c.Bool("latest") && c.Bool("all") {
- return errors.Errorf("--all and --latest cannot be used together")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
- if len(args) == 0 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("specify one or more containers to remove")
- }
+ delContainers, lastError = getAllOrLatestContainers(c, runtime, -1, "all")
- var delContainers []*libpod.Container
- var lastError error
- if c.Bool("all") {
- delContainers, err = runtime.GetContainers()
- if err != nil {
- return errors.Wrapf(err, "unable to get container list")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get latest container")
- }
- delContainers = append(delContainers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- delContainers = append(delContainers, container)
+ for _, container := range delContainers {
+ f := func() error {
+ return runtime.RemoveContainer(ctx, container, c.Bool("force"))
}
+
+ deleteFuncs = append(deleteFuncs, workerInput{
+ containerID: container.ID(),
+ parallelFunc: f,
+ })
}
- for _, container := range delContainers {
- err = runtime.RemoveContainer(ctx, container, c.Bool("force"))
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "failed to delete container %v", container.ID())
- } else {
- fmt.Println(container.ID())
+
+ deleteErrors := parallelExecuteWorkerPool(rt.NumCPU()*3, deleteFuncs)
+ for cid, result := range deleteErrors {
+ if result != nil {
+ fmt.Println(result.Error())
+ lastError = result
+ continue
}
+ fmt.Println(cid)
}
return lastError
}
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go
index f44d0f7c9..4af737e0a 100644
--- a/cmd/podman/shared/container.go
+++ b/cmd/podman/shared/container.go
@@ -2,11 +2,15 @@ package shared
import (
"encoding/json"
+ "fmt"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-units"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
+ "sync"
"time"
"github.com/containers/libpod/libpod"
@@ -17,6 +21,11 @@ import (
"github.com/sirupsen/logrus"
)
+const (
+ cidTruncLength = 12
+ podTruncLength = 12
+)
+
// PsOptions describes the struct being formed for ps
type PsOptions struct {
All bool
@@ -45,6 +54,35 @@ type BatchContainerStruct struct {
Size *ContainerSize
}
+// PsContainerOutput is the struct being returned from a parallel
+// Batch operation
+type PsContainerOutput struct {
+ ID string
+ Image string
+ Command string
+ Created string
+ Ports string
+ Names string
+ IsInfra bool
+ Status string
+ State libpod.ContainerStatus
+ Pid int
+ Size *ContainerSize
+ Pod string
+ CreatedAt time.Time
+ ExitedAt time.Time
+ StartedAt time.Time
+ Labels map[string]string
+ PID string
+ Cgroup string
+ IPC string
+ MNT string
+ NET string
+ PIDNS string
+ User string
+ UTS string
+}
+
// Namespace describes output for ps namespace
type Namespace struct {
PID string `json:"pid,omitempty"`
@@ -64,6 +102,212 @@ type ContainerSize struct {
RwSize int64 `json:"rwSize"`
}
+// NewBatchContainer runs a batch process under one lock to get container information; it should
+// only be called from PBatch.
+func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
+ var (
+ conState libpod.ContainerStatus
+ command string
+ created string
+ status string
+ exitedAt time.Time
+ startedAt time.Time
+ exitCode int32
+ err error
+ pid int
+ size *ContainerSize
+ ns *Namespace
+ pso PsContainerOutput
+ )
+ batchErr := ctr.Batch(func(c *libpod.Container) error {
+ conState, err = c.State()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container state")
+ }
+ command = strings.Join(c.Command(), " ")
+ created = units.HumanDuration(time.Since(c.CreatedTime())) + " ago"
+
+ exitCode, _, err = c.ExitCode()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container exit code")
+ }
+ startedAt, err = c.StartedTime()
+ if err != nil {
+ logrus.Errorf("error getting started time for %q: %v", c.ID(), err)
+ }
+ exitedAt, err = c.FinishedTime()
+ if err != nil {
+ logrus.Errorf("error getting exited time for %q: %v", c.ID(), err)
+ }
+ if opts.Namespace {
+ pid, err = c.PID()
+ if err != nil {
+ return errors.Wrapf(err, "unable to obtain container pid")
+ }
+ ns = GetNamespaces(pid)
+ }
+ if opts.Size {
+ size = new(ContainerSize)
+
+ rootFsSize, err := c.RootFsSize()
+ if err != nil {
+ logrus.Errorf("error getting root fs size for %q: %v", c.ID(), err)
+ }
+
+ rwSize, err := c.RWSize()
+ if err != nil {
+ logrus.Errorf("error getting rw size for %q: %v", c.ID(), err)
+ }
+
+ size.RootFsSize = rootFsSize
+ size.RwSize = rwSize
+ }
+
+ return nil
+ })
+
+ if batchErr != nil {
+ return pso, batchErr
+ }
+
+ switch conState.String() {
+ case libpod.ContainerStateExited.String():
+ fallthrough
+ case libpod.ContainerStateStopped.String():
+ exitedSince := units.HumanDuration(time.Since(exitedAt))
+ status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince)
+ case libpod.ContainerStateRunning.String():
+ status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago"
+ case libpod.ContainerStatePaused.String():
+ status = "Paused"
+ case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String():
+ status = "Created"
+ default:
+ status = "Error"
+ }
+
+ _, imageName := ctr.Image()
+ cid := ctr.ID()
+ pod := ctr.PodID()
+ if !opts.NoTrunc {
+ cid = cid[0:cidTruncLength]
+ if len(pod) > 12 {
+ pod = pod[0:podTruncLength]
+ }
+ }
+
+ pso.ID = cid
+ pso.Image = imageName
+ pso.Command = command
+ pso.Created = created
+ pso.Ports = portsToString(ctr.PortMappings())
+ pso.Names = ctr.Name()
+ pso.IsInfra = ctr.IsInfra()
+ pso.Status = status
+ pso.State = conState
+ pso.Pid = pid
+ pso.Size = size
+ pso.Pod = pod
+ pso.ExitedAt = exitedAt
+ pso.CreatedAt = ctr.CreatedTime()
+ pso.StartedAt = startedAt
+ pso.Labels = ctr.Labels()
+
+ if opts.Namespace {
+ pso.Cgroup = ns.Cgroup
+ pso.IPC = ns.IPC
+ pso.MNT = ns.MNT
+ pso.NET = ns.NET
+ pso.User = ns.User
+ pso.UTS = ns.UTS
+ pso.PIDNS = ns.PIDNS
+ }
+
+ return pso, nil
+}
+
+type pFunc func() (PsContainerOutput, error)
+
+type workerInput struct {
+ parallelFunc pFunc
+ opts PsOptions
+ cid string
+ job int
+}
+
+// worker is a "threaded" worker that takes jobs from the channel "queue"
+func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) {
+ for j := range jobs {
+ r, err := j.parallelFunc()
+ // If we find an error, we return just the error
+ if err != nil {
+ errors <- err
+ } else {
+ // Return the result
+ results <- r
+ }
+ wg.Done()
+ }
+}
+
+// PBatch performs batch operations on containers in parallel. It spawns a number of workers
+// relative to the number of parallel operations desired.
+func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
+ var (
+ wg sync.WaitGroup
+ psResults []PsContainerOutput
+ )
+
+ // If the number of containers in question is less than the number of
+ // proposed parallel operations, we shouldn't spawn so many workers
+ if workers > len(containers) {
+ workers = len(containers)
+ }
+
+ jobs := make(chan workerInput, len(containers))
+ results := make(chan PsContainerOutput, len(containers))
+ batchErrors := make(chan error, len(containers))
+
+ // Create the workers
+ for w := 1; w <= workers; w++ {
+ go worker(&wg, jobs, results, batchErrors)
+ }
+
+ // Add jobs to the workers
+ for i, j := range containers {
+ j := j
+ wg.Add(1)
+ f := func() (PsContainerOutput, error) {
+ return NewBatchContainer(j, opts)
+ }
+ jobs <- workerInput{
+ parallelFunc: f,
+ opts: opts,
+ cid: j.ID(),
+ job: i,
+ }
+ }
+ close(jobs)
+ wg.Wait()
+ close(results)
+ close(batchErrors)
+ for err := range batchErrors {
+ logrus.Errorf("unable to get container info: %q", err)
+ }
+ for res := range results {
+ // We sort out running vs non-running here to save lots of copying
+ // later.
+ if !opts.All && !opts.Latest && opts.Last < 1 {
+ if !res.IsInfra && res.State == libpod.ContainerStateRunning {
+ psResults = append(psResults, res)
+ }
+ } else {
+ psResults = append(psResults, res)
+ }
+ }
+ return psResults
+}
+
// BatchContainer is used in ps to reduce performance hits by "batching"
// locks.
func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
@@ -325,3 +569,19 @@ func getCgroup(spec *specs.Spec) string {
}
return cgroup
}
+
+// portsToString converts the ports used to a string of the form "port1, port2"
+func portsToString(ports []ocicni.PortMapping) string {
+ var portDisplay []string
+ if len(ports) == 0 {
+ return ""
+ }
+ for _, v := range ports {
+ hostIP := v.HostIP
+ if hostIP == "" {
+ hostIP = "0.0.0.0"
+ }
+ portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol))
+ }
+ return strings.Join(portDisplay, ", ")
+}
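
PBatch gathers per-container data with a small worker pool: job and result channels buffered to the number of containers, a capped number of goroutines, and a WaitGroup so the channels can be closed before the results are drained. A standalone sketch of that shape follows; it is not podman code, and the string job stands in for NewBatchContainer:

```go
// Standalone worker-pool sketch mirroring the shape used by PBatch.
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c", "d"}

	// Never spawn more workers than there are jobs.
	workers := 8
	if workers > len(items) {
		workers = len(items)
	}

	// Buffered to len(items) so neither senders nor workers block.
	jobs := make(chan string, len(items))
	results := make(chan string, len(items))
	var wg sync.WaitGroup

	for w := 0; w < workers; w++ {
		go func() {
			for j := range jobs {
				results <- "processed " + j // stand-in for the batch operation
				wg.Done()
			}
		}()
	}

	for _, it := range items {
		wg.Add(1)
		jobs <- it
	}
	close(jobs)

	// Wait for all jobs, then close results so the range below terminates.
	wg.Wait()
	close(results)

	for r := range results {
		fmt.Println(r)
	}
}
```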
diff --git a/cmd/podman/stats.go b/cmd/podman/stats.go
index dea351e88..f6beac1a8 100644
--- a/cmd/podman/stats.go
+++ b/cmd/podman/stats.go
@@ -84,8 +84,7 @@ func statsCmd(c *cli.Context) error {
if ctr > 1 {
return errors.Errorf("--all, --latest and containers cannot be used together")
} else if ctr == 0 {
- // If user didn't specify, imply --all
- all = true
+ return errors.Errorf("you must specify --all, --latest, or at least one container")
}
runtime, err := libpodruntime.GetRuntime(c)
@@ -126,6 +125,10 @@ func statsCmd(c *cli.Context) error {
for _, ctr := range ctrs {
initialStats, err := ctr.GetContainerStats(&libpod.ContainerStats{})
if err != nil {
+ // when doing "all", don't worry about containers that are not running
+ if c.Bool("all") && errors.Cause(err) == libpod.ErrCtrRemoved || errors.Cause(err) == libpod.ErrNoSuchCtr || errors.Cause(err) == libpod.ErrCtrStateInvalid {
+ continue
+ }
return err
}
containerStats[ctr.ID()] = initialStats
diff --git a/cmd/podman/stop.go b/cmd/podman/stop.go
index ff0b36bf1..edadbda89 100644
--- a/cmd/podman/stop.go
+++ b/cmd/podman/stop.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
- "os"
rt "runtime"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -44,16 +43,11 @@ var (
)
func stopCmd(c *cli.Context) error {
- args := c.Args()
- if (c.Bool("all") || c.Bool("latest")) && len(args) > 0 {
- return errors.Errorf("no arguments are needed with --all or --latest")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if len(args) < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one container name or id")
+
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
+
if err := validateFlags(c, stopFlags); err != nil {
return err
}
@@ -65,39 +59,7 @@ func stopCmd(c *cli.Context) error {
}
defer runtime.Shutdown(false)
- var filterFuncs []libpod.ContainerFilter
- var containers []*libpod.Container
- var lastError error
-
- if c.Bool("all") {
- // only get running containers
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- return state == libpod.ContainerStateRunning
- })
- containers, err = runtime.GetContainers(filterFuncs...)
- if err != nil {
- return errors.Wrapf(err, "unable to get running containers")
- }
- } else if c.Bool("latest") {
- lastCtr, err := runtime.GetLatestContainer()
- if err != nil {
- return errors.Wrapf(err, "unable to get last created container")
- }
- containers = append(containers, lastCtr)
- } else {
- for _, i := range args {
- container, err := runtime.LookupContainer(i)
- if err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
- }
- lastError = errors.Wrapf(err, "unable to find container %s", i)
- continue
- }
- containers = append(containers, container)
- }
- }
+ containers, lastError := getAllOrLatestContainers(c, runtime, libpod.ContainerStateRunning, "running")
var stopFuncs []workerInput
for _, ctr := range containers {
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index b193cf889..f9971fd88 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -160,15 +160,8 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
}
func checkMutuallyExclusiveFlags(c *cli.Context) error {
- argLen := len(c.Args())
- if (c.Bool("all") || c.Bool("latest")) && argLen > 0 {
- return errors.Errorf("no arguments are needed with --all or --latest")
- }
- if c.Bool("all") && c.Bool("latest") {
- return errors.Errorf("--all and --latest cannot be used together")
- }
- if argLen < 1 && !c.Bool("all") && !c.Bool("latest") {
- return errors.Errorf("you must provide at least one pod name or id")
+ if err := checkAllAndLatest(c); err != nil {
+ return err
}
if err := validateFlags(c, startFlags); err != nil {
return err
diff --git a/contrib/cirrus/build_vm_images.sh b/contrib/cirrus/build_vm_images.sh
index 8538ee910..80c689a6c 100755
--- a/contrib/cirrus/build_vm_images.sh
+++ b/contrib/cirrus/build_vm_images.sh
@@ -13,7 +13,7 @@ UBUNTU_BASE_IMAGE $UBUNTU_BASE_IMAGE
FEDORA_BASE_IMAGE $FEDORA_BASE_IMAGE
RHEL_BASE_IMAGE $RHEL_BASE_IMAGE
RHSM_COMMAND $RHSM_COMMAND
-CIRRUS_BUILD_ID $CIRRUS_BUILD_ID
+BUILT_IMAGE_SUFFIX $BUILT_IMAGE_SUFFIX
SERVICE_ACCOUNT $SERVICE_ACCOUNT
GCE_SSH_USERNAME $GCE_SSH_USERNAME
GCP_PROJECT_ID $GCP_PROJECT_ID
diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh
index e69f1e040..1e0052a65 100644
--- a/contrib/cirrus/lib.sh
+++ b/contrib/cirrus/lib.sh
@@ -35,8 +35,11 @@ req_env_var() {
# and useful variables.
# ref: https://cirrus-ci.org/guide/writing-tasks/#environment-variables
show_env_vars() {
+ # This is almost always multi-line, print it separately
+ echo "export CIRRUS_CHANGE_MESSAGE=$CIRRUS_CHANGE_MESSAGE"
echo "
BUILDTAGS $BUILDTAGS
+BUILT_IMAGE_SUFFIX $BUILT_IMAGE_SUFFIX
CI $CI
CIRRUS_CI $CIRRUS_CI
CI_NODE_INDEX $CI_NODE_INDEX
@@ -47,7 +50,6 @@ CIRRUS_BASE_SHA $CIRRUS_BASE_SHA
CIRRUS_BRANCH $CIRRUS_BRANCH
CIRRUS_BUILD_ID $CIRRUS_BUILD_ID
CIRRUS_CHANGE_IN_REPO $CIRRUS_CHANGE_IN_REPO
-CIRRUS_CHANGE_MESSAGE $CIRRUS_CHANGE_MESSAGE
CIRRUS_CLONE_DEPTH $CIRRUS_CLONE_DEPTH
CIRRUS_DEFAULT_BRANCH $CIRRUS_DEFAULT_BRANCH
CIRRUS_PR $CIRRUS_PR
@@ -66,6 +68,7 @@ CIRRUS_USER_PERMISSION $CIRRUS_USER_PERMISSION
CIRRUS_WORKING_DIR $CIRRUS_WORKING_DIR
CIRRUS_HTTP_CACHE_HOST $CIRRUS_HTTP_CACHE_HOST
$(go env)
+PACKER_BUILDS $PACKER_BUILDS
" | while read NAME VALUE
do
[[ -z "$NAME" ]] || echo "export $NAME=\"$VALUE\""
@@ -93,6 +96,19 @@ stub() {
echo "STUB: Pretending to do $1"
}
+ircmsg() {
+ req_env_var "
+ SCRIPT_BASE $SCRIPT_BASE
+ GOSRC $GOSRC
+ CIRRUS_TASK_ID $CIRRUS_TASK_ID
+ 1 $1
+ "
+ SCRIPT="$GOSRC/$SCRIPT_BASE/podbot.py"
+ NICK="podbot_$CIRRUS_TASK_ID"
+ NICK="${NICK:0:15}" # Any longer will break things
+ $SCRIPT $NICK $1
+}
+
# Run sudo in directory with GOPATH set
cdsudo() {
DIR="$1"
@@ -199,6 +215,21 @@ install_conmon(){
sudo install -D -m 755 bin/conmon /usr/libexec/podman/conmon
}
+install_criu(){
+ echo "Installing CRIU from commit $CRIU_COMMIT"
+ req_env_var "
+ CRIU_COMMIT $CRIU_COMMIT
+ "
+ DEST="/tmp/criu"
+ rm -rf "$DEST"
+ ooe.sh git clone https://github.com/checkpoint-restore/criu.git "$DEST"
+ cd $DEST
+ ooe.sh git fetch origin --tags
+ ooe.sh git checkout -q "$CRIU_COMMIT"
+ ooe.sh make
+ sudo install -D -m 755 criu/criu /usr/sbin/
+}
+
# Runs in testing VM, not image building
install_testing_dependencies() {
echo "Installing ginkgo, gomega, and easyjson into \$GOPATH=$GOPATH"
diff --git a/contrib/cirrus/packer/centos_setup.sh b/contrib/cirrus/packer/centos_setup.sh
index 2253d7b35..7b2308739 100644
--- a/contrib/cirrus/packer/centos_setup.sh
+++ b/contrib/cirrus/packer/centos_setup.sh
@@ -12,6 +12,7 @@ req_env_var "
SCRIPT_BASE $SCRIPT_BASE
CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
+CRIU_COMMIT $CRIU_COMMIT
"
install_ooe
@@ -24,35 +25,46 @@ ooe.sh sudo yum -y update
ooe.sh sudo yum -y install centos-release-scl epel-release
ooe.sh sudo yum -y install \
- atomic-registries \
- btrfs-progs-devel \
- bzip2 \
- device-mapper-devel \
- findutils \
- glib2-devel \
- glibc-static \
- gnupg \
- golang \
- golang-github-cpuguy83-go-md2man \
- golang-github-cpuguy83-go-md2man \
- gpgme-devel \
- iptables \
- libassuan-devel \
- libseccomp-devel \
- libselinux-devel \
- lsof \
- make \
- nmap-ncat \
- ostree-devel \
- python \
- python3-dateutil \
- python3-psutil \
- python3-pytoml \
- runc \
- skopeo-containers \
- unzip \
- which \
- xz
+ atomic-registries \
+ btrfs-progs-devel \
+ bzip2 \
+ device-mapper-devel \
+ findutils \
+ glib2-devel \
+ glibc-static \
+ gnupg \
+ golang \
+ golang-github-cpuguy83-go-md2man \
+ golang-github-cpuguy83-go-md2man \
+ gpgme-devel \
+ iptables \
+ libassuan-devel \
+ libcap-devel \
+ libnet \
+ libnet-devel \
+ libnl3-devel \
+ libseccomp-devel \
+ libselinux-devel \
+ lsof \
+ make \
+ nmap-ncat \
+ ostree-devel \
+ protobuf \
+ protobuf-c \
+ protobuf-c-devel \
+ protobuf-compiler \
+ protobuf-devel \
+ protobuf-python \
+ python \
+ python2-future \
+ python3-dateutil \
+ python3-psutil \
+ python3-pytoml \
+ runc \
+ skopeo-containers \
+ unzip \
+ which \
+ xz
install_scl_git
@@ -62,6 +74,8 @@ install_buildah
install_conmon
+install_criu
+
install_packer_copied_files
rh_finalize
diff --git a/contrib/cirrus/packer/fedora_setup.sh b/contrib/cirrus/packer/fedora_setup.sh
index 53709fbdd..16b6e4e6b 100644
--- a/contrib/cirrus/packer/fedora_setup.sh
+++ b/contrib/cirrus/packer/fedora_setup.sh
@@ -12,6 +12,7 @@ req_env_var "
SCRIPT_BASE $SCRIPT_BASE
CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
+CRIU_COMMIT $CRIU_COMMIT
RUNC_COMMIT $RUNC_COMMIT
"
@@ -40,6 +41,10 @@ ooe.sh sudo dnf install -y \
gpgme-devel \
iptables \
libassuan-devel \
+ libcap-devel \
+ libnet \
+ libnet-devel \
+ libnl3-devel \
libseccomp-devel \
libselinux-devel \
lsof \
@@ -47,14 +52,21 @@ ooe.sh sudo dnf install -y \
nmap-ncat \
ostree-devel \
procps-ng \
+ protobuf \
+ protobuf-c \
+ protobuf-c-devel \
+ protobuf-compiler \
+ protobuf-devel \
+ protobuf-python \
python \
+ python2-future \
python3-dateutil \
python3-psutil \
python3-pytoml \
runc \
skopeo-containers \
slirp4netns \
- which\
+ which \
xz
install_varlink
@@ -65,6 +77,8 @@ install_buildah
install_conmon
+install_criu
+
install_packer_copied_files
rh_finalize # N/B: Halts system!
diff --git a/contrib/cirrus/packer/libpod_images.json b/contrib/cirrus/packer/libpod_images.json
index 82a41ca25..9dac3e8ea 100644
--- a/contrib/cirrus/packer/libpod_images.json
+++ b/contrib/cirrus/packer/libpod_images.json
@@ -1,7 +1,9 @@
{
"variables": {
+ "FEDORA_CNI_COMMIT": "{{env `FEDORA_CNI_COMMIT`}}",
"CNI_COMMIT": "{{env `CNI_COMMIT`}}",
"CRIO_COMMIT": "{{env `CRIO_COMMIT`}}",
+ "CRIU_COMMIT": "{{env `CRIU_COMMIT`}}",
"RUNC_COMMIT": "{{env `RUNC_COMMIT`}}",
"CENTOS_BASE_IMAGE": "{{env `CENTOS_BASE_IMAGE`}}" ,
@@ -15,7 +17,7 @@
"SERVICE_ACCOUNT": "{{env `SERVICE_ACCOUNT`}}",
"GCP_PROJECT_ID": "{{env `GCP_PROJECT_ID`}}",
- "CIRRUS_BUILD_ID": "{{env `CIRRUS_BUILD_ID`}}",
+ "BUILT_IMAGE_SUFFIX": "{{env `BUILT_IMAGE_SUFFIX`}}",
"GCE_SSH_USERNAME": "{{env `GCE_SSH_USERNAME`}}",
"RHSM_COMMAND": "{{env `RHSM_COMMAND`}}"
},
@@ -29,7 +31,7 @@
"project_id": "{{user `GCP_PROJECT_ID`}}",
"zone": "us-central1-a",
"source_image": "{{user `RHEL_BASE_IMAGE`}}",
- "image_name": "{{user `RHEL_BASE_IMAGE`}}-libpod-{{user `CIRRUS_BUILD_ID`}}",
+ "image_name": "{{user `RHEL_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
"image_family": "{{user `RHEL_BASE_IMAGE`}}-libpod",
"service_account_email": "{{user `SERVICE_ACCOUNT`}}",
"communicator": "ssh",
@@ -41,7 +43,7 @@
"project_id": "{{user `GCP_PROJECT_ID`}}",
"zone": "us-central1-a",
"source_image": "{{user `CENTOS_BASE_IMAGE`}}",
- "image_name": "{{user `CENTOS_BASE_IMAGE`}}-libpod-{{user `CIRRUS_BUILD_ID`}}",
+ "image_name": "{{user `CENTOS_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
"image_family": "{{user `CENTOS_BASE_IMAGE`}}-libpod",
"service_account_email": "{{user `SERVICE_ACCOUNT`}}",
"communicator": "ssh",
@@ -53,7 +55,7 @@
"project_id": "{{user `GCP_PROJECT_ID`}}",
"zone": "us-central1-a",
"source_image": "{{user `FEDORA_BASE_IMAGE`}}",
- "image_name": "{{user `FEDORA_BASE_IMAGE`}}-libpod-{{user `CIRRUS_BUILD_ID`}}",
+ "image_name": "{{user `FEDORA_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
"image_family": "{{user `FEDORA_BASE_IMAGE`}}-libpod",
"service_account_email": "{{user `SERVICE_ACCOUNT`}}",
"communicator": "ssh",
@@ -65,7 +67,7 @@
"project_id": "{{user `GCP_PROJECT_ID`}}",
"zone": "us-central1-a",
"source_image": "{{user `UBUNTU_BASE_IMAGE`}}",
- "image_name": "{{user `UBUNTU_BASE_IMAGE`}}-libpod-{{user `CIRRUS_BUILD_ID`}}",
+ "image_name": "{{user `UBUNTU_BASE_IMAGE`}}{{user `BUILT_IMAGE_SUFFIX`}}",
"image_family": "{{user `UBUNTU_BASE_IMAGE`}}-libpod",
"service_account_email": "{{user `SERVICE_ACCOUNT`}}",
"communicator": "ssh",
@@ -86,6 +88,7 @@
"SCRIPT_BASE={{user `SCRIPT_BASE`}}",
"CNI_COMMIT={{user `CNI_COMMIT`}}",
"CRIO_COMMIT={{user `CRIO_COMMIT`}}",
+ "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
"RUNC_COMMIT={{user `RUNC_COMMIT`}}",
"RHSM_COMMAND={{user `RHSM_COMMAND`}}"
]
@@ -97,6 +100,7 @@
"SCRIPT_BASE={{user `SCRIPT_BASE`}}",
"CNI_COMMIT={{user `CNI_COMMIT`}}",
"CRIO_COMMIT={{user `CRIO_COMMIT`}}",
+ "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
"RUNC_COMMIT={{user `RUNC_COMMIT`}}"
]
},{
@@ -105,8 +109,9 @@
"script": "{{user `GOSRC`}}/{{user `PACKER_BASE`}}/fedora_setup.sh",
"environment_vars": [
"SCRIPT_BASE={{user `SCRIPT_BASE`}}",
- "CNI_COMMIT={{user `CNI_COMMIT`}}",
+ "CNI_COMMIT={{user `FEDORA_CNI_COMMIT`}}",
"CRIO_COMMIT={{user `CRIO_COMMIT`}}",
+ "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
"RUNC_COMMIT={{user `RUNC_COMMIT`}}"
]
},{
@@ -117,6 +122,7 @@
"SCRIPT_BASE={{user `SCRIPT_BASE`}}",
"CNI_COMMIT={{user `CNI_COMMIT`}}",
"CRIO_COMMIT={{user `CRIO_COMMIT`}}",
+ "CRIU_COMMIT={{user `CRIU_COMMIT`}}",
"RUNC_COMMIT={{user `RUNC_COMMIT`}}"
]
}
diff --git a/contrib/cirrus/packer/rhel_setup.sh b/contrib/cirrus/packer/rhel_setup.sh
index b776a0d97..d296713fc 100644
--- a/contrib/cirrus/packer/rhel_setup.sh
+++ b/contrib/cirrus/packer/rhel_setup.sh
@@ -12,6 +12,7 @@ req_env_var "
SCRIPT_BASE $SCRIPT_BASE
CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
+CRIU_COMMIT $CRIU_COMMIT
RHSM_COMMAND $RHSM_COMMAND
"
@@ -62,37 +63,48 @@ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
EOM
ooe.sh sudo yum -y install \
- atomic-registries \
- btrfs-progs-devel \
- bzip2 \
- device-mapper-devel \
- findutils \
- glib2-devel \
- glibc-static \
- gnupg \
- golang \
- golang-github-cpuguy83-go-md2man \
- golang-github-cpuguy83-go-md2man \
- google-compute-engine \
- google-compute-engine-oslogin \
- gpgme-devel \
- iptables \
- libassuan-devel \
- libseccomp-devel \
- libselinux-devel \
- lsof \
- make \
- nmap-ncat \
- ostree-devel \
- python \
- python34-dateutil \
- python34-psutil \
- python34-pytoml \
- runc \
- skopeo-containers \
- unzip \
- which \
- xz
+ atomic-registries \
+ btrfs-progs-devel \
+ bzip2 \
+ device-mapper-devel \
+ findutils \
+ glib2-devel \
+ glibc-static \
+ gnupg \
+ golang \
+ golang-github-cpuguy83-go-md2man \
+ golang-github-cpuguy83-go-md2man \
+ google-compute-engine \
+ google-compute-engine-oslogin \
+ gpgme-devel \
+ iptables \
+ libassuan-devel \
+ libcap-devel \
+ libnet \
+ libnet-devel \
+ libnl3-devel \
+ libseccomp-devel \
+ libselinux-devel \
+ lsof \
+ make \
+ nmap-ncat \
+ ostree-devel \
+ protobuf \
+ protobuf-c \
+ protobuf-c-devel \
+ protobuf-compiler \
+ protobuf-devel \
+ protobuf-python \
+ python \
+ python2-future \
+ python34-dateutil \
+ python34-psutil \
+ python34-pytoml \
+ runc \
+ skopeo-containers \
+ unzip \
+ which \
+ xz
install_scl_git
@@ -102,6 +114,8 @@ install_buildah
install_conmon
+install_criu
+
install_packer_copied_files
exit_handler # release subscription!
diff --git a/contrib/cirrus/packer/ubuntu_setup.sh b/contrib/cirrus/packer/ubuntu_setup.sh
index 96b3a573f..ff20944dc 100644
--- a/contrib/cirrus/packer/ubuntu_setup.sh
+++ b/contrib/cirrus/packer/ubuntu_setup.sh
@@ -12,6 +12,7 @@ req_env_var "
SCRIPT_BASE $SCRIPT_BASE
CNI_COMMIT $CNI_COMMIT
CRIO_COMMIT $CRIO_COMMIT
+CRIU_COMMIT $CRIU_COMMIT
RUNC_COMMIT $RUNC_COMMIT
"
@@ -34,8 +35,8 @@ ooe.sh sudo apt-get -qq install --no-install-recommends \
e2fslibs-dev \
gawk \
gettext \
- golang \
go-md2man \
+ golang \
iptables \
libaio-dev \
libapparmor-dev \
@@ -46,18 +47,22 @@ ooe.sh sudo apt-get -qq install --no-install-recommends \
libglib2.0-dev \
libgpgme11-dev \
liblzma-dev \
+ libnet1 \
+ libnet1-dev \
+ libnl-3-dev \
libostree-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libtool \
- libtool \
libudev-dev \
lsof \
netcat \
pkg-config \
protobuf-c-compiler \
protobuf-compiler \
+ python-future \
python-minimal \
+ python-protobuf \
python3-dateutil \
python3-pip \
python3-psutil \
@@ -77,6 +82,8 @@ install_runc
install_conmon
+install_criu
+
install_cni_plugins
install_buildah
diff --git a/contrib/cirrus/podbot.py b/contrib/cirrus/podbot.py
new file mode 100755
index 000000000..1be41a8ed
--- /dev/null
+++ b/contrib/cirrus/podbot.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+
+# Simple and dumb script to send a message to the #podman IRC channel on freenode
+# Based on example from: https://pythonspot.com/building-an-irc-bot/
+
+import os
+import time
+import random
+import errno
+import socket
+import sys
+
+class IRC:
+
+ response_timeout = 10 # seconds
+ irc = socket.socket()
+
+ def __init__(self, server, nickname, channel):
+ self.server = server
+ self.nickname = nickname
+ self.channel = channel
+ self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ def _send(self, cmdstr):
+ self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
+
+ def message(self, msg):
+ data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
+ print(data)
+ self._send(data)
+
+ @staticmethod
+ def fix_newlines(bufr):
+ return bufr.replace('\\r\\n', '\n')
+
+ def _required_response(self, needle, haystack):
+ start = time.time()
+ end = start + self.response_timeout
+ while time.time() < end:
+ if haystack.find(needle) != -1:
+ return (False, haystack)
+ time.sleep(0.1)
+ try:
+ haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
+ except socket.error as serr:
+ if serr.errno == errno.EWOULDBLOCK:
+ continue
+ raise # can't handle this
+ return (True, haystack) # Error
+
+ def connect(self, username, password):
+ # This is ugly as sin, but seems to be a working send/expect sequence
+
+ print("connecting to: {0}".format(self.server))
+ self.irc.connect((self.server, 6667)) #connects to the server
+ self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
+ self._send("NICK {0}".format(self.nickname))
+
+ err, haystack = self._required_response('End of /MOTD command.'
+ ''.format(self.nickname), "")
+ if err:
+ print(self.fix_newlines(haystack))
+ print("Error connecting to {0}".format(self.server))
+ return True
+
+ print("Logging in as {0}".format(username))
+ self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
+ err, _ = self._required_response("You are now identified for", "")
+ if err:
+ print("Error logging in to {0} as {1}".format(self.server, username))
+ return True
+
+ print("Joining {0}".format(self.channel))
+ self._send("JOIN {0}".format(self.channel))
+ err, haystack = self._required_response("{0} {1} :End of /NAMES list."
+ "".format(self.nickname, self.channel),
+ haystack)
+ print(self.fix_newlines(haystack))
+ if err:
+ print("Error joining {0}".format(self.channel))
+ return True
+ return False
+
+ def quit(self):
+ print("Quitting")
+ self._send("QUIT :my work is done here")
+ self.irc.close()
+
+
+if len(sys.argv) < 3:
+ print("Error: Must pass desired nick and message as parameters")
+else:
+ irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
+ err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
+ if not err:
+ irc.message(" ".join(sys.argv[2:]))
+ time.sleep(5.0) # avoid join/quit spam
+ irc.quit()
diff --git a/contrib/cirrus/setup_environment.sh b/contrib/cirrus/setup_environment.sh
index 2302f0e15..167db127f 100755
--- a/contrib/cirrus/setup_environment.sh
+++ b/contrib/cirrus/setup_environment.sh
@@ -41,6 +41,7 @@ then
"export OS_RELEASE_ID=\"$(os_release_id)\"" \
"export OS_RELEASE_VER=\"$(os_release_ver)\"" \
"export OS_REL_VER=\"${OS_RELEASE_ID}-${OS_RELEASE_VER}\"" \
+ "export BUILT_IMAGE_SUFFIX=\"-$CIRRUS_REPO_NAME-${CIRRUS_CHANGE_IN_REPO:0:8}\"" \
"export GOPATH=\"/go\"" \
'export PATH="$HOME/bin:$GOPATH/bin:/usr/local/bin:$PATH"' \
'export LD_LIBRARY_PATH="/usr/local/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}"'
diff --git a/contrib/cirrus/success.sh b/contrib/cirrus/success.sh
new file mode 100755
index 000000000..d1daf9043
--- /dev/null
+++ b/contrib/cirrus/success.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e
+source $(dirname $0)/lib.sh
+
+req_env_var "
+ CIRRUS_TASK_NAME $CIRRUS_TASK_NAME
+ CIRRUS_BRANCH $CIRRUS_BRANCH
+ OS_RELEASE_ID $OS_RELEASE_ID
+ OS_RELEASE_VER $OS_RELEASE_VER
+ CIRRUS_REPO_CLONE_URL $CIRRUS_REPO_CLONE_URL
+"
+
+REF_URL="$(echo $CIRRUS_REPO_CLONE_URL | sed 's/.git$//g')"
+if [[ "$CIRRUS_BRANCH" =~ "pull" ]]
+then
+ REF_URL="$REF_URL/$CIRRUS_BRANCH" # pull request URL
+else
+ REF_URL="$REF_URL/commits/$CIRRUS_BRANCH" # branch merge
+fi
+
+ircmsg "Cirrus-CI $CIRRUS_TASK_NAME on $OS_RELEASE_ID-$OS_RELEASE_VER successful for $REF_URL"
diff --git a/contrib/python/podman/MANIFEST.in b/contrib/python/podman/MANIFEST.in
index 72e638cb9..a5897de50 100644
--- a/contrib/python/podman/MANIFEST.in
+++ b/contrib/python/podman/MANIFEST.in
@@ -1,2 +1,3 @@
prune test/
include README.md
+include requirements.txt
diff --git a/contrib/python/podman/test/test_runner.sh b/contrib/python/podman/test/test_runner.sh
index 081b90779..65cbd1e9c 100755
--- a/contrib/python/podman/test/test_runner.sh
+++ b/contrib/python/podman/test/test_runner.sh
@@ -143,18 +143,6 @@ else
RETURNCODE=$?
fi
-if [[ "$RETURNCODE" -ne 0 ]] && [[ -n "$FLAKE_DEBUG_DELAY" ]]
-then
- cat << EOF > /dev/stderr
-*****
-***** WARNING: \$FLAKE_DEBUG_DELAY IS SET AND PYTHON-PODMAN TESTS EXITED: $RETURNCODE
-***** WARNING: Sleeping for 30 minutes for test-VM preservation oportunity.
-*****
-EOF
- sleep 30m
-fi
-
-
pkill -9 podman
pkill -9 conmon
diff --git a/contrib/python/podman/tox.ini b/contrib/python/podman/tox.ini
new file mode 100644
index 000000000..797eafbe3
--- /dev/null
+++ b/contrib/python/podman/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py34,py35,py36
+skipdist = True
+
+[testenv]
+deps=-rrequirements.txt
+whitelist_externals = bash
+commands=bash test/test_runner.sh
diff --git a/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py b/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
index 411a6d5a3..ecfcb883a 100644
--- a/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/actions/pod/processes_parser.py
@@ -10,7 +10,7 @@ class ProcessesPod(AbstractActionBase):
@classmethod
def subparser(cls, parent):
- """Add Images command to parent parser."""
+ """Add Pod Ps command to parent parser."""
parser = parent.add_parser('ps', help='list processes of pod')
super().subparser(parser)
@@ -40,7 +40,7 @@ class ProcessesPod(AbstractActionBase):
parser.set_defaults(class_=cls, method='processes')
def __init__(self, args):
- """Contstruct ProcessesPod class."""
+ """Construct ProcessesPod class."""
if args.sort == 'created':
args.sort = 'createdat'
elif args.sort == 'count':
diff --git a/contrib/python/pypodman/pypodman/lib/podman_parser.py b/contrib/python/pypodman/pypodman/lib/podman_parser.py
index 1ba9bb7fc..d3c84224f 100644
--- a/contrib/python/pypodman/pypodman/lib/podman_parser.py
+++ b/contrib/python/pypodman/pypodman/lib/podman_parser.py
@@ -154,7 +154,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
getattr(args, 'run_dir')
or os.environ.get('RUN_DIR')
or config['default'].get('run_dir')
- or Path(args.xdg_runtime_dir, 'pypodman')
+ or str(Path(args.xdg_runtime_dir, 'pypodman'))
) # yapf: disable
setattr(
@@ -211,7 +211,7 @@ class PodmanArgumentParser(argparse.ArgumentParser):
args.identity_file = None
if args.host:
- args.local_socket_path = Path(args.run_dir, 'podman.socket')
+ args.local_socket_path = str(Path(args.run_dir, 'podman.socket'))
else:
args.local_socket_path = args.remote_socket_path
diff --git a/docs/podman.1.md b/docs/podman.1.md
index 3a0943d6b..085af97ff 100644
--- a/docs/podman.1.md
+++ b/docs/podman.1.md
@@ -42,11 +42,13 @@ When namespace is set, created containers and pods will join the given namespace
**--root**=**value**
-Path to the root directory in which data, including images, is stored
+Storage root dir in which data, including images, is stored (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users).
+Default root dir is configured in /etc/containers/storage.conf.
**--runroot**=**value**
-Path to the 'run directory' where all state information is stored
+Storage state directory where all state information is stored (default: "/var/run/containers/storage" for UID 0, "/var/run/user/$UID/run" for other users).
+Default state dir is configured in /etc/containers/storage.conf.
**--runtime**=**value**
@@ -73,7 +75,7 @@ Print the version
## Exit Status
-The exit code from `podman gives information about why the container
+The exit code from `podman` gives information about why the container
failed to run or why it exited. When `podman` commands exit with a non-zero code,
the exit codes follow the `chroot` standard, see below:
diff --git a/libpod/container.go b/libpod/container.go
index 62db87fa0..7bb5b2687 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -243,6 +243,8 @@ type ContainerConfig struct {
ProcessLabel string `json:"ProcessLabel,omitempty"`
// SELinux mount label for root filesystem
MountLabel string `json:"MountLabel,omitempty"`
+ // LabelOpts are options passed in by the user to setup SELinux labels
+ LabelOpts []string `json:"labelopts,omitempty"`
// User and group to use in the container
// Can be specified by name or UID/GID
User string `json:"user,omitempty"`
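
The new LabelOpts field records the SELinux label options passed in by the user and serializes under the lowercase "labelopts" key; the generated easyjson code further down is regenerated to match. A small, hypothetical round-trip sketch of that wire format — the struct and the values are illustrative only, not libpod API:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirror of the relevant ContainerConfig fields,
// included only to show the JSON shape of the new field.
type labelConfig struct {
	MountLabel string   `json:"MountLabel,omitempty"`
	LabelOpts  []string `json:"labelopts,omitempty"`
}

func main() {
	b, _ := json.Marshal(labelConfig{
		MountLabel: "system_u:object_r:container_file_t:s0:c100,c200",
		LabelOpts:  []string{"disable"}, // e.g. from --security-opt label=disable
	})
	fmt.Println(string(b))
	// {"MountLabel":"system_u:object_r:container_file_t:s0:c100,c200","labelopts":["disable"]}
}
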
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 93becb80d..41a131ea2 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -666,14 +666,10 @@ func (c *Container) Batch(batchFunc func(*Container) error) error {
newCtr.valid = true
newCtr.batched = true
-
- if err := batchFunc(newCtr); err != nil {
- return err
- }
-
+ err := batchFunc(newCtr)
newCtr.batched = false
- return c.save()
+ return err
}
// Sync updates the current state of the container, checking whether its state
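
With the Batch change above, the callback's error is now propagated as-is and the container is no longer re-saved once the batched calls finish. A hypothetical caller, just to show what users of Batch observe; the import path and the helper name are assumptions:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod"
)

// Hypothetical caller: Batch simply runs the callback with batching
// enabled and returns whatever error the callback produced.
func inspectState(ctr *libpod.Container) error {
	return ctr.Batch(func(c *libpod.Container) error {
		state, err := c.State()
		if err != nil {
			return err
		}
		fmt.Printf("container %s state: %v\n", c.ID(), state)
		return nil
	})
}
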
diff --git a/libpod/container_easyjson.go b/libpod/container_easyjson.go
index 53ad5b7ee..041cc08ac 100644
--- a/libpod/container_easyjson.go
+++ b/libpod/container_easyjson.go
@@ -1319,6 +1319,29 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.ProcessLabel = string(in.String())
case "MountLabel":
out.MountLabel = string(in.String())
+ case "labelopts":
+ if in.IsNull() {
+ in.Skip()
+ out.LabelOpts = nil
+ } else {
+ in.Delim('[')
+ if out.LabelOpts == nil {
+ if !in.IsDelim(']') {
+ out.LabelOpts = make([]string, 0, 4)
+ } else {
+ out.LabelOpts = []string{}
+ }
+ } else {
+ out.LabelOpts = (out.LabelOpts)[:0]
+ }
+ for !in.IsDelim(']') {
+ var v41 string
+ v41 = string(in.String())
+ out.LabelOpts = append(out.LabelOpts, v41)
+ in.WantComma()
+ }
+ in.Delim(']')
+ }
case "user":
out.User = string(in.String())
case "groups":
@@ -1337,9 +1360,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Groups = (out.Groups)[:0]
}
for !in.IsDelim(']') {
- var v41 string
- v41 = string(in.String())
- out.Groups = append(out.Groups, v41)
+ var v42 string
+ v42 = string(in.String())
+ out.Groups = append(out.Groups, v42)
in.WantComma()
}
in.Delim(']')
@@ -1374,9 +1397,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Dependencies = (out.Dependencies)[:0]
}
for !in.IsDelim(']') {
- var v42 string
- v42 = string(in.String())
- out.Dependencies = append(out.Dependencies, v42)
+ var v43 string
+ v43 = string(in.String())
+ out.Dependencies = append(out.Dependencies, v43)
in.WantComma()
}
in.Delim(']')
@@ -1403,9 +1426,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.PortMappings = (out.PortMappings)[:0]
}
for !in.IsDelim(']') {
- var v43 ocicni.PortMapping
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in, &v43)
- out.PortMappings = append(out.PortMappings, v43)
+ var v44 ocicni.PortMapping
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(in, &v44)
+ out.PortMappings = append(out.PortMappings, v44)
in.WantComma()
}
in.Delim(']')
@@ -1426,11 +1449,11 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.DNSServer = (out.DNSServer)[:0]
}
for !in.IsDelim(']') {
- var v44 net.IP
+ var v45 net.IP
if data := in.UnsafeBytes(); in.Ok() {
- in.AddError((v44).UnmarshalText(data))
+ in.AddError((v45).UnmarshalText(data))
}
- out.DNSServer = append(out.DNSServer, v44)
+ out.DNSServer = append(out.DNSServer, v45)
in.WantComma()
}
in.Delim(']')
@@ -1451,9 +1474,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.DNSSearch = (out.DNSSearch)[:0]
}
for !in.IsDelim(']') {
- var v45 string
- v45 = string(in.String())
- out.DNSSearch = append(out.DNSSearch, v45)
+ var v46 string
+ v46 = string(in.String())
+ out.DNSSearch = append(out.DNSSearch, v46)
in.WantComma()
}
in.Delim(']')
@@ -1474,9 +1497,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.DNSOption = (out.DNSOption)[:0]
}
for !in.IsDelim(']') {
- var v46 string
- v46 = string(in.String())
- out.DNSOption = append(out.DNSOption, v46)
+ var v47 string
+ v47 = string(in.String())
+ out.DNSOption = append(out.DNSOption, v47)
in.WantComma()
}
in.Delim(']')
@@ -1497,9 +1520,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.HostAdd = (out.HostAdd)[:0]
}
for !in.IsDelim(']') {
- var v47 string
- v47 = string(in.String())
- out.HostAdd = append(out.HostAdd, v47)
+ var v48 string
+ v48 = string(in.String())
+ out.HostAdd = append(out.HostAdd, v48)
in.WantComma()
}
in.Delim(']')
@@ -1520,9 +1543,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Networks = (out.Networks)[:0]
}
for !in.IsDelim(']') {
- var v48 string
- v48 = string(in.String())
- out.Networks = append(out.Networks, v48)
+ var v49 string
+ v49 = string(in.String())
+ out.Networks = append(out.Networks, v49)
in.WantComma()
}
in.Delim(']')
@@ -1543,9 +1566,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.UserVolumes = (out.UserVolumes)[:0]
}
for !in.IsDelim(']') {
- var v49 string
- v49 = string(in.String())
- out.UserVolumes = append(out.UserVolumes, v49)
+ var v50 string
+ v50 = string(in.String())
+ out.UserVolumes = append(out.UserVolumes, v50)
in.WantComma()
}
in.Delim(']')
@@ -1566,9 +1589,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Entrypoint = (out.Entrypoint)[:0]
}
for !in.IsDelim(']') {
- var v50 string
- v50 = string(in.String())
- out.Entrypoint = append(out.Entrypoint, v50)
+ var v51 string
+ v51 = string(in.String())
+ out.Entrypoint = append(out.Entrypoint, v51)
in.WantComma()
}
in.Delim(']')
@@ -1589,9 +1612,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Command = (out.Command)[:0]
}
for !in.IsDelim(']') {
- var v51 string
- v51 = string(in.String())
- out.Command = append(out.Command, v51)
+ var v52 string
+ v52 = string(in.String())
+ out.Command = append(out.Command, v52)
in.WantComma()
}
in.Delim(']')
@@ -1611,9 +1634,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()
- var v52 string
- v52 = string(in.String())
- (out.Labels)[key] = v52
+ var v53 string
+ v53 = string(in.String())
+ (out.Labels)[key] = v53
in.WantComma()
}
in.Delim('}')
@@ -1650,9 +1673,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.ExitCommand = (out.ExitCommand)[:0]
}
for !in.IsDelim(']') {
- var v53 string
- v53 = string(in.String())
- out.ExitCommand = append(out.ExitCommand, v53)
+ var v54 string
+ v54 = string(in.String())
+ out.ExitCommand = append(out.ExitCommand, v54)
in.WantComma()
}
in.Delim(']')
@@ -1673,9 +1696,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.LocalVolumes = (out.LocalVolumes)[:0]
}
for !in.IsDelim(']') {
- var v54 string
- v54 = string(in.String())
- out.LocalVolumes = append(out.LocalVolumes, v54)
+ var v55 string
+ v55 = string(in.String())
+ out.LocalVolumes = append(out.LocalVolumes, v55)
in.WantComma()
}
in.Delim(']')
@@ -1842,11 +1865,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v55, v56 := range in.Mounts {
- if v55 > 0 {
+ for v56, v57 := range in.Mounts {
+ if v56 > 0 {
out.RawByte(',')
}
- out.String(string(v56))
+ out.String(string(v57))
}
out.RawByte(']')
}
@@ -1881,6 +1904,25 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
out.String(string(in.MountLabel))
}
+ if len(in.LabelOpts) != 0 {
+ const prefix string = ",\"labelopts\":"
+ if first {
+ first = false
+ out.RawString(prefix[1:])
+ } else {
+ out.RawString(prefix)
+ }
+ {
+ out.RawByte('[')
+ for v58, v59 := range in.LabelOpts {
+ if v58 > 0 {
+ out.RawByte(',')
+ }
+ out.String(string(v59))
+ }
+ out.RawByte(']')
+ }
+ }
if in.User != "" {
const prefix string = ",\"user\":"
if first {
@@ -1901,11 +1943,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v57, v58 := range in.Groups {
- if v57 > 0 {
+ for v60, v61 := range in.Groups {
+ if v60 > 0 {
out.RawByte(',')
}
- out.String(string(v58))
+ out.String(string(v61))
}
out.RawByte(']')
}
@@ -1992,11 +2034,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
out.RawString("null")
} else {
out.RawByte('[')
- for v59, v60 := range in.Dependencies {
- if v59 > 0 {
+ for v62, v63 := range in.Dependencies {
+ if v62 > 0 {
out.RawByte(',')
}
- out.String(string(v60))
+ out.String(string(v63))
}
out.RawByte(']')
}
@@ -2031,11 +2073,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v61, v62 := range in.PortMappings {
- if v61 > 0 {
+ for v64, v65 := range in.PortMappings {
+ if v64 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out, v62)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComCriOOcicniPkgOcicni(out, v65)
}
out.RawByte(']')
}
@@ -2050,11 +2092,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v63, v64 := range in.DNSServer {
- if v63 > 0 {
+ for v66, v67 := range in.DNSServer {
+ if v66 > 0 {
out.RawByte(',')
}
- out.RawText((v64).MarshalText())
+ out.RawText((v67).MarshalText())
}
out.RawByte(']')
}
@@ -2069,11 +2111,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v65, v66 := range in.DNSSearch {
- if v65 > 0 {
+ for v68, v69 := range in.DNSSearch {
+ if v68 > 0 {
out.RawByte(',')
}
- out.String(string(v66))
+ out.String(string(v69))
}
out.RawByte(']')
}
@@ -2088,11 +2130,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v67, v68 := range in.DNSOption {
- if v67 > 0 {
+ for v70, v71 := range in.DNSOption {
+ if v70 > 0 {
out.RawByte(',')
}
- out.String(string(v68))
+ out.String(string(v71))
}
out.RawByte(']')
}
@@ -2107,11 +2149,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v69, v70 := range in.HostAdd {
- if v69 > 0 {
+ for v72, v73 := range in.HostAdd {
+ if v72 > 0 {
out.RawByte(',')
}
- out.String(string(v70))
+ out.String(string(v73))
}
out.RawByte(']')
}
@@ -2126,11 +2168,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v71, v72 := range in.Networks {
- if v71 > 0 {
+ for v74, v75 := range in.Networks {
+ if v74 > 0 {
out.RawByte(',')
}
- out.String(string(v72))
+ out.String(string(v75))
}
out.RawByte(']')
}
@@ -2145,11 +2187,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v73, v74 := range in.UserVolumes {
- if v73 > 0 {
+ for v76, v77 := range in.UserVolumes {
+ if v76 > 0 {
out.RawByte(',')
}
- out.String(string(v74))
+ out.String(string(v77))
}
out.RawByte(']')
}
@@ -2164,11 +2206,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v75, v76 := range in.Entrypoint {
- if v75 > 0 {
+ for v78, v79 := range in.Entrypoint {
+ if v78 > 0 {
out.RawByte(',')
}
- out.String(string(v76))
+ out.String(string(v79))
}
out.RawByte(']')
}
@@ -2183,11 +2225,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v77, v78 := range in.Command {
- if v77 > 0 {
+ for v80, v81 := range in.Command {
+ if v80 > 0 {
out.RawByte(',')
}
- out.String(string(v78))
+ out.String(string(v81))
}
out.RawByte(']')
}
@@ -2212,16 +2254,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('{')
- v79First := true
- for v79Name, v79Value := range in.Labels {
- if v79First {
- v79First = false
+ v82First := true
+ for v82Name, v82Value := range in.Labels {
+ if v82First {
+ v82First = false
} else {
out.RawByte(',')
}
- out.String(string(v79Name))
+ out.String(string(v82Name))
out.RawByte(':')
- out.String(string(v79Value))
+ out.String(string(v82Value))
}
out.RawByte('}')
}
@@ -2306,11 +2348,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
{
out.RawByte('[')
- for v80, v81 := range in.ExitCommand {
- if v80 > 0 {
+ for v83, v84 := range in.ExitCommand {
+ if v83 > 0 {
out.RawByte(',')
}
- out.String(string(v81))
+ out.String(string(v84))
}
out.RawByte(']')
}
@@ -2327,11 +2369,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
out.RawString("null")
} else {
out.RawByte('[')
- for v82, v83 := range in.LocalVolumes {
- if v82 > 0 {
+ for v85, v86 := range in.LocalVolumes {
+ if v85 > 0 {
out.RawByte(',')
}
- out.String(string(v83))
+ out.String(string(v86))
}
out.RawByte(']')
}
@@ -2504,9 +2546,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersSto
out.UIDMap = (out.UIDMap)[:0]
}
for !in.IsDelim(']') {
- var v84 idtools.IDMap
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(in, &v84)
- out.UIDMap = append(out.UIDMap, v84)
+ var v87 idtools.IDMap
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(in, &v87)
+ out.UIDMap = append(out.UIDMap, v87)
in.WantComma()
}
in.Delim(']')
@@ -2527,9 +2569,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersSto
out.GIDMap = (out.GIDMap)[:0]
}
for !in.IsDelim(']') {
- var v85 idtools.IDMap
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(in, &v85)
- out.GIDMap = append(out.GIDMap, v85)
+ var v88 idtools.IDMap
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(in, &v88)
+ out.GIDMap = append(out.GIDMap, v88)
in.WantComma()
}
in.Delim(']')
@@ -2580,11 +2622,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersSto
out.RawString("null")
} else {
out.RawByte('[')
- for v86, v87 := range in.UIDMap {
- if v86 > 0 {
+ for v89, v90 := range in.UIDMap {
+ if v89 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(out, v87)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(out, v90)
}
out.RawByte(']')
}
@@ -2601,11 +2643,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersSto
out.RawString("null")
} else {
out.RawByte('[')
- for v88, v89 := range in.GIDMap {
- if v88 > 0 {
+ for v91, v92 := range in.GIDMap {
+ if v91 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(out, v89)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComContainersStoragePkgIdtools(out, v92)
}
out.RawByte(']')
}
@@ -2742,9 +2784,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Mounts = (out.Mounts)[:0]
}
for !in.IsDelim(']') {
- var v90 specs_go.Mount
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo4(in, &v90)
- out.Mounts = append(out.Mounts, v90)
+ var v93 specs_go.Mount
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo4(in, &v93)
+ out.Mounts = append(out.Mounts, v93)
in.WantComma()
}
in.Delim(']')
@@ -2772,9 +2814,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()
- var v91 string
- v91 = string(in.String())
- (out.Annotations)[key] = v91
+ var v94 string
+ v94 = string(in.String())
+ (out.Annotations)[key] = v94
in.WantComma()
}
in.Delim('}')
@@ -2883,11 +2925,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v92, v93 := range in.Mounts {
- if v92 > 0 {
+ for v95, v96 := range in.Mounts {
+ if v95 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo4(out, v93)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo4(out, v96)
}
out.RawByte(']')
}
@@ -2912,16 +2954,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('{')
- v94First := true
- for v94Name, v94Value := range in.Annotations {
- if v94First {
- v94First = false
+ v97First := true
+ for v97Name, v97Value := range in.Annotations {
+ if v97First {
+ v97First = false
} else {
out.RawByte(',')
}
- out.String(string(v94Name))
+ out.String(string(v97Name))
out.RawByte(':')
- out.String(string(v94Value))
+ out.String(string(v97Value))
}
out.RawByte('}')
}
@@ -3263,9 +3305,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.LayerFolders = (out.LayerFolders)[:0]
}
for !in.IsDelim(']') {
- var v95 string
- v95 = string(in.String())
- out.LayerFolders = append(out.LayerFolders, v95)
+ var v98 string
+ v98 = string(in.String())
+ out.LayerFolders = append(out.LayerFolders, v98)
in.WantComma()
}
in.Delim(']')
@@ -3286,9 +3328,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Devices = (out.Devices)[:0]
}
for !in.IsDelim(']') {
- var v96 specs_go.WindowsDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo13(in, &v96)
- out.Devices = append(out.Devices, v96)
+ var v99 specs_go.WindowsDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo13(in, &v99)
+ out.Devices = append(out.Devices, v99)
in.WantComma()
}
in.Delim(']')
@@ -3361,11 +3403,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
out.RawString("null")
} else {
out.RawByte('[')
- for v97, v98 := range in.LayerFolders {
- if v97 > 0 {
+ for v100, v101 := range in.LayerFolders {
+ if v100 > 0 {
out.RawByte(',')
}
- out.String(string(v98))
+ out.String(string(v101))
}
out.RawByte(']')
}
@@ -3380,11 +3422,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v99, v100 := range in.Devices {
- if v99 > 0 {
+ for v102, v103 := range in.Devices {
+ if v102 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo13(out, v100)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo13(out, v103)
}
out.RawByte(']')
}
@@ -3492,9 +3534,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.EndpointList = (out.EndpointList)[:0]
}
for !in.IsDelim(']') {
- var v101 string
- v101 = string(in.String())
- out.EndpointList = append(out.EndpointList, v101)
+ var v104 string
+ v104 = string(in.String())
+ out.EndpointList = append(out.EndpointList, v104)
in.WantComma()
}
in.Delim(']')
@@ -3517,9 +3559,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.DNSSearchList = (out.DNSSearchList)[:0]
}
for !in.IsDelim(']') {
- var v102 string
- v102 = string(in.String())
- out.DNSSearchList = append(out.DNSSearchList, v102)
+ var v105 string
+ v105 = string(in.String())
+ out.DNSSearchList = append(out.DNSSearchList, v105)
in.WantComma()
}
in.Delim(']')
@@ -3550,11 +3592,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v103, v104 := range in.EndpointList {
- if v103 > 0 {
+ for v106, v107 := range in.EndpointList {
+ if v106 > 0 {
out.RawByte(',')
}
- out.String(string(v104))
+ out.String(string(v107))
}
out.RawByte(']')
}
@@ -3579,11 +3621,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v105, v106 := range in.DNSSearchList {
- if v105 > 0 {
+ for v108, v109 := range in.DNSSearchList {
+ if v108 > 0 {
out.RawByte(',')
}
- out.String(string(v106))
+ out.String(string(v109))
}
out.RawByte(']')
}
@@ -4087,9 +4129,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Anet = (out.Anet)[:0]
}
for !in.IsDelim(']') {
- var v107 specs_go.SolarisAnet
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo20(in, &v107)
- out.Anet = append(out.Anet, v107)
+ var v110 specs_go.SolarisAnet
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo20(in, &v110)
+ out.Anet = append(out.Anet, v110)
in.WantComma()
}
in.Delim(']')
@@ -4168,11 +4210,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v108, v109 := range in.Anet {
- if v108 > 0 {
+ for v111, v112 := range in.Anet {
+ if v111 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo20(out, v109)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo20(out, v112)
}
out.RawByte(']')
}
@@ -4459,9 +4501,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.UIDMappings = (out.UIDMappings)[:0]
}
for !in.IsDelim(']') {
- var v110 specs_go.LinuxIDMapping
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(in, &v110)
- out.UIDMappings = append(out.UIDMappings, v110)
+ var v113 specs_go.LinuxIDMapping
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(in, &v113)
+ out.UIDMappings = append(out.UIDMappings, v113)
in.WantComma()
}
in.Delim(']')
@@ -4482,9 +4524,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.GIDMappings = (out.GIDMappings)[:0]
}
for !in.IsDelim(']') {
- var v111 specs_go.LinuxIDMapping
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(in, &v111)
- out.GIDMappings = append(out.GIDMappings, v111)
+ var v114 specs_go.LinuxIDMapping
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(in, &v114)
+ out.GIDMappings = append(out.GIDMappings, v114)
in.WantComma()
}
in.Delim(']')
@@ -4502,9 +4544,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()
- var v112 string
- v112 = string(in.String())
- (out.Sysctl)[key] = v112
+ var v115 string
+ v115 = string(in.String())
+ (out.Sysctl)[key] = v115
in.WantComma()
}
in.Delim('}')
@@ -4537,9 +4579,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Namespaces = (out.Namespaces)[:0]
}
for !in.IsDelim(']') {
- var v113 specs_go.LinuxNamespace
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo25(in, &v113)
- out.Namespaces = append(out.Namespaces, v113)
+ var v116 specs_go.LinuxNamespace
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo25(in, &v116)
+ out.Namespaces = append(out.Namespaces, v116)
in.WantComma()
}
in.Delim(']')
@@ -4560,9 +4602,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Devices = (out.Devices)[:0]
}
for !in.IsDelim(']') {
- var v114 specs_go.LinuxDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo26(in, &v114)
- out.Devices = append(out.Devices, v114)
+ var v117 specs_go.LinuxDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo26(in, &v117)
+ out.Devices = append(out.Devices, v117)
in.WantComma()
}
in.Delim(']')
@@ -4595,9 +4637,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.MaskedPaths = (out.MaskedPaths)[:0]
}
for !in.IsDelim(']') {
- var v115 string
- v115 = string(in.String())
- out.MaskedPaths = append(out.MaskedPaths, v115)
+ var v118 string
+ v118 = string(in.String())
+ out.MaskedPaths = append(out.MaskedPaths, v118)
in.WantComma()
}
in.Delim(']')
@@ -4618,9 +4660,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.ReadonlyPaths = (out.ReadonlyPaths)[:0]
}
for !in.IsDelim(']') {
- var v116 string
- v116 = string(in.String())
- out.ReadonlyPaths = append(out.ReadonlyPaths, v116)
+ var v119 string
+ v119 = string(in.String())
+ out.ReadonlyPaths = append(out.ReadonlyPaths, v119)
in.WantComma()
}
in.Delim(']')
@@ -4661,11 +4703,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v117, v118 := range in.UIDMappings {
- if v117 > 0 {
+ for v120, v121 := range in.UIDMappings {
+ if v120 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(out, v118)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(out, v121)
}
out.RawByte(']')
}
@@ -4680,11 +4722,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v119, v120 := range in.GIDMappings {
- if v119 > 0 {
+ for v122, v123 := range in.GIDMappings {
+ if v122 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(out, v120)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo23(out, v123)
}
out.RawByte(']')
}
@@ -4699,16 +4741,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('{')
- v121First := true
- for v121Name, v121Value := range in.Sysctl {
- if v121First {
- v121First = false
+ v124First := true
+ for v124Name, v124Value := range in.Sysctl {
+ if v124First {
+ v124First = false
} else {
out.RawByte(',')
}
- out.String(string(v121Name))
+ out.String(string(v124Name))
out.RawByte(':')
- out.String(string(v121Value))
+ out.String(string(v124Value))
}
out.RawByte('}')
}
@@ -4743,11 +4785,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v122, v123 := range in.Namespaces {
- if v122 > 0 {
+ for v125, v126 := range in.Namespaces {
+ if v125 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo25(out, v123)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo25(out, v126)
}
out.RawByte(']')
}
@@ -4762,11 +4804,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v124, v125 := range in.Devices {
- if v124 > 0 {
+ for v127, v128 := range in.Devices {
+ if v127 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo26(out, v125)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo26(out, v128)
}
out.RawByte(']')
}
@@ -4801,11 +4843,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v126, v127 := range in.MaskedPaths {
- if v126 > 0 {
+ for v129, v130 := range in.MaskedPaths {
+ if v129 > 0 {
out.RawByte(',')
}
- out.String(string(v127))
+ out.String(string(v130))
}
out.RawByte(']')
}
@@ -4820,11 +4862,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v128, v129 := range in.ReadonlyPaths {
- if v128 > 0 {
+ for v131, v132 := range in.ReadonlyPaths {
+ if v131 > 0 {
out.RawByte(',')
}
- out.String(string(v129))
+ out.String(string(v132))
}
out.RawByte(']')
}
@@ -4935,9 +4977,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Architectures = (out.Architectures)[:0]
}
for !in.IsDelim(']') {
- var v130 specs_go.Arch
- v130 = specs_go.Arch(in.String())
- out.Architectures = append(out.Architectures, v130)
+ var v133 specs_go.Arch
+ v133 = specs_go.Arch(in.String())
+ out.Architectures = append(out.Architectures, v133)
in.WantComma()
}
in.Delim(']')
@@ -4958,9 +5000,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Syscalls = (out.Syscalls)[:0]
}
for !in.IsDelim(']') {
- var v131 specs_go.LinuxSyscall
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo29(in, &v131)
- out.Syscalls = append(out.Syscalls, v131)
+ var v134 specs_go.LinuxSyscall
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo29(in, &v134)
+ out.Syscalls = append(out.Syscalls, v134)
in.WantComma()
}
in.Delim(']')
@@ -4999,11 +5041,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v132, v133 := range in.Architectures {
- if v132 > 0 {
+ for v135, v136 := range in.Architectures {
+ if v135 > 0 {
out.RawByte(',')
}
- out.String(string(v133))
+ out.String(string(v136))
}
out.RawByte(']')
}
@@ -5018,11 +5060,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v134, v135 := range in.Syscalls {
- if v134 > 0 {
+ for v137, v138 := range in.Syscalls {
+ if v137 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo29(out, v135)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo29(out, v138)
}
out.RawByte(']')
}
@@ -5064,9 +5106,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Names = (out.Names)[:0]
}
for !in.IsDelim(']') {
- var v136 string
- v136 = string(in.String())
- out.Names = append(out.Names, v136)
+ var v139 string
+ v139 = string(in.String())
+ out.Names = append(out.Names, v139)
in.WantComma()
}
in.Delim(']')
@@ -5089,9 +5131,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Args = (out.Args)[:0]
}
for !in.IsDelim(']') {
- var v137 specs_go.LinuxSeccompArg
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo30(in, &v137)
- out.Args = append(out.Args, v137)
+ var v140 specs_go.LinuxSeccompArg
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo30(in, &v140)
+ out.Args = append(out.Args, v140)
in.WantComma()
}
in.Delim(']')
@@ -5122,11 +5164,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
out.RawString("null")
} else {
out.RawByte('[')
- for v138, v139 := range in.Names {
- if v138 > 0 {
+ for v141, v142 := range in.Names {
+ if v141 > 0 {
out.RawByte(',')
}
- out.String(string(v139))
+ out.String(string(v142))
}
out.RawByte(']')
}
@@ -5151,11 +5193,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v140, v141 := range in.Args {
- if v140 > 0 {
+ for v143, v144 := range in.Args {
+ if v143 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo30(out, v141)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo30(out, v144)
}
out.RawByte(']')
}
@@ -5482,9 +5524,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Devices = (out.Devices)[:0]
}
for !in.IsDelim(']') {
- var v142 specs_go.LinuxDeviceCgroup
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo31(in, &v142)
- out.Devices = append(out.Devices, v142)
+ var v145 specs_go.LinuxDeviceCgroup
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo31(in, &v145)
+ out.Devices = append(out.Devices, v145)
in.WantComma()
}
in.Delim(']')
@@ -5545,9 +5587,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.HugepageLimits = (out.HugepageLimits)[:0]
}
for !in.IsDelim(']') {
- var v143 specs_go.LinuxHugepageLimit
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo36(in, &v143)
- out.HugepageLimits = append(out.HugepageLimits, v143)
+ var v146 specs_go.LinuxHugepageLimit
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo36(in, &v146)
+ out.HugepageLimits = append(out.HugepageLimits, v146)
in.WantComma()
}
in.Delim(']')
@@ -5575,9 +5617,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
for !in.IsDelim('}') {
key := string(in.String())
in.WantColon()
- var v144 specs_go.LinuxRdma
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo38(in, &v144)
- (out.Rdma)[key] = v144
+ var v147 specs_go.LinuxRdma
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo38(in, &v147)
+ (out.Rdma)[key] = v147
in.WantComma()
}
in.Delim('}')
@@ -5606,11 +5648,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v145, v146 := range in.Devices {
- if v145 > 0 {
+ for v148, v149 := range in.Devices {
+ if v148 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo31(out, v146)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo31(out, v149)
}
out.RawByte(']')
}
@@ -5665,11 +5707,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v147, v148 := range in.HugepageLimits {
- if v147 > 0 {
+ for v150, v151 := range in.HugepageLimits {
+ if v150 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo36(out, v148)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo36(out, v151)
}
out.RawByte(']')
}
@@ -5694,16 +5736,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('{')
- v149First := true
- for v149Name, v149Value := range in.Rdma {
- if v149First {
- v149First = false
+ v152First := true
+ for v152Name, v152Value := range in.Rdma {
+ if v152First {
+ v152First = false
} else {
out.RawByte(',')
}
- out.String(string(v149Name))
+ out.String(string(v152Name))
out.RawByte(':')
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo38(out, v149Value)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo38(out, v152Value)
}
out.RawByte('}')
}
@@ -5830,9 +5872,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Priorities = (out.Priorities)[:0]
}
for !in.IsDelim(']') {
- var v150 specs_go.LinuxInterfacePriority
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo39(in, &v150)
- out.Priorities = append(out.Priorities, v150)
+ var v153 specs_go.LinuxInterfacePriority
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo39(in, &v153)
+ out.Priorities = append(out.Priorities, v153)
in.WantComma()
}
in.Delim(']')
@@ -5871,11 +5913,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v151, v152 := range in.Priorities {
- if v151 > 0 {
+ for v154, v155 := range in.Priorities {
+ if v154 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo39(out, v152)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo39(out, v155)
}
out.RawByte(']')
}
@@ -6055,9 +6097,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.WeightDevice = (out.WeightDevice)[:0]
}
for !in.IsDelim(']') {
- var v153 specs_go.LinuxWeightDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo40(in, &v153)
- out.WeightDevice = append(out.WeightDevice, v153)
+ var v156 specs_go.LinuxWeightDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo40(in, &v156)
+ out.WeightDevice = append(out.WeightDevice, v156)
in.WantComma()
}
in.Delim(']')
@@ -6078,9 +6120,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.ThrottleReadBpsDevice = (out.ThrottleReadBpsDevice)[:0]
}
for !in.IsDelim(']') {
- var v154 specs_go.LinuxThrottleDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v154)
- out.ThrottleReadBpsDevice = append(out.ThrottleReadBpsDevice, v154)
+ var v157 specs_go.LinuxThrottleDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v157)
+ out.ThrottleReadBpsDevice = append(out.ThrottleReadBpsDevice, v157)
in.WantComma()
}
in.Delim(']')
@@ -6101,9 +6143,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.ThrottleWriteBpsDevice = (out.ThrottleWriteBpsDevice)[:0]
}
for !in.IsDelim(']') {
- var v155 specs_go.LinuxThrottleDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v155)
- out.ThrottleWriteBpsDevice = append(out.ThrottleWriteBpsDevice, v155)
+ var v158 specs_go.LinuxThrottleDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v158)
+ out.ThrottleWriteBpsDevice = append(out.ThrottleWriteBpsDevice, v158)
in.WantComma()
}
in.Delim(']')
@@ -6124,9 +6166,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.ThrottleReadIOPSDevice = (out.ThrottleReadIOPSDevice)[:0]
}
for !in.IsDelim(']') {
- var v156 specs_go.LinuxThrottleDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v156)
- out.ThrottleReadIOPSDevice = append(out.ThrottleReadIOPSDevice, v156)
+ var v159 specs_go.LinuxThrottleDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v159)
+ out.ThrottleReadIOPSDevice = append(out.ThrottleReadIOPSDevice, v159)
in.WantComma()
}
in.Delim(']')
@@ -6147,9 +6189,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.ThrottleWriteIOPSDevice = (out.ThrottleWriteIOPSDevice)[:0]
}
for !in.IsDelim(']') {
- var v157 specs_go.LinuxThrottleDevice
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v157)
- out.ThrottleWriteIOPSDevice = append(out.ThrottleWriteIOPSDevice, v157)
+ var v160 specs_go.LinuxThrottleDevice
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(in, &v160)
+ out.ThrottleWriteIOPSDevice = append(out.ThrottleWriteIOPSDevice, v160)
in.WantComma()
}
in.Delim(']')
@@ -6198,11 +6240,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v158, v159 := range in.WeightDevice {
- if v158 > 0 {
+ for v161, v162 := range in.WeightDevice {
+ if v161 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo40(out, v159)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo40(out, v162)
}
out.RawByte(']')
}
@@ -6217,11 +6259,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v160, v161 := range in.ThrottleReadBpsDevice {
- if v160 > 0 {
+ for v163, v164 := range in.ThrottleReadBpsDevice {
+ if v163 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v161)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v164)
}
out.RawByte(']')
}
@@ -6236,11 +6278,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v162, v163 := range in.ThrottleWriteBpsDevice {
- if v162 > 0 {
+ for v165, v166 := range in.ThrottleWriteBpsDevice {
+ if v165 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v163)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v166)
}
out.RawByte(']')
}
@@ -6255,11 +6297,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v164, v165 := range in.ThrottleReadIOPSDevice {
- if v164 > 0 {
+ for v167, v168 := range in.ThrottleReadIOPSDevice {
+ if v167 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v165)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v168)
}
out.RawByte(']')
}
@@ -6274,11 +6316,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v166, v167 := range in.ThrottleWriteIOPSDevice {
- if v166 > 0 {
+ for v169, v170 := range in.ThrottleWriteIOPSDevice {
+ if v169 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v167)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo41(out, v170)
}
out.RawByte(']')
}
@@ -7053,9 +7095,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Prestart = (out.Prestart)[:0]
}
for !in.IsDelim(']') {
- var v168 specs_go.Hook
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v168)
- out.Prestart = append(out.Prestart, v168)
+ var v171 specs_go.Hook
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v171)
+ out.Prestart = append(out.Prestart, v171)
in.WantComma()
}
in.Delim(']')
@@ -7076,9 +7118,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Poststart = (out.Poststart)[:0]
}
for !in.IsDelim(']') {
- var v169 specs_go.Hook
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v169)
- out.Poststart = append(out.Poststart, v169)
+ var v172 specs_go.Hook
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v172)
+ out.Poststart = append(out.Poststart, v172)
in.WantComma()
}
in.Delim(']')
@@ -7099,9 +7141,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Poststop = (out.Poststop)[:0]
}
for !in.IsDelim(']') {
- var v170 specs_go.Hook
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v170)
- out.Poststop = append(out.Poststop, v170)
+ var v173 specs_go.Hook
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(in, &v173)
+ out.Poststop = append(out.Poststop, v173)
in.WantComma()
}
in.Delim(']')
@@ -7130,11 +7172,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v171, v172 := range in.Prestart {
- if v171 > 0 {
+ for v174, v175 := range in.Prestart {
+ if v174 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v172)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v175)
}
out.RawByte(']')
}
@@ -7149,11 +7191,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v173, v174 := range in.Poststart {
- if v173 > 0 {
+ for v176, v177 := range in.Poststart {
+ if v176 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v174)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v177)
}
out.RawByte(']')
}
@@ -7168,11 +7210,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v175, v176 := range in.Poststop {
- if v175 > 0 {
+ for v178, v179 := range in.Poststop {
+ if v178 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v176)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo(out, v179)
}
out.RawByte(']')
}
@@ -7220,9 +7262,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Options = (out.Options)[:0]
}
for !in.IsDelim(']') {
- var v177 string
- v177 = string(in.String())
- out.Options = append(out.Options, v177)
+ var v180 string
+ v180 = string(in.String())
+ out.Options = append(out.Options, v180)
in.WantComma()
}
in.Delim(']')
@@ -7281,11 +7323,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v178, v179 := range in.Options {
- if v178 > 0 {
+ for v181, v182 := range in.Options {
+ if v181 > 0 {
out.RawByte(',')
}
- out.String(string(v179))
+ out.String(string(v182))
}
out.RawByte(']')
}
@@ -7400,9 +7442,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Args = (out.Args)[:0]
}
for !in.IsDelim(']') {
- var v180 string
- v180 = string(in.String())
- out.Args = append(out.Args, v180)
+ var v183 string
+ v183 = string(in.String())
+ out.Args = append(out.Args, v183)
in.WantComma()
}
in.Delim(']')
@@ -7423,9 +7465,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Env = (out.Env)[:0]
}
for !in.IsDelim(']') {
- var v181 string
- v181 = string(in.String())
- out.Env = append(out.Env, v181)
+ var v184 string
+ v184 = string(in.String())
+ out.Env = append(out.Env, v184)
in.WantComma()
}
in.Delim(']')
@@ -7458,9 +7500,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Rlimits = (out.Rlimits)[:0]
}
for !in.IsDelim(']') {
- var v182 specs_go.POSIXRlimit
- easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo45(in, &v182)
- out.Rlimits = append(out.Rlimits, v182)
+ var v185 specs_go.POSIXRlimit
+ easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo45(in, &v185)
+ out.Rlimits = append(out.Rlimits, v185)
in.WantComma()
}
in.Delim(']')
@@ -7537,11 +7579,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
out.RawString("null")
} else {
out.RawByte('[')
- for v183, v184 := range in.Args {
- if v183 > 0 {
+ for v186, v187 := range in.Args {
+ if v186 > 0 {
out.RawByte(',')
}
- out.String(string(v184))
+ out.String(string(v187))
}
out.RawByte(']')
}
@@ -7556,11 +7598,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v185, v186 := range in.Env {
- if v185 > 0 {
+ for v188, v189 := range in.Env {
+ if v188 > 0 {
out.RawByte(',')
}
- out.String(string(v186))
+ out.String(string(v189))
}
out.RawByte(']')
}
@@ -7595,11 +7637,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v187, v188 := range in.Rlimits {
- if v187 > 0 {
+ for v190, v191 := range in.Rlimits {
+ if v190 > 0 {
out.RawByte(',')
}
- easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo45(out, v188)
+ easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainersRuntimeSpecSpecsGo45(out, v191)
}
out.RawByte(']')
}
@@ -7752,9 +7794,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Bounding = (out.Bounding)[:0]
}
for !in.IsDelim(']') {
- var v189 string
- v189 = string(in.String())
- out.Bounding = append(out.Bounding, v189)
+ var v192 string
+ v192 = string(in.String())
+ out.Bounding = append(out.Bounding, v192)
in.WantComma()
}
in.Delim(']')
@@ -7775,9 +7817,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Effective = (out.Effective)[:0]
}
for !in.IsDelim(']') {
- var v190 string
- v190 = string(in.String())
- out.Effective = append(out.Effective, v190)
+ var v193 string
+ v193 = string(in.String())
+ out.Effective = append(out.Effective, v193)
in.WantComma()
}
in.Delim(']')
@@ -7798,9 +7840,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Inheritable = (out.Inheritable)[:0]
}
for !in.IsDelim(']') {
- var v191 string
- v191 = string(in.String())
- out.Inheritable = append(out.Inheritable, v191)
+ var v194 string
+ v194 = string(in.String())
+ out.Inheritable = append(out.Inheritable, v194)
in.WantComma()
}
in.Delim(']')
@@ -7821,9 +7863,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Permitted = (out.Permitted)[:0]
}
for !in.IsDelim(']') {
- var v192 string
- v192 = string(in.String())
- out.Permitted = append(out.Permitted, v192)
+ var v195 string
+ v195 = string(in.String())
+ out.Permitted = append(out.Permitted, v195)
in.WantComma()
}
in.Delim(']')
@@ -7844,9 +7886,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.Ambient = (out.Ambient)[:0]
}
for !in.IsDelim(']') {
- var v193 string
- v193 = string(in.String())
- out.Ambient = append(out.Ambient, v193)
+ var v196 string
+ v196 = string(in.String())
+ out.Ambient = append(out.Ambient, v196)
in.WantComma()
}
in.Delim(']')
@@ -7875,11 +7917,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v194, v195 := range in.Bounding {
- if v194 > 0 {
+ for v197, v198 := range in.Bounding {
+ if v197 > 0 {
out.RawByte(',')
}
- out.String(string(v195))
+ out.String(string(v198))
}
out.RawByte(']')
}
@@ -7894,11 +7936,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v196, v197 := range in.Effective {
- if v196 > 0 {
+ for v199, v200 := range in.Effective {
+ if v199 > 0 {
out.RawByte(',')
}
- out.String(string(v197))
+ out.String(string(v200))
}
out.RawByte(']')
}
@@ -7913,11 +7955,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v198, v199 := range in.Inheritable {
- if v198 > 0 {
+ for v201, v202 := range in.Inheritable {
+ if v201 > 0 {
out.RawByte(',')
}
- out.String(string(v199))
+ out.String(string(v202))
}
out.RawByte(']')
}
@@ -7932,11 +7974,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v200, v201 := range in.Permitted {
- if v200 > 0 {
+ for v203, v204 := range in.Permitted {
+ if v203 > 0 {
out.RawByte(',')
}
- out.String(string(v201))
+ out.String(string(v204))
}
out.RawByte(']')
}
@@ -7951,11 +7993,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v202, v203 := range in.Ambient {
- if v202 > 0 {
+ for v205, v206 := range in.Ambient {
+ if v205 > 0 {
out.RawByte(',')
}
- out.String(string(v203))
+ out.String(string(v206))
}
out.RawByte(']')
}
@@ -8001,9 +8043,9 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComOpencontainer
out.AdditionalGids = (out.AdditionalGids)[:0]
}
for !in.IsDelim(']') {
- var v204 uint32
- v204 = uint32(in.Uint32())
- out.AdditionalGids = append(out.AdditionalGids, v204)
+ var v207 uint32
+ v207 = uint32(in.Uint32())
+ out.AdditionalGids = append(out.AdditionalGids, v207)
in.WantComma()
}
in.Delim(']')
@@ -8054,11 +8096,11 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodVendorGithubComOpencontainer
}
{
out.RawByte('[')
- for v205, v206 := range in.AdditionalGids {
- if v205 > 0 {
+ for v208, v209 := range in.AdditionalGids {
+ if v208 > 0 {
out.RawByte(',')
}
- out.Uint32(uint32(v206))
+ out.Uint32(uint32(v209))
}
out.RawByte(']')
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index f2e54aeef..9b07198bc 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -79,9 +79,9 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
Name: config.Name,
Driver: driverData.Name,
MountLabel: config.MountLabel,
+ ProcessLabel: config.ProcessLabel,
EffectiveCaps: spec.Process.Capabilities.Effective,
BoundingCaps: spec.Process.Capabilities.Bounding,
- ProcessLabel: spec.Process.SelinuxLabel,
AppArmorProfile: spec.Process.ApparmorProfile,
ExecIDs: execIDs,
GraphDriver: driverData,
@@ -93,6 +93,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
HairpinMode: false, // TODO
LinkLocalIPv6Address: "", // TODO - do we even support IPv6?
LinkLocalIPv6PrefixLen: 0, // TODO - do we even support IPv6?
+
Ports: []ocicni.PortMapping{}, // TODO - maybe worth it to put this in Docker format?
SandboxKey: "", // Network namespace path
SecondaryIPAddresses: nil, // TODO - do we support this?
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 40159194f..cb6b940fd 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -194,12 +194,18 @@ func (c *Container) setupStorage(ctx context.Context) error {
return errors.Wrapf(ErrInvalidArg, "must provide image ID and image name to use an image")
}
- var options *storage.ContainerOptions
- if c.config.Rootfs == "" {
- options = &storage.ContainerOptions{c.config.IDMappings}
+ options := storage.ContainerOptions{
+ IDMappingOptions: storage.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ },
+ LabelOpts: c.config.LabelOpts,
+ }
+ if c.config.Rootfs == "" {
+ options.IDMappingOptions = c.config.IDMappings
}
- containerInfo, err := c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, c.config.MountLabel, options)
+ containerInfo, err := c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options)
if err != nil {
return errors.Wrapf(err, "error creating container storage")
}
@@ -225,6 +231,8 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
}
+ c.config.ProcessLabel = containerInfo.ProcessLabel
+ c.config.MountLabel = containerInfo.MountLabel
c.config.StaticDir = containerInfo.Dir
c.state.RunDir = containerInfo.RunDir
c.state.DestinationRunDir = c.state.RunDir
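The setupStorage() hunk above now always builds a storage.ContainerOptions value (host UID/GID mapping by default, with the container's label options attached) and only swaps in the configured ID mappings when an image-backed rootfs is used. A minimal sketch of constructing that options value, assuming the vendored containers/storage with the LabelOpts field; the label strings are hypothetical:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Default to host UID/GID mappings; LabelOpts would normally come from
	// the container config (the values below are made up for illustration).
	options := storage.ContainerOptions{
		IDMappingOptions: storage.IDMappingOptions{
			HostUIDMapping: true,
			HostGIDMapping: true,
		},
		LabelOpts: []string{"user:system_u", "type:container_t"},
	}
	fmt.Printf("%+v\n", options)
}

The options are then passed by value to CreateContainerStorage, which hands &options on to the store.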
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 9920efd55..b25645e5c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -18,6 +18,7 @@ import (
cnitypes "github.com/containernetworking/cni/pkg/types/current"
crioAnnotations "github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/chrootuser"
+ "github.com/containers/libpod/pkg/criu"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/idtools"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -368,6 +369,10 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
+ if !criu.CheckForCriu() {
+ return errors.Errorf("checkpointing a container requires at least CRIU %d", criu.MinCriuVersion)
+ }
+
if c.state.State != ContainerStateRunning {
return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
@@ -407,6 +412,10 @@ func (c *Container) checkpoint(ctx context.Context, keep bool) (err error) {
func (c *Container) restore(ctx context.Context, keep bool) (err error) {
+ if !criu.CheckForCriu() {
+ return errors.Errorf("restoring a container requires at least CRIU %d", criu.MinCriuVersion)
+ }
+
if (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) {
return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
diff --git a/libpod/oci.go b/libpod/oci.go
index f6d320017..ca8f967c4 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "bufio"
"bytes"
"encoding/json"
"fmt"
@@ -17,6 +18,7 @@ import (
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
"github.com/coreos/go-systemd/activation"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -230,7 +232,7 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreContainer bool) (err error) {
var stderrBuf bytes.Buffer
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -377,6 +379,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
childPipe.Close()
return err
}
+ defer cmd.Wait()
// We don't need childPipe on the parent side
childPipe.Close()
@@ -416,7 +419,12 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
ch := make(chan syncStruct)
go func() {
var si *syncInfo
- if err = json.NewDecoder(parentPipe).Decode(&si); err != nil {
+ rdr := bufio.NewReader(parentPipe)
+ b, err := rdr.ReadBytes('\n')
+ if err != nil {
+ ch <- syncStruct{err: err}
+ }
+ if err := json.Unmarshal(b, &si); err != nil {
ch <- syncStruct{err: err}
return
}
@@ -446,7 +454,7 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
func (r *OCIRuntime) updateContainerStatus(ctr *Container) error {
state := new(spec.State)
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -477,6 +485,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container) error {
}
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
}
+ defer cmd.Wait()
errPipe.Close()
out, err := ioutil.ReadAll(outPipe)
@@ -556,7 +565,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container) error {
// Sets time the container was started, but does not save it.
func (r *OCIRuntime) startContainer(ctr *Container) error {
// TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers?
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -573,7 +582,7 @@ func (r *OCIRuntime) startContainer(ctr *Container) error {
// killContainer sends the given signal to the given container
func (r *OCIRuntime) killContainer(ctr *Container, signal uint) error {
logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID())
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -636,7 +645,7 @@ func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
args = []string{"kill", "--all", ctr.ID(), "KILL"}
}
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -667,7 +676,7 @@ func (r *OCIRuntime) deleteContainer(ctr *Container) error {
// pauseContainer pauses the given container
func (r *OCIRuntime) pauseContainer(ctr *Container) error {
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -677,7 +686,7 @@ func (r *OCIRuntime) pauseContainer(ctr *Container) error {
// unpauseContainer unpauses the given container
func (r *OCIRuntime) unpauseContainer(ctr *Container) error {
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -698,7 +707,7 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
return nil, errors.Wrapf(ErrEmptyID, "must provide a session ID for exec")
}
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return nil, err
}
@@ -780,7 +789,7 @@ func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
if len(execSessions) == 0 {
return nil
}
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return err
}
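The createOCIContainer() hunk above replaces json.NewDecoder(parentPipe).Decode(&si) with a buffered read of a single newline-terminated message followed by json.Unmarshal, so nothing past the first message is pulled off the pipe. A minimal sketch of that read pattern, using strings.NewReader in place of parentPipe and a hypothetical syncInfo struct:

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

// syncInfo is a stand-in for the struct reported over the sync pipe.
type syncInfo struct {
	Pid int `json:"pid"`
}

func main() {
	// One newline-terminated JSON document, possibly followed by more data.
	pipe := strings.NewReader("{\"pid\": 1234}\nleftover")

	rdr := bufio.NewReader(pipe)
	b, err := rdr.ReadBytes('\n') // read only the first line
	if err != nil {
		fmt.Println("read error:", err)
		return
	}

	var si syncInfo
	if err := json.Unmarshal(b, &si); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println("reported PID:", si.Pid) // reported PID: 1234
}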
diff --git a/libpod/options.go b/libpod/options.go
index 228b38ba5..8d044313b 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -391,11 +391,7 @@ func WithSecLabels(labelOpts []string) CtrCreateOption {
if ctr.valid {
return ErrCtrFinalized
}
- var err error
- ctr.config.ProcessLabel, ctr.config.MountLabel, err = ctr.runtime.initLabels(labelOpts)
- if err != nil {
- return errors.Wrapf(err, "failed to init labels")
- }
+ ctr.config.LabelOpts = labelOpts
return nil
}
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 985af2849..1b26f851f 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -1,13 +1,11 @@
package libpod
import (
- "fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sync"
- "syscall"
"github.com/BurntSushi/toml"
is "github.com/containers/image/storage"
@@ -17,6 +15,7 @@ import (
"github.com/containers/libpod/pkg/hooks"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/namesgenerator"
@@ -215,46 +214,12 @@ var (
}
)
-// GetRootlessRuntimeDir returns the runtime directory when running as non root
-func GetRootlessRuntimeDir() (string, error) {
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
- uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
- if runtimeDir == "" {
- tmpDir := filepath.Join("/run", "user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- tmpDir := filepath.Join(os.TempDir(), "user", uid)
- os.MkdirAll(tmpDir, 0700)
- st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
- runtimeDir = tmpDir
- }
- }
- if runtimeDir == "" {
- home := os.Getenv("HOME")
- if home == "" {
- return "", fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
- }
- resolvedHome, err := filepath.EvalSymlinks(home)
- if err != nil {
- return "", errors.Wrapf(err, "cannot resolve %s", home)
- }
- runtimeDir = filepath.Join(resolvedHome, "rundir")
- }
- return runtimeDir, nil
-}
-
func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() {
return "/var/run/libpod", nil
}
- rootlessRuntimeDir, err := GetRootlessRuntimeDir()
+ rootlessRuntimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return "", err
}
@@ -269,7 +234,7 @@ func SetXdgRuntimeDir(val string) error {
}
if val == "" {
var err error
- val, err = GetRootlessRuntimeDir()
+ val, err = util.GetRootlessRuntimeDir()
if err != nil {
return err
}
@@ -309,7 +274,7 @@ func NewRuntime(options ...RuntimeOption) (runtime *Runtime, err error) {
foundConfig = false
}
- runtimeDir, err := GetRootlessRuntimeDir()
+ runtimeDir, err := util.GetRootlessRuntimeDir()
if err != nil {
return nil, err
}
@@ -543,7 +508,7 @@ func makeRuntime(runtime *Runtime) (err error) {
// Set up a firewall backend
backendType := ""
- if os.Geteuid() != 0 {
+ if rootless.IsRootless() {
backendType = "none"
}
fwBackend, err := firewall.GetBackend(backendType)
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 4256a84a0..b63726f29 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/ulule/deepcopier"
@@ -329,10 +328,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
- if r.config.EnableLabeling {
- label.ReleaseLabel(c.ProcessLabel())
- r.reserveLabels()
- }
// Delete the container.
// Not needed in Configured and Exited states, where the container
// doesn't exist in the runtime
@@ -467,28 +462,3 @@ func (r *Runtime) GetLatestContainer() (*Container, error) {
}
return ctrs[lastCreatedIndex], nil
}
-
-// reserveLabels walks the list of containers and reserves the label, so new containers will not
-// get them.
-// TODO Performance wise this should only run if the state has changed since the last time it was run.
-func (r *Runtime) reserveLabels() error {
- containers, err := r.state.AllContainers()
- if err != nil {
- return err
- }
- for _, ctr := range containers {
- label.ReserveLabel(ctr.ProcessLabel())
- }
- return nil
-}
-
-// initLabels allocates a new label to return to the caller
-func (r *Runtime) initLabels(labelOpts []string) (string, string, error) {
- if !r.config.EnableLabeling {
- return "", "", nil
- }
- if err := r.reserveLabels(); err != nil {
- return "", "", errors.Wrapf(err, "unable to reserve labels")
- }
- return label.InitLabels(labelOpts)
-}
diff --git a/libpod/storage.go b/libpod/storage.go
index 10827f13e..10026efda 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -27,10 +27,13 @@ func getStorageService(store storage.Store) (*storageService, error) {
// of its nonvolatile and volatile per-container directories, along with a copy
// of the configuration blob from the image that was used to create the
// container, if the image had a configuration.
+// It also returns the ProcessLabel and MountLabel selected for the container
type ContainerInfo struct {
- Dir string
- RunDir string
- Config *v1.Image
+ Dir string
+ RunDir string
+ Config *v1.Image
+ ProcessLabel string
+ MountLabel string
}
// RuntimeContainerMetadata is the structure that we encode as JSON and store
@@ -59,7 +62,7 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in an Image object in the future.
-func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID, mountLabel string, options *storage.ContainerOptions) (cinfo ContainerInfo, err error) {
+func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (cinfo ContainerInfo, err error) {
var imageConfig *v1.Image
if imageName != "" {
var ref types.ImageReference
@@ -101,7 +104,6 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
ImageID: imageID,
ContainerName: containerName,
CreatedAt: time.Now().Unix(),
- MountLabel: mountLabel,
}
mdata, err := json.Marshal(&metadata)
if err != nil {
@@ -111,15 +113,7 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
// Build the container.
names := []string{containerName}
- if options == nil {
- options = &storage.ContainerOptions{
- IDMappingOptions: storage.IDMappingOptions{
- HostUIDMapping: true,
- HostGIDMapping: true,
- },
- }
- }
- container, err := r.store.CreateContainer(containerID, names, imageID, "", string(mdata), options)
+ container, err := r.store.CreateContainer(containerID, names, imageID, "", string(mdata), &options)
if err != nil {
logrus.Debugf("failed to create container %s(%s): %v", metadata.ContainerName, containerID, err)
@@ -167,9 +161,11 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
logrus.Debugf("container %q has run directory %q", container.ID, containerRunDir)
return ContainerInfo{
- Dir: containerDir,
- RunDir: containerRunDir,
- Config: imageConfig,
+ Dir: containerDir,
+ RunDir: containerRunDir,
+ Config: imageConfig,
+ ProcessLabel: container.ProcessLabel(),
+ MountLabel: container.MountLabel(),
}, nil
}
diff --git a/pkg/criu/criu.go b/pkg/criu/criu.go
new file mode 100644
index 000000000..f4cce238a
--- /dev/null
+++ b/pkg/criu/criu.go
@@ -0,0 +1,19 @@
+package criu
+
+import (
+ "github.com/checkpoint-restore/go-criu"
+)
+
+// MinCriuVersion for Podman at least CRIU 3.11 is required
+const MinCriuVersion = 31100
+
+// CheckForCriu uses CRIU's go bindings to check if the CRIU
+// binary exists and if it is at least the version Podman needs.
+func CheckForCriu() bool {
+ c := criu.MakeCriu()
+ result, err := c.IsCriuAtLeast(MinCriuVersion)
+ if err != nil {
+ return false
+ }
+ return result
+}
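The new pkg/criu package is what the checkpoint/restore paths and the e2e tests now call before attempting a checkpoint. A minimal sketch of how a caller might gate on it, assuming only the helper and constant added above:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/criu"
)

func main() {
	// CheckForCriu returns false when the criu binary is missing or older
	// than MinCriuVersion (3.11, encoded as 31100).
	if !criu.CheckForCriu() {
		fmt.Printf("checkpoint/restore unavailable: need at least CRIU %d\n", criu.MinCriuVersion)
		return
	}
	fmt.Println("CRIU is available and new enough")
}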
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 28dd015bd..9107eec5c 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -3,10 +3,13 @@ package util
import (
"fmt"
"os"
+ "path/filepath"
"strconv"
"strings"
+ "syscall"
"github.com/containers/image/types"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/image-spec/specs-go/v1"
@@ -210,3 +213,84 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
}
return &options, nil
}
+
+// GetRootlessRuntimeDir returns the runtime directory when running as non root
+func GetRootlessRuntimeDir() (string, error) {
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+ uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
+ if runtimeDir == "" {
+ tmpDir := filepath.Join("/run", "user", uid)
+ os.MkdirAll(tmpDir, 0700)
+ st, err := os.Stat(tmpDir)
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
+ runtimeDir = tmpDir
+ }
+ }
+ if runtimeDir == "" {
+ tmpDir := filepath.Join(os.TempDir(), "user", uid)
+ os.MkdirAll(tmpDir, 0700)
+ st, err := os.Stat(tmpDir)
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Getuid() && st.Mode().Perm() == 0700 {
+ runtimeDir = tmpDir
+ }
+ }
+ if runtimeDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
+ }
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", errors.Wrapf(err, "cannot resolve %s", home)
+ }
+ runtimeDir = filepath.Join(resolvedHome, "rundir")
+ }
+ return runtimeDir, nil
+}
+
+// GetRootlessStorageOpts returns the storage opts for containers running as non root
+func GetRootlessStorageOpts() (storage.StoreOptions, error) {
+ var opts storage.StoreOptions
+
+ rootlessRuntime, err := GetRootlessRuntimeDir()
+ if err != nil {
+ return opts, err
+ }
+ opts.RunRoot = filepath.Join(rootlessRuntime, "run")
+
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return opts, fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return opts, errors.Wrapf(err, "cannot resolve %s", home)
+ }
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+ }
+ opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
+ opts.GraphDriverName = "vfs"
+ return opts, nil
+}
+
+// GetDefaultStoreOptions returns the storage opts for containers
+func GetDefaultStoreOptions() (storage.StoreOptions, error) {
+ storageOpts := storage.DefaultStoreOptions
+ if rootless.IsRootless() {
+ var err error
+ storageOpts, err = GetRootlessStorageOpts()
+ if err != nil {
+ return storageOpts, err
+ }
+
+ storageConf := filepath.Join(os.Getenv("HOME"), ".config/containers/storage.conf")
+ if _, err := os.Stat(storageConf); err == nil {
+ storage.ReloadConfigurationFile(storageConf, &storageOpts)
+ }
+ }
+ return storageOpts, nil
+}
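GetRootlessRuntimeDir now lives in pkg/util alongside the new storage-options helpers. A minimal usage sketch for the top-level helper; the resulting paths depend on whether the caller is rootless:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util"
)

func main() {
	// For root this is storage.DefaultStoreOptions; for a rootless user it
	// falls back to XDG_RUNTIME_DIR/XDG_DATA_HOME-derived paths and vfs,
	// optionally overridden by ~/.config/containers/storage.conf.
	opts, err := util.GetDefaultStoreOptions()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("graph root:", opts.GraphRoot)
	fmt.Println("run root:", opts.RunRoot)
	fmt.Println("graph driver:", opts.GraphDriverName)
}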
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 6c5d891a0..928a76324 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
+ "github.com/containers/libpod/pkg/criu"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@@ -22,10 +23,8 @@ var _ = Describe("Podman checkpoint", func() {
}
podmanTest = PodmanCreate(tempdir)
podmanTest.RestoreAllArtifacts()
- // At least CRIU 3.11 is needed
- skip, err := podmanTest.isCriuAtLeast(31100)
- if err != nil || skip {
- Skip("CRIU missing or too old.")
+ if !criu.CheckForCriu() {
+ Skip("CRIU is missing or too old.")
}
})
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index cfdc819a6..250e08704 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -111,6 +111,7 @@ var _ = Describe("Podman exec", func() {
})
It("podman exec with user only in container", func() {
+ podmanTest.RestoreArtifact(fedoraMinimal)
testUser := "test123"
setup := podmanTest.Podman([]string{"run", "--name", "test1", "-d", fedoraMinimal, "sleep", "60"})
setup.WaitWithDefaultTimeout()
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index a032b0e88..56a603f3e 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -2,7 +2,6 @@ package integration
import (
"bufio"
- "bytes"
"context"
"encoding/json"
"fmt"
@@ -65,7 +64,6 @@ type PodmanTest struct {
TempDir string
CgroupManager string
Host HostOS
- CriuBinary string
}
// HostOS is a simple struct for the test os
@@ -166,7 +164,6 @@ func PodmanCreate(tempDir string) PodmanTest {
runCBinary = "/usr/bin/runc"
}
- criuBinary := "/usr/sbin/criu"
CNIConfigDir := "/etc/cni/net.d"
p := PodmanTest{
@@ -182,7 +179,6 @@ func PodmanCreate(tempDir string) PodmanTest {
TempDir: tempDir,
CgroupManager: cgroupManager,
Host: host,
- CriuBinary: criuBinary,
}
// Setup registries.conf ENV variable
@@ -315,8 +311,14 @@ func (s *PodmanSession) OutputToString() string {
// OutputToStringArray returns the output as a []string
// where each array item is a line split by newline
func (s *PodmanSession) OutputToStringArray() []string {
+ var results []string
output := fmt.Sprintf("%s", s.Out.Contents())
- return strings.Split(output, "\n")
+ for _, line := range strings.Split(output, "\n") {
+ if line != "" {
+ results = append(results, line)
+ }
+ }
+ return results
}
// ErrorGrepString takes session stderr output and behaves like grep. it returns a bool
@@ -682,39 +684,6 @@ func (p *PodmanTest) setRegistriesConfigEnv(b []byte) {
ioutil.WriteFile(outfile, b, 0644)
}
-func (p *PodmanTest) isCriuAtLeast(version int) (bool, error) {
- cmd := exec.Command(p.CriuBinary, "-V")
- var out bytes.Buffer
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return false, err
- }
-
- var x int
- var y int
- var z int
-
- fmt.Sscanf(out.String(), "Version: %d.%d.%d", &x, &y, &z)
-
- if strings.Contains(out.String(), "GitID") {
- // If CRIU is built from git it contains a git ID.
- // If that is the case, increase minor by one as this
- // could mean we are running a development version.
- y = y + 1
- }
-
- parsed_version := x*10000 + y*100 + z
-
- fmt.Println(parsed_version)
-
- if parsed_version >= version {
- return false, nil
- } else {
- return true, nil
- }
-}
-
func resetRegistriesConfigEnv() {
os.Setenv("REGISTRIES_CONFIG_PATH", "")
}
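The OutputToStringArray() change above is what drives the expectation updates in the test diffs that follow: the helper now drops empty lines, so a command whose output ends in a newline yields one fewer entry. A minimal sketch of the new behaviour, with a standalone copy of the helper and a hypothetical sample output:

package main

import (
	"fmt"
	"strings"
)

// outputToStringArray mirrors the updated test helper: split on newlines and
// drop empty strings, so a trailing newline no longer adds a bogus entry.
func outputToStringArray(output string) []string {
	var results []string
	for _, line := range strings.Split(output, "\n") {
		if line != "" {
			results = append(results, line)
		}
	}
	return results
}

func main() {
	out := "one\ntwo\nthree\n"
	fmt.Println(len(strings.Split(out, "\n"))) // 4 with the old behaviour
	fmt.Println(len(outputToStringArray(out))) // 3 with the new behaviour
}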
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index ca39c338e..871987db0 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -42,7 +42,7 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(4))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
})
It("podman logs tail two lines", func() {
@@ -55,7 +55,7 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", "--tail", "2", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
+ Expect(len(results.OutputToStringArray())).To(Equal(2))
})
It("podman logs tail 99 lines", func() {
@@ -68,7 +68,7 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(4))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
})
It("podman logs tail 2 lines with timestamps", func() {
@@ -81,7 +81,7 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", "--tail", "2", "-t", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
+ Expect(len(results.OutputToStringArray())).To(Equal(2))
})
It("podman logs latest with since time", func() {
@@ -94,7 +94,7 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", "--since", "2017-08-07T10:10:09.056611202-04:00", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(4))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
})
It("podman logs latest with since duration", func() {
@@ -107,6 +107,6 @@ var _ = Describe("Podman logs", func() {
results := podmanTest.Podman([]string{"logs", "--since", "10m", cid})
results.WaitWithDefaultTimeout()
Expect(results.ExitCode()).To(Equal(0))
- Expect(len(results.OutputToStringArray())).To(Equal(4))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
})
})
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index 4cf685ce2..0ce1e22a8 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -77,6 +77,6 @@ var _ = Describe("Podman pod create", func() {
check := podmanTest.Podman([]string{"pod", "ps", "-q"})
check.WaitWithDefaultTimeout()
- Expect(len(check.OutputToStringArray())).To(Equal(1))
+ Expect(len(check.OutputToStringArray())).To(Equal(0))
})
})
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 8fb0c388c..f1e2375ce 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -155,7 +155,7 @@ var _ = Describe("Podman pod create", func() {
check.WaitWithDefaultTimeout()
Expect(check.ExitCode()).To(Equal(0))
PIDs := check.OutputToStringArray()
- Expect(len(PIDs)).To(Equal(4))
+ Expect(len(PIDs)).To(Equal(3))
ctrPID, _ := strconv.Atoi(PIDs[1])
infraPID, _ := strconv.Atoi(PIDs[2])
diff --git a/test/e2e/pod_top_test.go b/test/e2e/pod_top_test.go
index 0ecc8e6e8..f72456307 100644
--- a/test/e2e/pod_top_test.go
+++ b/test/e2e/pod_top_test.go
@@ -109,7 +109,7 @@ var _ = Describe("Podman top", func() {
result := podmanTest.Podman([]string{"pod", "top", podid})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- Expect(len(result.OutputToStringArray())).To(Equal(4))
+ Expect(len(result.OutputToStringArray())).To(Equal(3))
})
It("podman pod top on pod with containers in different namespace", func() {
@@ -127,6 +127,6 @@ var _ = Describe("Podman top", func() {
result := podmanTest.Podman([]string{"pod", "top", podid})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- Expect(len(result.OutputToStringArray())).To(Equal(4))
+ Expect(len(result.OutputToStringArray())).To(Equal(3))
})
})
diff --git a/test/e2e/run_entrypoint_test.go b/test/e2e/run_entrypoint_test.go
index 48ed0ce1a..5e4ef75e1 100644
--- a/test/e2e/run_entrypoint_test.go
+++ b/test/e2e/run_entrypoint_test.go
@@ -50,7 +50,7 @@ ENTRYPOINT ["grep", "Alpine", "/etc/os-release"]
session := podmanTest.Podman([]string{"run", "foobar.com/entrypoint:latest"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(3))
+ Expect(len(session.OutputToStringArray())).To(Equal(2))
})
It("podman run entrypoint with cmd", func() {
@@ -62,7 +62,7 @@ ENTRYPOINT ["grep", "Alpine", "/etc/os-release"]
session := podmanTest.Podman([]string{"run", "foobar.com/entrypoint:latest"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(5))
+ Expect(len(session.OutputToStringArray())).To(Equal(4))
})
It("podman run entrypoint with user cmd overrides image cmd", func() {
@@ -74,7 +74,7 @@ ENTRYPOINT ["grep", "Alpine", "/etc/os-release"]
session := podmanTest.Podman([]string{"run", "foobar.com/entrypoint:latest", "-i"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(6))
+ Expect(len(session.OutputToStringArray())).To(Equal(5))
})
It("podman run entrypoint with user cmd no image cmd", func() {
@@ -85,7 +85,7 @@ ENTRYPOINT ["grep", "Alpine", "/etc/os-release"]
session := podmanTest.Podman([]string{"run", "foobar.com/entrypoint:latest", "-i"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(6))
+ Expect(len(session.OutputToStringArray())).To(Equal(5))
})
It("podman run user entrypoint overrides image entrypoint and image cmd", func() {
diff --git a/test/e2e/run_passwd_test.go b/test/e2e/run_passwd_test.go
index cea457ae4..0bea092bb 100644
--- a/test/e2e/run_passwd_test.go
+++ b/test/e2e/run_passwd_test.go
@@ -32,27 +32,27 @@ var _ = Describe("Podman run passwd", func() {
})
It("podman run no user specified ", func() {
- session := podmanTest.Podman([]string{"run", ALPINE, "mount"})
+ session := podmanTest.Podman([]string{"run", BB, "mount"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.LineInOutputContains("passwd")).To(BeFalse())
})
It("podman run user specified in container", func() {
- session := podmanTest.Podman([]string{"run", "-u", "bin", ALPINE, "mount"})
+ session := podmanTest.Podman([]string{"run", "-u", "bin", BB, "mount"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.LineInOutputContains("passwd")).To(BeFalse())
})
It("podman run UID specified in container", func() {
- session := podmanTest.Podman([]string{"run", "-u", "2:1", ALPINE, "mount"})
+ session := podmanTest.Podman([]string{"run", "-u", "2:1", BB, "mount"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.LineInOutputContains("passwd")).To(BeFalse())
})
It("podman run UID not specified in container", func() {
- session := podmanTest.Podman([]string{"run", "-u", "20001:1", ALPINE, "mount"})
+ session := podmanTest.Podman([]string{"run", "-u", "20001:1", BB, "mount"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.LineInOutputContains("passwd")).To(BeTrue())
diff --git a/test/e2e/run_privileged_test.go b/test/e2e/run_privileged_test.go
index 4f81c7c45..0a62d8505 100644
--- a/test/e2e/run_privileged_test.go
+++ b/test/e2e/run_privileged_test.go
@@ -75,7 +75,7 @@ var _ = Describe("Podman privileged container tests", func() {
session := podmanTest.Podman([]string{"run", "-t", "busybox", "ls", "-l", "/dev"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(Equal(18))
+ Expect(len(session.OutputToStringArray())).To(Equal(17))
})
It("podman privileged should inherit host devices", func() {
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 052dd0566..cb436ccca 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -283,7 +283,7 @@ var _ = Describe("Podman run", func() {
})
It("podman run notify_socket", func() {
- sock := "/run/sock"
+ sock := "/run/notify"
os.Setenv("NOTIFY_SOCKET", sock)
session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "printenv", "NOTIFY_SOCKET"})
session.WaitWithDefaultTimeout()
@@ -577,6 +577,7 @@ USER mail`
})
It("podman run findmnt nothing shared", func() {
+ podmanTest.RestoreArtifact(fedoraMinimal)
vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
err := os.MkdirAll(vol1, 0755)
Expect(err).To(BeNil())
@@ -592,6 +593,7 @@ USER mail`
})
It("podman run findmnt shared", func() {
+ podmanTest.RestoreArtifact(fedoraMinimal)
vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
err := os.MkdirAll(vol1, 0755)
Expect(err).To(BeNil())
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index 8096f58b2..e456d7114 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -31,12 +31,6 @@ var _ = Describe("Podman stats", func() {
GinkgoWriter.Write([]byte(timedResult))
})
- It("podman stats should run with no containers", func() {
- session := podmanTest.Podman([]string{"stats", "--no-stream"})
- session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
- })
-
It("podman stats with bogus container", func() {
session := podmanTest.Podman([]string{"stats", "--no-stream", "123"})
session.WaitWithDefaultTimeout()
@@ -53,15 +47,6 @@ var _ = Describe("Podman stats", func() {
Expect(session.ExitCode()).To(Equal(0))
})
- It("podman stats on a running container no id", func() {
- session := podmanTest.RunTopContainer("")
- session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"stats", "--no-stream"})
- session.WaitWithDefaultTimeout()
- Expect(session.ExitCode()).To(Equal(0))
- })
-
It("podman stats on all containers", func() {
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
@@ -75,7 +60,7 @@ var _ = Describe("Podman stats", func() {
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"stats", "--no-stream", "--format", "\"{{.Container}}\""})
+ session = podmanTest.Podman([]string{"stats", "--all", "--no-stream", "--format", "\"{{.Container}}\""})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
})
@@ -84,7 +69,7 @@ var _ = Describe("Podman stats", func() {
session := podmanTest.RunTopContainer("")
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"stats", "--no-stream", "--format", "json"})
+ session = podmanTest.Podman([]string{"stats", "--all", "--no-stream", "--format", "json"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.IsJSONOutputValid()).To(BeTrue())
diff --git a/test/e2e/version_test.go b/test/e2e/version_test.go
index 4670c6e1e..6caf0e3dd 100644
--- a/test/e2e/version_test.go
+++ b/test/e2e/version_test.go
@@ -34,6 +34,6 @@ var _ = Describe("Podman version", func() {
session := podmanTest.Podman([]string{"version"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 3))
+ Expect(len(session.OutputToStringArray())).To(BeNumerically(">", 2))
})
})
diff --git a/vendor.conf b/vendor.conf
index 2e6925e5e..8004f9056 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -6,12 +6,13 @@ github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982
github.com/blang/semver v3.5.0
github.com/boltdb/bolt master
github.com/buger/goterm 2f8dfbc7dbbff5dd1d391ed91482c24df243b2d3
+github.com/checkpoint-restore/go-criu master
github.com/containerd/cgroups 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1
github.com/containers/image bd10b1b53b2976f215b3f2f848fb8e7cad779aeb
-github.com/containers/storage 41294c85d97bef688e18f710402895dbecde3308
+github.com/containers/storage 24f0de45708bc6e4c8062828cd03812aaebc30db https://github.com/rhatdan/storage
github.com/containers/psgo 5dde6da0bc8831b35243a847625bcf18183bd1ee
github.com/coreos/go-systemd v14
github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
@@ -50,7 +51,7 @@ github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc b4e2ecb452d9ee4381137cc0a7e6715b96bed6de
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce
github.com/opencontainers/runtime-tools master
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
github.com/ostreedev/ostree-go master
github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
@@ -76,7 +77,7 @@ golang.org/x/sys master
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
golang.org/x/sync master
-google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
+google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go<
gopkg.in/cheggaaa/pb.v1 v1.0.7
gopkg.in/inf.v0 v0.9.0
gopkg.in/mgo.v2 v2
diff --git a/vendor/github.com/checkpoint-restore/go-criu/LICENSE b/vendor/github.com/checkpoint-restore/go-criu/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/checkpoint-restore/go-criu/README.md b/vendor/github.com/checkpoint-restore/go-criu/README.md
new file mode 100644
index 000000000..a79b4d7fb
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/README.md
@@ -0,0 +1,27 @@
+[![master](https://travis-ci.org/checkpoint-restore/go-criu.svg?branch=master)](https://travis-ci.org/checkpoint-restore/go-criu)
+
+## go-criu -- Go bindings for [CRIU](https://criu.org/)
+
+This repository provides Go bindings for CRIU. The code is based on the Go-based PHaul
+implementation from the CRIU repository. For easier inclusion in other Go projects, the
+CRIU Go bindings have been moved to this repository.
+
+The Go bindings provide an easy way to use the CRIU RPC calls from Go without the need
+to set up all the infrastructure to make the actual RPC connection to CRIU.
+
+The following example prints the version of CRIU:
+```
+ c := criu.MakeCriu()
+ version, err := c.GetCriuVersion()
+ if err != nil {
+ 	// handle the error
+ }
+ fmt.Println(version)
+```
+or to check whether at least a certain CRIU version is installed:
+```
+ c := criu.MakeCriu()
+ result, err := c.IsCriuAtLeast(31100)
+ if err != nil {
+ 	// handle the error
+ }
+ fmt.Println(result)
+```
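+
+Beyond version queries, the same Criu object can also drive a checkpoint. The sketch
+below is illustrative only: the PID and the image directory are placeholders, and the
+usual os, rpc and proto imports are assumed. It fills in an rpc.CriuOpts and calls
+Dump() with the no-op NoNotify callbacks:
+```
+ c := criu.MakeCriu()
+ img, err := os.Open("/tmp/criu-images") // placeholder image directory
+ if err != nil {
+ 	// handle the error
+ }
+ defer img.Close()
+
+ opts := rpc.CriuOpts{
+ 	Pid:         proto.Int32(12345), // placeholder PID of the process to dump
+ 	ImagesDirFd: proto.Int32(int32(img.Fd())),
+ 	LogFile:     proto.String("dump.log"),
+ }
+ if err := c.Dump(opts, criu.NoNotify{}); err != nil {
+ 	// handle the error
+ }
+```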
+
+### License
+
+go-criu is licensed under the Apache License 2.0.
+
diff --git a/vendor/github.com/checkpoint-restore/go-criu/main.go b/vendor/github.com/checkpoint-restore/go-criu/main.go
new file mode 100644
index 000000000..cf94c376e
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/main.go
@@ -0,0 +1,250 @@
+package criu
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "strconv"
+ "syscall"
+
+ "github.com/checkpoint-restore/go-criu/rpc"
+ "github.com/golang/protobuf/proto"
+)
+
+// Criu holds the CRIU swrk process and the socket used for its RPC requests
+type Criu struct {
+ swrkCmd *exec.Cmd
+ swrkSk *os.File
+}
+
+// MakeCriu returns the Criu object required for most operations
+func MakeCriu() *Criu {
+ return &Criu{}
+}
+
+// Prepare sets up everything for the RPC communication to CRIU
+func (c *Criu) Prepare() error {
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET, 0)
+ if err != nil {
+ return err
+ }
+
+ cln := os.NewFile(uintptr(fds[0]), "criu-xprt-cln")
+ syscall.CloseOnExec(fds[0])
+ srv := os.NewFile(uintptr(fds[1]), "criu-xprt-srv")
+ defer srv.Close()
+
+ args := []string{"swrk", strconv.Itoa(fds[1])}
+ cmd := exec.Command("criu", args...)
+
+ err = cmd.Start()
+ if err != nil {
+ cln.Close()
+ return err
+ }
+
+ c.swrkCmd = cmd
+ c.swrkSk = cln
+
+ return nil
+}
+
+// Cleanup closes the RPC socket and waits for the CRIU swrk process to exit
+func (c *Criu) Cleanup() {
+ if c.swrkCmd != nil {
+ c.swrkSk.Close()
+ c.swrkSk = nil
+ c.swrkCmd.Wait()
+ c.swrkCmd = nil
+ }
+}
+
+func (c *Criu) sendAndRecv(reqB []byte) ([]byte, int, error) {
+ cln := c.swrkSk
+ _, err := cln.Write(reqB)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ respB := make([]byte, 2*4096)
+ n, err := cln.Read(respB)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return respB, n, nil
+}
+
+func (c *Criu) doSwrk(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) error {
+ resp, err := c.doSwrkWithResp(reqType, opts, nfy)
+ if err != nil {
+ return err
+ }
+ respType := resp.GetType()
+ if respType != reqType {
+ return errors.New("unexpected response")
+ }
+
+ return nil
+}
+
+func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) (*rpc.CriuResp, error) {
+ var resp *rpc.CriuResp
+
+ req := rpc.CriuReq{
+ Type: &reqType,
+ Opts: opts,
+ }
+
+ if nfy != nil {
+ opts.NotifyScripts = proto.Bool(true)
+ }
+
+ if c.swrkCmd == nil {
+ err := c.Prepare()
+ if err != nil {
+ return nil, err
+ }
+
+ defer c.Cleanup()
+ }
+
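+ // CRIU may answer a request with intermediate NOTIFY messages before the
+ // final response: each one is dispatched to the matching Notify callback
+ // below and acknowledged with NotifySuccess, until a response of the
+ // requested type (or an error) arrives.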
+ for {
+ reqB, err := proto.Marshal(&req)
+ if err != nil {
+ return nil, err
+ }
+
+ respB, respS, err := c.sendAndRecv(reqB)
+ if err != nil {
+ return nil, err
+ }
+
+ resp = &rpc.CriuResp{}
+ err = proto.Unmarshal(respB[:respS], resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if !resp.GetSuccess() {
+ return resp, fmt.Errorf("operation failed (msg:%s err:%d)",
+ resp.GetCrErrmsg(), resp.GetCrErrno())
+ }
+
+ respType := resp.GetType()
+ if respType != rpc.CriuReqType_NOTIFY {
+ break
+ }
+ if nfy == nil {
+ return resp, errors.New("unexpected notify")
+ }
+
+ notify := resp.GetNotify()
+ switch notify.GetScript() {
+ case "pre-dump":
+ err = nfy.PreDump()
+ case "post-dump":
+ err = nfy.PostDump()
+ case "pre-restore":
+ err = nfy.PreRestore()
+ case "post-restore":
+ err = nfy.PostRestore(notify.GetPid())
+ case "network-lock":
+ err = nfy.NetworkLock()
+ case "network-unlock":
+ err = nfy.NetworkUnlock()
+ case "setup-namespaces":
+ err = nfy.SetupNamespaces(notify.GetPid())
+ case "post-setup-namespaces":
+ err = nfy.PostSetupNamespaces()
+ case "post-resume":
+ err = nfy.PostResume()
+ default:
+ err = nil
+ }
+
+ if err != nil {
+ return resp, err
+ }
+
+ req = rpc.CriuReq{
+ Type: &respType,
+ NotifySuccess: proto.Bool(true),
+ }
+ }
+
+ return resp, nil
+}
+
+// Dump dumps a process
+func (c *Criu) Dump(opts rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_DUMP, &opts, nfy)
+}
+
+// Restore restores a process
+func (c *Criu) Restore(opts rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_RESTORE, &opts, nfy)
+}
+
+// PreDump does a pre-dump
+func (c *Criu) PreDump(opts rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_PRE_DUMP, &opts, nfy)
+}
+
+// StartPageServer starts the page server
+func (c *Criu) StartPageServer(opts rpc.CriuOpts) error {
+ return c.doSwrk(rpc.CriuReqType_PAGE_SERVER, &opts, nil)
+}
+
+// StartPageServerChld starts the page server and returns PID and port
+func (c *Criu) StartPageServerChld(opts rpc.CriuOpts) (int, int, error) {
+ resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, &opts, nil)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ return int(resp.Ps.GetPid()), int(resp.Ps.GetPort()), nil
+}
+
+// GetCriuVersion executes the VERSION RPC call and returns the version
+// as an integer. Major * 10000 + Minor * 100 + SubLevel
+func (c *Criu) GetCriuVersion() (int, error) {
+ resp, err := c.doSwrkWithResp(rpc.CriuReqType_VERSION, nil, nil)
+ if err != nil {
+ return 0, err
+ }
+
+ if resp.GetType() != rpc.CriuReqType_VERSION {
+ return 0, fmt.Errorf("unexpected CRIU RPC response")
+ }
+
+ version := int(*resp.GetVersion().Major) * 10000
+ version += int(*resp.GetVersion().Minor) * 100
+ if resp.GetVersion().Sublevel != nil {
+ version += int(*resp.GetVersion().Sublevel)
+ }
+
+ if resp.GetVersion().Gitid != nil {
+ // taken from runc: if it is a git release -> increase minor by 1
+ version -= (version % 100)
+ version += 100
+ }
+
+ return version, nil
+}
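+
+// For illustration: CRIU 3.11.0 is encoded as 3*10000 + 11*100 + 0 = 31100,
+// the value checked by IsCriuAtLeast(31100) in the README, and a git build
+// of 3.11 is rounded up to 31200 by the Gitid handling above.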
+
+// IsCriuAtLeast checks if the version is at least the same
+// as the parameter version
+func (c *Criu) IsCriuAtLeast(version int) (bool, error) {
+ criuVersion, err := c.GetCriuVersion()
+ if err != nil {
+ return false, err
+ }
+
+ if criuVersion >= version {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/notify.go b/vendor/github.com/checkpoint-restore/go-criu/notify.go
new file mode 100644
index 000000000..1c8547b43
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/notify.go
@@ -0,0 +1,63 @@
+package criu
+
+// Notify is the callback interface for CRIU notification events
+// (pre-dump, post-dump, restore, network and namespace hooks)
+type Notify interface {
+ PreDump() error
+ PostDump() error
+ PreRestore() error
+ PostRestore(pid int32) error
+ NetworkLock() error
+ NetworkUnlock() error
+ SetupNamespaces(pid int32) error
+ PostSetupNamespaces() error
+ PostResume() error
+}
+
+// NoNotify is a no-op implementation of the Notify interface
+type NoNotify struct {
+}
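+
+// Illustrative consumer-side pattern: embed NoNotify and override only the
+// hooks of interest (test/main.go does the same with its TestNfy type), e.g.
+//
+//	type dumpLogger struct{ criu.NoNotify }
+//	func (dumpLogger) PostDump() error { fmt.Println("dump finished"); return nil }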
+
+// PreDump NoNotify
+func (c NoNotify) PreDump() error {
+ return nil
+}
+
+// PostDump NoNotify
+func (c NoNotify) PostDump() error {
+ return nil
+}
+
+// PreRestore NoNotify
+func (c NoNotify) PreRestore() error {
+ return nil
+}
+
+// PostRestore NoNotify
+func (c NoNotify) PostRestore(pid int32) error {
+ return nil
+}
+
+// NetworkLock NoNotify
+func (c NoNotify) NetworkLock() error {
+ return nil
+}
+
+// NetworkUnlock NoNotify
+func (c NoNotify) NetworkUnlock() error {
+ return nil
+}
+
+// SetupNamespaces NoNotify
+func (c NoNotify) SetupNamespaces(pid int32) error {
+ return nil
+}
+
+// PostSetupNamespaces NoNotify
+func (c NoNotify) PostSetupNamespaces() error {
+ return nil
+}
+
+// PostResume NoNotify
+func (c NoNotify) PostResume() error {
+ return nil
+}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go
new file mode 100644
index 000000000..e9d1f8047
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go
@@ -0,0 +1,1203 @@
+// Code generated by protoc-gen-go.
+// source: rpc.proto
+// DO NOT EDIT!
+
+/*
+Package rpc is a generated protocol buffer package.
+
+It is generated from these files:
+ rpc.proto
+
+It has these top-level messages:
+ CriuPageServerInfo
+ CriuVethPair
+ ExtMountMap
+ JoinNamespace
+ InheritFd
+ CgroupRoot
+ UnixSk
+ CriuOpts
+ CriuDumpResp
+ CriuRestoreResp
+ CriuNotify
+ CriuFeatures
+ CriuReq
+ CriuResp
+ CriuVersion
+*/
+package rpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type CriuCgMode int32
+
+const (
+ CriuCgMode_IGNORE CriuCgMode = 0
+ CriuCgMode_CG_NONE CriuCgMode = 1
+ CriuCgMode_PROPS CriuCgMode = 2
+ CriuCgMode_SOFT CriuCgMode = 3
+ CriuCgMode_FULL CriuCgMode = 4
+ CriuCgMode_STRICT CriuCgMode = 5
+ CriuCgMode_DEFAULT CriuCgMode = 6
+)
+
+var CriuCgMode_name = map[int32]string{
+ 0: "IGNORE",
+ 1: "CG_NONE",
+ 2: "PROPS",
+ 3: "SOFT",
+ 4: "FULL",
+ 5: "STRICT",
+ 6: "DEFAULT",
+}
+var CriuCgMode_value = map[string]int32{
+ "IGNORE": 0,
+ "CG_NONE": 1,
+ "PROPS": 2,
+ "SOFT": 3,
+ "FULL": 4,
+ "STRICT": 5,
+ "DEFAULT": 6,
+}
+
+func (x CriuCgMode) Enum() *CriuCgMode {
+ p := new(CriuCgMode)
+ *p = x
+ return p
+}
+func (x CriuCgMode) String() string {
+ return proto.EnumName(CriuCgMode_name, int32(x))
+}
+func (x *CriuCgMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CriuCgMode_value, data, "CriuCgMode")
+ if err != nil {
+ return err
+ }
+ *x = CriuCgMode(value)
+ return nil
+}
+func (CriuCgMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type CriuReqType int32
+
+const (
+ CriuReqType_EMPTY CriuReqType = 0
+ CriuReqType_DUMP CriuReqType = 1
+ CriuReqType_RESTORE CriuReqType = 2
+ CriuReqType_CHECK CriuReqType = 3
+ CriuReqType_PRE_DUMP CriuReqType = 4
+ CriuReqType_PAGE_SERVER CriuReqType = 5
+ CriuReqType_NOTIFY CriuReqType = 6
+ CriuReqType_CPUINFO_DUMP CriuReqType = 7
+ CriuReqType_CPUINFO_CHECK CriuReqType = 8
+ CriuReqType_FEATURE_CHECK CriuReqType = 9
+ CriuReqType_VERSION CriuReqType = 10
+ CriuReqType_WAIT_PID CriuReqType = 11
+ CriuReqType_PAGE_SERVER_CHLD CriuReqType = 12
+)
+
+var CriuReqType_name = map[int32]string{
+ 0: "EMPTY",
+ 1: "DUMP",
+ 2: "RESTORE",
+ 3: "CHECK",
+ 4: "PRE_DUMP",
+ 5: "PAGE_SERVER",
+ 6: "NOTIFY",
+ 7: "CPUINFO_DUMP",
+ 8: "CPUINFO_CHECK",
+ 9: "FEATURE_CHECK",
+ 10: "VERSION",
+ 11: "WAIT_PID",
+ 12: "PAGE_SERVER_CHLD",
+}
+var CriuReqType_value = map[string]int32{
+ "EMPTY": 0,
+ "DUMP": 1,
+ "RESTORE": 2,
+ "CHECK": 3,
+ "PRE_DUMP": 4,
+ "PAGE_SERVER": 5,
+ "NOTIFY": 6,
+ "CPUINFO_DUMP": 7,
+ "CPUINFO_CHECK": 8,
+ "FEATURE_CHECK": 9,
+ "VERSION": 10,
+ "WAIT_PID": 11,
+ "PAGE_SERVER_CHLD": 12,
+}
+
+func (x CriuReqType) Enum() *CriuReqType {
+ p := new(CriuReqType)
+ *p = x
+ return p
+}
+func (x CriuReqType) String() string {
+ return proto.EnumName(CriuReqType_name, int32(x))
+}
+func (x *CriuReqType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CriuReqType_value, data, "CriuReqType")
+ if err != nil {
+ return err
+ }
+ *x = CriuReqType(value)
+ return nil
+}
+func (CriuReqType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+type CriuPageServerInfo struct {
+ Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"`
+ Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuPageServerInfo) Reset() { *m = CriuPageServerInfo{} }
+func (m *CriuPageServerInfo) String() string { return proto.CompactTextString(m) }
+func (*CriuPageServerInfo) ProtoMessage() {}
+func (*CriuPageServerInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *CriuPageServerInfo) GetAddress() string {
+ if m != nil && m.Address != nil {
+ return *m.Address
+ }
+ return ""
+}
+
+func (m *CriuPageServerInfo) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuPageServerInfo) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CriuVethPair struct {
+ IfIn *string `protobuf:"bytes,1,req,name=if_in,json=ifIn" json:"if_in,omitempty"`
+ IfOut *string `protobuf:"bytes,2,req,name=if_out,json=ifOut" json:"if_out,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuVethPair) Reset() { *m = CriuVethPair{} }
+func (m *CriuVethPair) String() string { return proto.CompactTextString(m) }
+func (*CriuVethPair) ProtoMessage() {}
+func (*CriuVethPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *CriuVethPair) GetIfIn() string {
+ if m != nil && m.IfIn != nil {
+ return *m.IfIn
+ }
+ return ""
+}
+
+func (m *CriuVethPair) GetIfOut() string {
+ if m != nil && m.IfOut != nil {
+ return *m.IfOut
+ }
+ return ""
+}
+
+type ExtMountMap struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtMountMap) Reset() { *m = ExtMountMap{} }
+func (m *ExtMountMap) String() string { return proto.CompactTextString(m) }
+func (*ExtMountMap) ProtoMessage() {}
+func (*ExtMountMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *ExtMountMap) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *ExtMountMap) GetVal() string {
+ if m != nil && m.Val != nil {
+ return *m.Val
+ }
+ return ""
+}
+
+type JoinNamespace struct {
+ Ns *string `protobuf:"bytes,1,req,name=ns" json:"ns,omitempty"`
+ NsFile *string `protobuf:"bytes,2,req,name=ns_file,json=nsFile" json:"ns_file,omitempty"`
+ ExtraOpt *string `protobuf:"bytes,3,opt,name=extra_opt,json=extraOpt" json:"extra_opt,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *JoinNamespace) Reset() { *m = JoinNamespace{} }
+func (m *JoinNamespace) String() string { return proto.CompactTextString(m) }
+func (*JoinNamespace) ProtoMessage() {}
+func (*JoinNamespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *JoinNamespace) GetNs() string {
+ if m != nil && m.Ns != nil {
+ return *m.Ns
+ }
+ return ""
+}
+
+func (m *JoinNamespace) GetNsFile() string {
+ if m != nil && m.NsFile != nil {
+ return *m.NsFile
+ }
+ return ""
+}
+
+func (m *JoinNamespace) GetExtraOpt() string {
+ if m != nil && m.ExtraOpt != nil {
+ return *m.ExtraOpt
+ }
+ return ""
+}
+
+type InheritFd struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InheritFd) Reset() { *m = InheritFd{} }
+func (m *InheritFd) String() string { return proto.CompactTextString(m) }
+func (*InheritFd) ProtoMessage() {}
+func (*InheritFd) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *InheritFd) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *InheritFd) GetFd() int32 {
+ if m != nil && m.Fd != nil {
+ return *m.Fd
+ }
+ return 0
+}
+
+type CgroupRoot struct {
+ Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"`
+ Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CgroupRoot) Reset() { *m = CgroupRoot{} }
+func (m *CgroupRoot) String() string { return proto.CompactTextString(m) }
+func (*CgroupRoot) ProtoMessage() {}
+func (*CgroupRoot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *CgroupRoot) GetCtrl() string {
+ if m != nil && m.Ctrl != nil {
+ return *m.Ctrl
+ }
+ return ""
+}
+
+func (m *CgroupRoot) GetPath() string {
+ if m != nil && m.Path != nil {
+ return *m.Path
+ }
+ return ""
+}
+
+type UnixSk struct {
+ Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UnixSk) Reset() { *m = UnixSk{} }
+func (m *UnixSk) String() string { return proto.CompactTextString(m) }
+func (*UnixSk) ProtoMessage() {}
+func (*UnixSk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *UnixSk) GetInode() uint32 {
+ if m != nil && m.Inode != nil {
+ return *m.Inode
+ }
+ return 0
+}
+
+type CriuOpts struct {
+ ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"`
+ ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"`
+ TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"`
+ EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices,json=evasiveDevices" json:"evasive_devices,omitempty"`
+ ShellJob *bool `protobuf:"varint,7,opt,name=shell_job,json=shellJob" json:"shell_job,omitempty"`
+ FileLocks *bool `protobuf:"varint,8,opt,name=file_locks,json=fileLocks" json:"file_locks,omitempty"`
+ LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,json=logLevel,def=2" json:"log_level,omitempty"`
+ LogFile *string `protobuf:"bytes,10,opt,name=log_file,json=logFile" json:"log_file,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"`
+ NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts,json=notifyScripts" json:"notify_scripts,omitempty"`
+ Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"`
+ ParentImg *string `protobuf:"bytes,14,opt,name=parent_img,json=parentImg" json:"parent_img,omitempty"`
+ TrackMem *bool `protobuf:"varint,15,opt,name=track_mem,json=trackMem" json:"track_mem,omitempty"`
+ AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup,json=autoDedup" json:"auto_dedup,omitempty"`
+ WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd,json=workDirFd" json:"work_dir_fd,omitempty"`
+ LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap,json=linkRemap" json:"link_remap,omitempty"`
+ Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"`
+ CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,json=cpuCap,def=4294967295" json:"cpu_cap,omitempty"`
+ ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap,json=forceIrmap" json:"force_irmap,omitempty"`
+ ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd,json=execCmd" json:"exec_cmd,omitempty"`
+ ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt,json=extMnt" json:"ext_mnt,omitempty"`
+ ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups,json=manageCgroups" json:"manage_cgroups,omitempty"`
+ CgRoot []*CgroupRoot `protobuf:"bytes,25,rep,name=cg_root,json=cgRoot" json:"cg_root,omitempty"`
+ RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling,json=rstSibling" json:"rst_sibling,omitempty"`
+ InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd,json=inheritFd" json:"inherit_fd,omitempty"`
+ AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt,json=autoExtMnt" json:"auto_ext_mnt,omitempty"`
+ ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing,json=extSharing" json:"ext_sharing,omitempty"`
+ ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters,json=extMasters" json:"ext_masters,omitempty"`
+ SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt,json=skipMnt" json:"skip_mnt,omitempty"`
+ EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs,json=enableFs" json:"enable_fs,omitempty"`
+ UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino,json=unixSkIno" json:"unix_sk_ino,omitempty"`
+ ManageCgroupsMode *CriuCgMode `protobuf:"varint,34,opt,name=manage_cgroups_mode,json=manageCgroupsMode,enum=CriuCgMode" json:"manage_cgroups_mode,omitempty"`
+ GhostLimit *uint32 `protobuf:"varint,35,opt,name=ghost_limit,json=ghostLimit,def=1048576" json:"ghost_limit,omitempty"`
+ IrmapScanPaths []string `protobuf:"bytes,36,rep,name=irmap_scan_paths,json=irmapScanPaths" json:"irmap_scan_paths,omitempty"`
+ External []string `protobuf:"bytes,37,rep,name=external" json:"external,omitempty"`
+ EmptyNs *uint32 `protobuf:"varint,38,opt,name=empty_ns,json=emptyNs" json:"empty_ns,omitempty"`
+ JoinNs []*JoinNamespace `protobuf:"bytes,39,rep,name=join_ns,json=joinNs" json:"join_ns,omitempty"`
+ CgroupProps *string `protobuf:"bytes,41,opt,name=cgroup_props,json=cgroupProps" json:"cgroup_props,omitempty"`
+ CgroupPropsFile *string `protobuf:"bytes,42,opt,name=cgroup_props_file,json=cgroupPropsFile" json:"cgroup_props_file,omitempty"`
+ CgroupDumpController []string `protobuf:"bytes,43,rep,name=cgroup_dump_controller,json=cgroupDumpController" json:"cgroup_dump_controller,omitempty"`
+ FreezeCgroup *string `protobuf:"bytes,44,opt,name=freeze_cgroup,json=freezeCgroup" json:"freeze_cgroup,omitempty"`
+ Timeout *uint32 `protobuf:"varint,45,opt,name=timeout" json:"timeout,omitempty"`
+ TcpSkipInFlight *bool `protobuf:"varint,46,opt,name=tcp_skip_in_flight,json=tcpSkipInFlight" json:"tcp_skip_in_flight,omitempty"`
+ WeakSysctls *bool `protobuf:"varint,47,opt,name=weak_sysctls,json=weakSysctls" json:"weak_sysctls,omitempty"`
+ LazyPages *bool `protobuf:"varint,48,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ StatusFd *int32 `protobuf:"varint,49,opt,name=status_fd,json=statusFd" json:"status_fd,omitempty"`
+ OrphanPtsMaster *bool `protobuf:"varint,50,opt,name=orphan_pts_master,json=orphanPtsMaster" json:"orphan_pts_master,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuOpts) Reset() { *m = CriuOpts{} }
+func (m *CriuOpts) String() string { return proto.CompactTextString(m) }
+func (*CriuOpts) ProtoMessage() {}
+func (*CriuOpts) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+const Default_CriuOpts_LogLevel int32 = 2
+const Default_CriuOpts_CpuCap uint32 = 4294967295
+const Default_CriuOpts_GhostLimit uint32 = 1048576
+
+func (m *CriuOpts) GetImagesDirFd() int32 {
+ if m != nil && m.ImagesDirFd != nil {
+ return *m.ImagesDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLeaveRunning() bool {
+ if m != nil && m.LeaveRunning != nil {
+ return *m.LeaveRunning
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtUnixSk() bool {
+ if m != nil && m.ExtUnixSk != nil {
+ return *m.ExtUnixSk
+ }
+ return false
+}
+
+func (m *CriuOpts) GetTcpEstablished() bool {
+ if m != nil && m.TcpEstablished != nil {
+ return *m.TcpEstablished
+ }
+ return false
+}
+
+func (m *CriuOpts) GetEvasiveDevices() bool {
+ if m != nil && m.EvasiveDevices != nil {
+ return *m.EvasiveDevices
+ }
+ return false
+}
+
+func (m *CriuOpts) GetShellJob() bool {
+ if m != nil && m.ShellJob != nil {
+ return *m.ShellJob
+ }
+ return false
+}
+
+func (m *CriuOpts) GetFileLocks() bool {
+ if m != nil && m.FileLocks != nil {
+ return *m.FileLocks
+ }
+ return false
+}
+
+func (m *CriuOpts) GetLogLevel() int32 {
+ if m != nil && m.LogLevel != nil {
+ return *m.LogLevel
+ }
+ return Default_CriuOpts_LogLevel
+}
+
+func (m *CriuOpts) GetLogFile() string {
+ if m != nil && m.LogFile != nil {
+ return *m.LogFile
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetNotifyScripts() bool {
+ if m != nil && m.NotifyScripts != nil {
+ return *m.NotifyScripts
+ }
+ return false
+}
+
+func (m *CriuOpts) GetRoot() string {
+ if m != nil && m.Root != nil {
+ return *m.Root
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetParentImg() string {
+ if m != nil && m.ParentImg != nil {
+ return *m.ParentImg
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetTrackMem() bool {
+ if m != nil && m.TrackMem != nil {
+ return *m.TrackMem
+ }
+ return false
+}
+
+func (m *CriuOpts) GetAutoDedup() bool {
+ if m != nil && m.AutoDedup != nil {
+ return *m.AutoDedup
+ }
+ return false
+}
+
+func (m *CriuOpts) GetWorkDirFd() int32 {
+ if m != nil && m.WorkDirFd != nil {
+ return *m.WorkDirFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetLinkRemap() bool {
+ if m != nil && m.LinkRemap != nil {
+ return *m.LinkRemap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetVeths() []*CriuVethPair {
+ if m != nil {
+ return m.Veths
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetCpuCap() uint32 {
+ if m != nil && m.CpuCap != nil {
+ return *m.CpuCap
+ }
+ return Default_CriuOpts_CpuCap
+}
+
+func (m *CriuOpts) GetForceIrmap() bool {
+ if m != nil && m.ForceIrmap != nil {
+ return *m.ForceIrmap
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExecCmd() []string {
+ if m != nil {
+ return m.ExecCmd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetExtMnt() []*ExtMountMap {
+ if m != nil {
+ return m.ExtMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroups() bool {
+ if m != nil && m.ManageCgroups != nil {
+ return *m.ManageCgroups
+ }
+ return false
+}
+
+func (m *CriuOpts) GetCgRoot() []*CgroupRoot {
+ if m != nil {
+ return m.CgRoot
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetRstSibling() bool {
+ if m != nil && m.RstSibling != nil {
+ return *m.RstSibling
+ }
+ return false
+}
+
+func (m *CriuOpts) GetInheritFd() []*InheritFd {
+ if m != nil {
+ return m.InheritFd
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetAutoExtMnt() bool {
+ if m != nil && m.AutoExtMnt != nil {
+ return *m.AutoExtMnt
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtSharing() bool {
+ if m != nil && m.ExtSharing != nil {
+ return *m.ExtSharing
+ }
+ return false
+}
+
+func (m *CriuOpts) GetExtMasters() bool {
+ if m != nil && m.ExtMasters != nil {
+ return *m.ExtMasters
+ }
+ return false
+}
+
+func (m *CriuOpts) GetSkipMnt() []string {
+ if m != nil {
+ return m.SkipMnt
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetEnableFs() []string {
+ if m != nil {
+ return m.EnableFs
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetUnixSkIno() []*UnixSk {
+ if m != nil {
+ return m.UnixSkIno
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetManageCgroupsMode() CriuCgMode {
+ if m != nil && m.ManageCgroupsMode != nil {
+ return *m.ManageCgroupsMode
+ }
+ return CriuCgMode_IGNORE
+}
+
+func (m *CriuOpts) GetGhostLimit() uint32 {
+ if m != nil && m.GhostLimit != nil {
+ return *m.GhostLimit
+ }
+ return Default_CriuOpts_GhostLimit
+}
+
+func (m *CriuOpts) GetIrmapScanPaths() []string {
+ if m != nil {
+ return m.IrmapScanPaths
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetExternal() []string {
+ if m != nil {
+ return m.External
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetEmptyNs() uint32 {
+ if m != nil && m.EmptyNs != nil {
+ return *m.EmptyNs
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetJoinNs() []*JoinNamespace {
+ if m != nil {
+ return m.JoinNs
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetCgroupProps() string {
+ if m != nil && m.CgroupProps != nil {
+ return *m.CgroupProps
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetCgroupPropsFile() string {
+ if m != nil && m.CgroupPropsFile != nil {
+ return *m.CgroupPropsFile
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetCgroupDumpController() []string {
+ if m != nil {
+ return m.CgroupDumpController
+ }
+ return nil
+}
+
+func (m *CriuOpts) GetFreezeCgroup() string {
+ if m != nil && m.FreezeCgroup != nil {
+ return *m.FreezeCgroup
+ }
+ return ""
+}
+
+func (m *CriuOpts) GetTimeout() uint32 {
+ if m != nil && m.Timeout != nil {
+ return *m.Timeout
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetTcpSkipInFlight() bool {
+ if m != nil && m.TcpSkipInFlight != nil {
+ return *m.TcpSkipInFlight
+ }
+ return false
+}
+
+func (m *CriuOpts) GetWeakSysctls() bool {
+ if m != nil && m.WeakSysctls != nil {
+ return *m.WeakSysctls
+ }
+ return false
+}
+
+func (m *CriuOpts) GetLazyPages() bool {
+ if m != nil && m.LazyPages != nil {
+ return *m.LazyPages
+ }
+ return false
+}
+
+func (m *CriuOpts) GetStatusFd() int32 {
+ if m != nil && m.StatusFd != nil {
+ return *m.StatusFd
+ }
+ return 0
+}
+
+func (m *CriuOpts) GetOrphanPtsMaster() bool {
+ if m != nil && m.OrphanPtsMaster != nil {
+ return *m.OrphanPtsMaster
+ }
+ return false
+}
+
+type CriuDumpResp struct {
+ Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuDumpResp) Reset() { *m = CriuDumpResp{} }
+func (m *CriuDumpResp) String() string { return proto.CompactTextString(m) }
+func (*CriuDumpResp) ProtoMessage() {}
+func (*CriuDumpResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *CriuDumpResp) GetRestored() bool {
+ if m != nil && m.Restored != nil {
+ return *m.Restored
+ }
+ return false
+}
+
+type CriuRestoreResp struct {
+ Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuRestoreResp) Reset() { *m = CriuRestoreResp{} }
+func (m *CriuRestoreResp) String() string { return proto.CompactTextString(m) }
+func (*CriuRestoreResp) ProtoMessage() {}
+func (*CriuRestoreResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *CriuRestoreResp) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+type CriuNotify struct {
+ Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuNotify) Reset() { *m = CriuNotify{} }
+func (m *CriuNotify) String() string { return proto.CompactTextString(m) }
+func (*CriuNotify) ProtoMessage() {}
+func (*CriuNotify) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *CriuNotify) GetScript() string {
+ if m != nil && m.Script != nil {
+ return *m.Script
+ }
+ return ""
+}
+
+func (m *CriuNotify) GetPid() int32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+//
+// List of features which can queried via
+// CRIU_REQ_TYPE__FEATURE_CHECK
+type CriuFeatures struct {
+ MemTrack *bool `protobuf:"varint,1,opt,name=mem_track,json=memTrack" json:"mem_track,omitempty"`
+ LazyPages *bool `protobuf:"varint,2,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuFeatures) Reset() { *m = CriuFeatures{} }
+func (m *CriuFeatures) String() string { return proto.CompactTextString(m) }
+func (*CriuFeatures) ProtoMessage() {}
+func (*CriuFeatures) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *CriuFeatures) GetMemTrack() bool {
+ if m != nil && m.MemTrack != nil {
+ return *m.MemTrack
+ }
+ return false
+}
+
+func (m *CriuFeatures) GetLazyPages() bool {
+ if m != nil && m.LazyPages != nil {
+ return *m.LazyPages
+ }
+ return false
+}
+
+type CriuReq struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
+ NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"`
+ //
+ // When set service won't close the connection but
+ // will wait for more req-s to appear. Works not
+ // for all request types.
+ KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"`
+ //
+ // 'features' can be used to query which features
+ // are supported by the installed criu/kernel
+ // via RPC.
+ Features *CriuFeatures `protobuf:"bytes,5,opt,name=features" json:"features,omitempty"`
+ // 'pid' is used for WAIT_PID
+ Pid *uint32 `protobuf:"varint,6,opt,name=pid" json:"pid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuReq) Reset() { *m = CriuReq{} }
+func (m *CriuReq) String() string { return proto.CompactTextString(m) }
+func (*CriuReq) ProtoMessage() {}
+func (*CriuReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *CriuReq) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuReq) GetOpts() *CriuOpts {
+ if m != nil {
+ return m.Opts
+ }
+ return nil
+}
+
+func (m *CriuReq) GetNotifySuccess() bool {
+ if m != nil && m.NotifySuccess != nil {
+ return *m.NotifySuccess
+ }
+ return false
+}
+
+func (m *CriuReq) GetKeepOpen() bool {
+ if m != nil && m.KeepOpen != nil {
+ return *m.KeepOpen
+ }
+ return false
+}
+
+func (m *CriuReq) GetFeatures() *CriuFeatures {
+ if m != nil {
+ return m.Features
+ }
+ return nil
+}
+
+func (m *CriuReq) GetPid() uint32 {
+ if m != nil && m.Pid != nil {
+ return *m.Pid
+ }
+ return 0
+}
+
+type CriuResp struct {
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"`
+ Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"`
+ Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"`
+ Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"`
+ CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno,json=crErrno" json:"cr_errno,omitempty"`
+ Features *CriuFeatures `protobuf:"bytes,8,opt,name=features" json:"features,omitempty"`
+ CrErrmsg *string `protobuf:"bytes,9,opt,name=cr_errmsg,json=crErrmsg" json:"cr_errmsg,omitempty"`
+ Version *CriuVersion `protobuf:"bytes,10,opt,name=version" json:"version,omitempty"`
+ Status *int32 `protobuf:"varint,11,opt,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuResp) Reset() { *m = CriuResp{} }
+func (m *CriuResp) String() string { return proto.CompactTextString(m) }
+func (*CriuResp) ProtoMessage() {}
+func (*CriuResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *CriuResp) GetType() CriuReqType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (m *CriuResp) GetSuccess() bool {
+ if m != nil && m.Success != nil {
+ return *m.Success
+ }
+ return false
+}
+
+func (m *CriuResp) GetDump() *CriuDumpResp {
+ if m != nil {
+ return m.Dump
+ }
+ return nil
+}
+
+func (m *CriuResp) GetRestore() *CriuRestoreResp {
+ if m != nil {
+ return m.Restore
+ }
+ return nil
+}
+
+func (m *CriuResp) GetNotify() *CriuNotify {
+ if m != nil {
+ return m.Notify
+ }
+ return nil
+}
+
+func (m *CriuResp) GetPs() *CriuPageServerInfo {
+ if m != nil {
+ return m.Ps
+ }
+ return nil
+}
+
+func (m *CriuResp) GetCrErrno() int32 {
+ if m != nil && m.CrErrno != nil {
+ return *m.CrErrno
+ }
+ return 0
+}
+
+func (m *CriuResp) GetFeatures() *CriuFeatures {
+ if m != nil {
+ return m.Features
+ }
+ return nil
+}
+
+func (m *CriuResp) GetCrErrmsg() string {
+ if m != nil && m.CrErrmsg != nil {
+ return *m.CrErrmsg
+ }
+ return ""
+}
+
+func (m *CriuResp) GetVersion() *CriuVersion {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+func (m *CriuResp) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+// Answer for criu_req_type.VERSION requests
+type CriuVersion struct {
+ Major *int32 `protobuf:"varint,1,req,name=major" json:"major,omitempty"`
+ Minor *int32 `protobuf:"varint,2,req,name=minor" json:"minor,omitempty"`
+ Gitid *string `protobuf:"bytes,3,opt,name=gitid" json:"gitid,omitempty"`
+ Sublevel *int32 `protobuf:"varint,4,opt,name=sublevel" json:"sublevel,omitempty"`
+ Extra *int32 `protobuf:"varint,5,opt,name=extra" json:"extra,omitempty"`
+ Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CriuVersion) Reset() { *m = CriuVersion{} }
+func (m *CriuVersion) String() string { return proto.CompactTextString(m) }
+func (*CriuVersion) ProtoMessage() {}
+func (*CriuVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *CriuVersion) GetMajor() int32 {
+ if m != nil && m.Major != nil {
+ return *m.Major
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetMinor() int32 {
+ if m != nil && m.Minor != nil {
+ return *m.Minor
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetGitid() string {
+ if m != nil && m.Gitid != nil {
+ return *m.Gitid
+ }
+ return ""
+}
+
+func (m *CriuVersion) GetSublevel() int32 {
+ if m != nil && m.Sublevel != nil {
+ return *m.Sublevel
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetExtra() int32 {
+ if m != nil && m.Extra != nil {
+ return *m.Extra
+ }
+ return 0
+}
+
+func (m *CriuVersion) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CriuPageServerInfo)(nil), "criu_page_server_info")
+ proto.RegisterType((*CriuVethPair)(nil), "criu_veth_pair")
+ proto.RegisterType((*ExtMountMap)(nil), "ext_mount_map")
+ proto.RegisterType((*JoinNamespace)(nil), "join_namespace")
+ proto.RegisterType((*InheritFd)(nil), "inherit_fd")
+ proto.RegisterType((*CgroupRoot)(nil), "cgroup_root")
+ proto.RegisterType((*UnixSk)(nil), "unix_sk")
+ proto.RegisterType((*CriuOpts)(nil), "criu_opts")
+ proto.RegisterType((*CriuDumpResp)(nil), "criu_dump_resp")
+ proto.RegisterType((*CriuRestoreResp)(nil), "criu_restore_resp")
+ proto.RegisterType((*CriuNotify)(nil), "criu_notify")
+ proto.RegisterType((*CriuFeatures)(nil), "criu_features")
+ proto.RegisterType((*CriuReq)(nil), "criu_req")
+ proto.RegisterType((*CriuResp)(nil), "criu_resp")
+ proto.RegisterType((*CriuVersion)(nil), "criu_version")
+ proto.RegisterEnum("CriuCgMode", CriuCgMode_name, CriuCgMode_value)
+ proto.RegisterEnum("CriuReqType", CriuReqType_name, CriuReqType_value)
+}
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 1816 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x5b, 0x73, 0x5b, 0xb7,
+ 0x11, 0x0e, 0x29, 0xf1, 0x06, 0x5e, 0x7c, 0x8c, 0xd8, 0x0e, 0x1c, 0xd7, 0x36, 0x43, 0xc7, 0x89,
+ 0xaa, 0xb8, 0x6c, 0xc2, 0xda, 0x71, 0x9d, 0x69, 0x1f, 0x3c, 0x14, 0xe9, 0xb0, 0x91, 0x44, 0x0e,
+ 0x48, 0xb9, 0x93, 0x27, 0xcc, 0xd1, 0x39, 0x20, 0x05, 0xf3, 0xdc, 0x0a, 0x80, 0x8a, 0xe4, 0x3f,
+ 0xd1, 0xb7, 0xfe, 0xac, 0x4e, 0x7e, 0x52, 0x67, 0x17, 0xa0, 0x2c, 0x25, 0x99, 0x49, 0xdf, 0xb0,
+ 0x1f, 0x76, 0x81, 0xbd, 0xef, 0x92, 0x86, 0x2e, 0xa2, 0x7e, 0xa1, 0x73, 0x9b, 0xf7, 0x56, 0xe4,
+ 0x6e, 0xa4, 0xd5, 0x46, 0x14, 0xe1, 0x4a, 0x0a, 0x23, 0xf5, 0xb9, 0xd4, 0x42, 0x65, 0xcb, 0x9c,
+ 0x32, 0x52, 0x0b, 0xe3, 0x58, 0x4b, 0x63, 0x58, 0xa9, 0x5b, 0xda, 0x6b, 0xf0, 0x2d, 0x49, 0x29,
+ 0xd9, 0x2d, 0x72, 0x6d, 0x59, 0xb9, 0x5b, 0xda, 0xab, 0x70, 0x3c, 0xd3, 0x80, 0xec, 0x14, 0x2a,
+ 0x66, 0x3b, 0x08, 0xc1, 0x91, 0x76, 0x48, 0x79, 0x19, 0xb3, 0x5d, 0x04, 0xca, 0xcb, 0xb8, 0xf7,
+ 0x37, 0xd2, 0xc1, 0x8f, 0xce, 0xa5, 0x3d, 0x13, 0x45, 0xa8, 0x34, 0xfd, 0x98, 0x54, 0xd4, 0x52,
+ 0xa8, 0x8c, 0x95, 0xba, 0xe5, 0xbd, 0x06, 0xdf, 0x55, 0xcb, 0x49, 0x46, 0xef, 0x92, 0xaa, 0x5a,
+ 0x8a, 0x7c, 0x03, 0xcf, 0x03, 0x5a, 0x51, 0xcb, 0xe9, 0xc6, 0xf6, 0xfe, 0x42, 0xda, 0xf2, 0xc2,
+ 0x8a, 0x34, 0xdf, 0x64, 0x56, 0xa4, 0x61, 0x01, 0x1f, 0xae, 0xe5, 0xa5, 0x17, 0x85, 0x23, 0x20,
+ 0xe7, 0x61, 0xe2, 0xc5, 0xe0, 0xd8, 0x7b, 0x4b, 0x3a, 0xef, 0x72, 0x95, 0x89, 0x2c, 0x4c, 0xa5,
+ 0x29, 0xc2, 0x48, 0x82, 0x52, 0x99, 0xf1, 0x42, 0xe5, 0xcc, 0xd0, 0x4f, 0x48, 0x2d, 0x33, 0x62,
+ 0xa9, 0x12, 0xe9, 0xe5, 0xaa, 0x99, 0x19, 0xab, 0x44, 0xd2, 0x07, 0xa4, 0x21, 0x2f, 0xac, 0x0e,
+ 0x45, 0x5e, 0x58, 0xb4, 0xaa, 0xc1, 0xeb, 0x08, 0x4c, 0x0b, 0xdb, 0xeb, 0x13, 0xa2, 0xb2, 0x33,
+ 0xa9, 0x95, 0x15, 0xcb, 0xf8, 0x37, 0x34, 0x71, 0xa6, 0xc3, 0x83, 0xce, 0xf4, 0x17, 0xa4, 0x19,
+ 0xad, 0x74, 0xbe, 0x29, 0x84, 0xce, 0x73, 0x0b, 0xfe, 0x8b, 0xac, 0x4e, 0xbc, 0x5b, 0xf1, 0x8c,
+ 0x3e, 0x0d, 0xed, 0x99, 0xd7, 0x02, 0xcf, 0xbd, 0xc7, 0xa4, 0xb6, 0xc9, 0xd4, 0x85, 0x30, 0x6b,
+ 0x7a, 0x87, 0x54, 0x54, 0x96, 0xc7, 0x12, 0x7f, 0x69, 0x73, 0x47, 0xf4, 0xfe, 0xdb, 0x26, 0x0d,
+ 0xf4, 0x69, 0x5e, 0x58, 0x43, 0x7b, 0xa4, 0xad, 0xd2, 0x70, 0x25, 0x8d, 0x88, 0x95, 0x16, 0xcb,
+ 0x18, 0x79, 0x2b, 0xbc, 0xe9, 0xc0, 0x03, 0xa5, 0xc7, 0xf1, 0x36, 0x4c, 0xe5, 0x0f, 0x61, 0x7a,
+ 0x42, 0xda, 0x89, 0x0c, 0xcf, 0xa5, 0xd0, 0x9b, 0x2c, 0x53, 0xd9, 0x0a, 0x8d, 0xad, 0xf3, 0x16,
+ 0x82, 0xdc, 0x61, 0xf4, 0x11, 0x69, 0x82, 0xf7, 0xbd, 0x36, 0x18, 0xd4, 0x3a, 0x07, 0x07, 0x9d,
+ 0x64, 0xea, 0x62, 0xbe, 0xa6, 0x5f, 0x92, 0x5b, 0x36, 0x2a, 0x84, 0x34, 0x36, 0x3c, 0x4d, 0x94,
+ 0x39, 0x93, 0x31, 0xab, 0x20, 0x4f, 0xc7, 0x46, 0xc5, 0xe8, 0x03, 0x0a, 0x8c, 0xf2, 0x3c, 0x34,
+ 0xea, 0x5c, 0x8a, 0x58, 0x9e, 0xab, 0x48, 0x1a, 0x56, 0x75, 0x8c, 0x1e, 0x3e, 0x70, 0x28, 0xf8,
+ 0xdf, 0x9c, 0xc9, 0x24, 0x11, 0xef, 0xf2, 0x53, 0x56, 0x43, 0x96, 0x3a, 0x02, 0xff, 0xc8, 0x4f,
+ 0xe9, 0x43, 0x42, 0x20, 0x64, 0x22, 0xc9, 0xa3, 0xb5, 0x61, 0x75, 0xa7, 0x0d, 0x20, 0x87, 0x00,
+ 0xd0, 0x47, 0xa4, 0x91, 0xe4, 0x2b, 0x91, 0xc8, 0x73, 0x99, 0xb0, 0x06, 0x98, 0xfa, 0x5d, 0x69,
+ 0xc0, 0xeb, 0x49, 0xbe, 0x3a, 0x04, 0x88, 0xde, 0x27, 0x70, 0x76, 0x51, 0x27, 0x2e, 0xb5, 0x93,
+ 0x7c, 0x85, 0x61, 0xff, 0x82, 0x94, 0x0b, 0xc3, 0x9a, 0xdd, 0xd2, 0x5e, 0x73, 0x70, 0xaf, 0xff,
+ 0x9b, 0x85, 0xc1, 0xcb, 0x85, 0xa1, 0x4f, 0x49, 0x27, 0xcb, 0xad, 0x5a, 0x5e, 0x0a, 0x13, 0x69,
+ 0x55, 0x58, 0xc3, 0x5a, 0xa8, 0x45, 0xdb, 0xa1, 0x73, 0x07, 0x42, 0x54, 0x21, 0xe2, 0xac, 0xed,
+ 0x22, 0x8d, 0xd1, 0x7f, 0x48, 0x48, 0x11, 0x6a, 0x99, 0x59, 0xa1, 0xd2, 0x15, 0xeb, 0xe0, 0x4d,
+ 0xc3, 0x21, 0x93, 0x74, 0x05, 0x86, 0x5b, 0x1d, 0x46, 0x6b, 0x91, 0xca, 0x94, 0xdd, 0x72, 0x86,
+ 0x23, 0x70, 0x24, 0x53, 0x90, 0x0d, 0x37, 0x36, 0x17, 0xb1, 0x8c, 0x37, 0x05, 0x0b, 0x9c, 0xe1,
+ 0x80, 0x1c, 0x00, 0x00, 0x61, 0xfa, 0x29, 0xd7, 0xeb, 0x6d, 0xfc, 0x6f, 0x63, 0x94, 0x1b, 0x00,
+ 0xb9, 0xe8, 0x3f, 0x24, 0x24, 0x51, 0xd9, 0x5a, 0x68, 0x99, 0x86, 0x05, 0xa3, 0x4e, 0x1c, 0x10,
+ 0x0e, 0x00, 0x7d, 0x4a, 0x2a, 0x50, 0x9c, 0x86, 0x7d, 0xdc, 0xdd, 0xd9, 0x6b, 0x0e, 0x6e, 0xf5,
+ 0x6f, 0xd6, 0x2b, 0x77, 0xb7, 0xf4, 0x09, 0xa9, 0x45, 0xc5, 0x46, 0x44, 0x61, 0xc1, 0xee, 0x74,
+ 0x4b, 0x7b, 0xed, 0xef, 0xc8, 0xf3, 0xc1, 0xab, 0xe7, 0xaf, 0xbe, 0x7d, 0x39, 0x78, 0xf5, 0x82,
+ 0x57, 0xa3, 0x62, 0x33, 0x0c, 0x0b, 0xfa, 0x98, 0x34, 0x97, 0xb9, 0x8e, 0xa4, 0x50, 0x1a, 0xfe,
+ 0xba, 0x8b, 0x7f, 0x11, 0x84, 0x26, 0x80, 0x40, 0x10, 0xe4, 0x85, 0x8c, 0x44, 0x94, 0xc6, 0xec,
+ 0x5e, 0x77, 0x07, 0x82, 0x00, 0xf4, 0x30, 0x85, 0x24, 0xa9, 0x61, 0xad, 0x67, 0x96, 0x7d, 0x82,
+ 0x9a, 0x74, 0xfa, 0x37, 0x6a, 0x9f, 0x57, 0xe5, 0x85, 0x3d, 0xca, 0x2c, 0x44, 0x21, 0x0d, 0x33,
+ 0x88, 0x8f, 0x2b, 0x2f, 0xc3, 0x98, 0x8b, 0x82, 0x43, 0x87, 0x0e, 0xa4, 0x4f, 0x49, 0x2d, 0x5a,
+ 0x61, 0xe9, 0xb1, 0xfb, 0xf8, 0x5e, 0xab, 0x7f, 0xad, 0x1c, 0x79, 0x35, 0x5a, 0x71, 0x08, 0xcc,
+ 0x63, 0xd2, 0xd4, 0xc6, 0x0a, 0xa3, 0x4e, 0x13, 0xa8, 0x83, 0x4f, 0x9d, 0xca, 0xda, 0xd8, 0xb9,
+ 0x43, 0xe8, 0xfe, 0xf5, 0xb2, 0x67, 0x0f, 0xf0, 0xa9, 0x66, 0xff, 0x03, 0xc4, 0x1b, 0xfe, 0x3c,
+ 0x8e, 0x69, 0x97, 0xb4, 0x30, 0x52, 0x5b, 0x43, 0xfe, 0xe0, 0x5e, 0x03, 0x6c, 0xe4, 0x94, 0x7f,
+ 0xec, 0x6a, 0xca, 0x9c, 0x85, 0x1a, 0xbe, 0x7b, 0xe8, 0x18, 0xe4, 0x85, 0x9d, 0x3b, 0x64, 0xcb,
+ 0x90, 0x86, 0xc6, 0x4a, 0x6d, 0xd8, 0xa3, 0x2b, 0x86, 0x23, 0x87, 0x80, 0x0b, 0xcd, 0x5a, 0x15,
+ 0xf8, 0xfe, 0x63, 0xe7, 0x42, 0xa0, 0xe1, 0x71, 0x68, 0x5f, 0x59, 0x78, 0x9a, 0x48, 0xb1, 0x34,
+ 0xac, 0x8b, 0x77, 0x75, 0x07, 0x8c, 0x0d, 0xdd, 0x23, 0x4d, 0x5f, 0xc9, 0x42, 0x65, 0x39, 0xfb,
+ 0x0c, 0x0d, 0xa9, 0xf7, 0x3d, 0xc6, 0x1b, 0x1b, 0x2c, 0xea, 0x49, 0x96, 0xd3, 0xbf, 0x93, 0x8f,
+ 0x6f, 0x3a, 0x58, 0xa4, 0xd0, 0x84, 0x7a, 0xdd, 0xd2, 0x5e, 0x67, 0xd0, 0x76, 0xf9, 0x11, 0xad,
+ 0x10, 0xe4, 0xb7, 0x6f, 0x38, 0xfd, 0x28, 0x8f, 0x25, 0x7c, 0xb4, 0x3a, 0xcb, 0x8d, 0x15, 0x89,
+ 0x4a, 0x95, 0x65, 0x4f, 0x30, 0x5b, 0x6a, 0xdf, 0x7c, 0xfd, 0xfc, 0xaf, 0x2f, 0x5e, 0x7e, 0xcb,
+ 0x09, 0xde, 0x1d, 0xc2, 0x15, 0xdd, 0x23, 0x01, 0x26, 0x8a, 0x30, 0x51, 0x98, 0x09, 0xe8, 0x7e,
+ 0x86, 0x7d, 0x8e, 0x6a, 0x77, 0x10, 0x9f, 0x47, 0x61, 0x36, 0x03, 0x94, 0x7e, 0x0a, 0x79, 0x63,
+ 0xa5, 0xce, 0xc2, 0x84, 0x3d, 0xf5, 0x86, 0x79, 0x1a, 0x73, 0x2a, 0x2d, 0xec, 0xa5, 0xc8, 0x0c,
+ 0xfb, 0x02, 0x3e, 0xe3, 0x35, 0xa4, 0x8f, 0xc1, 0xe6, 0x9a, 0x1b, 0x05, 0x86, 0x7d, 0xe9, 0xb3,
+ 0xfb, 0xe6, 0x68, 0xe0, 0x55, 0xa0, 0x8f, 0x0d, 0xfd, 0x8c, 0xb4, 0x7c, 0x76, 0x14, 0x3a, 0x2f,
+ 0x0c, 0xfb, 0x23, 0x56, 0xa8, 0x6f, 0xe0, 0x33, 0x80, 0xe8, 0x3e, 0xb9, 0x7d, 0x9d, 0xc5, 0x75,
+ 0x92, 0x7d, 0xe4, 0xbb, 0x75, 0x8d, 0x0f, 0x3b, 0xca, 0x73, 0x72, 0xcf, 0xf3, 0xc6, 0x9b, 0xb4,
+ 0x10, 0x51, 0x9e, 0x59, 0x9d, 0x27, 0x89, 0xd4, 0xec, 0x2b, 0xd4, 0xfe, 0x8e, 0xbb, 0x3d, 0xd8,
+ 0xa4, 0xc5, 0xf0, 0xea, 0x0e, 0xba, 0xf2, 0x52, 0x4b, 0xf9, 0x7e, 0xeb, 0x78, 0xf6, 0x0c, 0x5f,
+ 0x6f, 0x39, 0xd0, 0xf9, 0x18, 0x26, 0xb4, 0x55, 0xa9, 0x84, 0x59, 0xf9, 0x27, 0x67, 0xad, 0x27,
+ 0xe9, 0x57, 0x84, 0x42, 0x3f, 0xc6, 0xec, 0x50, 0x99, 0x58, 0x26, 0x6a, 0x75, 0x66, 0x59, 0x1f,
+ 0x33, 0x08, 0x3a, 0xf5, 0x7c, 0xad, 0x8a, 0x49, 0x36, 0x46, 0x18, 0x0c, 0xfe, 0x49, 0x86, 0x6b,
+ 0x61, 0x2e, 0x4d, 0x64, 0x13, 0xc3, 0xfe, 0x8c, 0x6c, 0x4d, 0xc0, 0xe6, 0x0e, 0xc2, 0xc6, 0x11,
+ 0xbe, 0xbf, 0xc4, 0x5e, 0x68, 0xd8, 0xd7, 0xbe, 0x71, 0x84, 0xef, 0x2f, 0x67, 0x00, 0x60, 0xb3,
+ 0xb6, 0xa1, 0xdd, 0x18, 0xa8, 0x8b, 0x6f, 0xb0, 0xeb, 0xd4, 0x1d, 0x30, 0x8e, 0xc1, 0x59, 0xb9,
+ 0x2e, 0xce, 0x20, 0xac, 0xd6, 0xf8, 0x6c, 0x66, 0x03, 0xa7, 0x8a, 0xbb, 0x98, 0x59, 0xe3, 0x52,
+ 0xba, 0xf7, 0xcc, 0xef, 0x08, 0xe8, 0x2a, 0x2d, 0x4d, 0x01, 0xe1, 0xd6, 0xd2, 0xd8, 0x5c, 0xcb,
+ 0x18, 0xe7, 0x65, 0x9d, 0x5f, 0xd1, 0xbd, 0xa7, 0xe4, 0x36, 0x72, 0x7b, 0xc0, 0x09, 0xf8, 0x09,
+ 0xe7, 0x66, 0x1f, 0x1c, 0x7b, 0x2f, 0x49, 0x13, 0xd9, 0x5c, 0x6b, 0xa6, 0xf7, 0x48, 0xd5, 0xf5,
+ 0x6c, 0x3f, 0x7f, 0x3d, 0xf5, 0xeb, 0xd1, 0xd8, 0xfb, 0x81, 0xb4, 0x51, 0x70, 0x29, 0x43, 0xbb,
+ 0xd1, 0xce, 0xce, 0x54, 0xa6, 0x02, 0xdb, 0xf1, 0x56, 0x9b, 0x54, 0xa6, 0x0b, 0xa0, 0x7f, 0xe1,
+ 0xa3, 0xf2, 0x2f, 0x7c, 0xd4, 0xfb, 0xb9, 0x44, 0xea, 0x5e, 0xdb, 0x7f, 0xd1, 0x1e, 0xd9, 0xb5,
+ 0x97, 0x85, 0x9b, 0xe6, 0x9d, 0x41, 0xa7, 0xbf, 0xbd, 0x10, 0x80, 0x72, 0xbc, 0xa3, 0x8f, 0xc8,
+ 0x2e, 0x8c, 0x75, 0x7c, 0xa9, 0x39, 0x20, 0xfd, 0xab, 0x41, 0xcf, 0x11, 0xbf, 0x3e, 0x82, 0x36,
+ 0x51, 0x04, 0x6b, 0xda, 0xce, 0x8d, 0x11, 0xe4, 0x40, 0xd0, 0x79, 0x2d, 0x65, 0x21, 0xf2, 0x42,
+ 0x66, 0x7e, 0x70, 0xd7, 0x01, 0x98, 0x16, 0x32, 0xa3, 0xfb, 0xa4, 0xbe, 0x35, 0x0e, 0x07, 0x76,
+ 0x73, 0xab, 0xcb, 0x16, 0xe5, 0x57, 0xf7, 0x5b, 0xff, 0x54, 0x31, 0xd3, 0xd0, 0x3f, 0xff, 0xde,
+ 0xf1, 0xeb, 0x07, 0x3a, 0xfe, 0xff, 0xb1, 0x89, 0x91, 0xda, 0x56, 0x59, 0x58, 0x74, 0xea, 0x7c,
+ 0x4b, 0xd2, 0x27, 0x64, 0x17, 0x82, 0x8e, 0x36, 0x5c, 0x8d, 0x9e, 0xab, 0x34, 0xe0, 0x78, 0x49,
+ 0x9f, 0x91, 0x9a, 0x8f, 0x35, 0x5a, 0xd2, 0x1c, 0xd0, 0xfe, 0xaf, 0x12, 0x80, 0x6f, 0x59, 0xe8,
+ 0xe7, 0xa4, 0xea, 0x5c, 0xe1, 0x4d, 0x6b, 0xf5, 0xaf, 0xa5, 0x01, 0xf7, 0x77, 0x7e, 0xe2, 0x57,
+ 0x7f, 0x77, 0xe2, 0xdf, 0x87, 0xf0, 0x09, 0xa9, 0x75, 0x96, 0xe3, 0x3e, 0x52, 0xe1, 0xb5, 0x48,
+ 0x8f, 0x80, 0xbc, 0xe1, 0xc5, 0xfa, 0xef, 0x78, 0xf1, 0x01, 0xb8, 0x0c, 0x9e, 0x49, 0xcd, 0x0a,
+ 0x77, 0x93, 0x06, 0xaf, 0xe3, 0x3b, 0xa9, 0x59, 0xc1, 0xe0, 0x3b, 0x97, 0xda, 0xa8, 0x3c, 0xc3,
+ 0xbd, 0xa4, 0xb9, 0x6d, 0xb1, 0x1e, 0xe4, 0xdb, 0x5b, 0xcc, 0x61, 0xac, 0x2f, 0x5c, 0x55, 0x2a,
+ 0xdc, 0x53, 0xbd, 0xff, 0x94, 0x48, 0xeb, 0xba, 0x04, 0xec, 0x8d, 0x69, 0xf8, 0x2e, 0xd7, 0xbe,
+ 0x1e, 0x1c, 0x81, 0xa8, 0xca, 0x72, 0xed, 0x57, 0x54, 0x47, 0x00, 0xba, 0x52, 0xd6, 0x2f, 0xf1,
+ 0x0d, 0xee, 0x08, 0x28, 0x40, 0xb3, 0x39, 0x75, 0xbb, 0xd4, 0xae, 0x2f, 0x6d, 0x4f, 0x83, 0x04,
+ 0xee, 0xc4, 0xe8, 0xe0, 0x0a, 0x77, 0x04, 0x2c, 0x3d, 0xd0, 0x55, 0xd1, 0xa7, 0x0d, 0x8e, 0xe7,
+ 0x7d, 0xe1, 0xf5, 0xf2, 0xc3, 0x82, 0x12, 0x52, 0x9d, 0xbc, 0x39, 0x9e, 0xf2, 0x51, 0xf0, 0x11,
+ 0x6d, 0x92, 0xda, 0xf0, 0x8d, 0x38, 0x9e, 0x1e, 0x8f, 0x82, 0x12, 0x6d, 0x90, 0xca, 0x8c, 0x4f,
+ 0x67, 0xf3, 0xa0, 0x4c, 0xeb, 0x64, 0x77, 0x3e, 0x1d, 0x2f, 0x82, 0x1d, 0x38, 0x8d, 0x4f, 0x0e,
+ 0x0f, 0x83, 0x5d, 0x90, 0x9b, 0x2f, 0xf8, 0x64, 0xb8, 0x08, 0x2a, 0x20, 0x77, 0x30, 0x1a, 0xbf,
+ 0x3e, 0x39, 0x5c, 0x04, 0xd5, 0xfd, 0x9f, 0x4b, 0xbe, 0x58, 0xb7, 0x19, 0x07, 0x2f, 0x8d, 0x8e,
+ 0x66, 0x8b, 0x1f, 0x83, 0x8f, 0x40, 0xfe, 0xe0, 0xe4, 0x68, 0x16, 0x94, 0x40, 0x86, 0x8f, 0xe6,
+ 0x0b, 0xf8, 0xb8, 0x0c, 0x1c, 0xc3, 0xef, 0x47, 0xc3, 0x1f, 0x82, 0x1d, 0xda, 0x22, 0xf5, 0x19,
+ 0x1f, 0x09, 0xe4, 0xda, 0xa5, 0xb7, 0x48, 0x73, 0xf6, 0xfa, 0xcd, 0x48, 0xcc, 0x47, 0xfc, 0xed,
+ 0x88, 0x07, 0x15, 0xf8, 0xf6, 0x78, 0xba, 0x98, 0x8c, 0x7f, 0x0c, 0xaa, 0x34, 0x20, 0xad, 0xe1,
+ 0xec, 0x64, 0x72, 0x3c, 0x9e, 0x3a, 0xf6, 0x1a, 0xbd, 0x4d, 0xda, 0x5b, 0xc4, 0xbd, 0x57, 0x07,
+ 0x68, 0x3c, 0x7a, 0xbd, 0x38, 0xe1, 0x23, 0x0f, 0x35, 0xe0, 0xeb, 0xb7, 0x23, 0x3e, 0x9f, 0x4c,
+ 0x8f, 0x03, 0x02, 0xff, 0xfd, 0xf3, 0xf5, 0x64, 0x21, 0x66, 0x93, 0x83, 0xa0, 0x49, 0xef, 0x90,
+ 0xe0, 0xda, 0x7f, 0x62, 0xf8, 0xfd, 0xe1, 0x41, 0xd0, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0x16, 0xc7, 0x59, 0xf5, 0xa5, 0x0d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/main.go b/vendor/github.com/checkpoint-restore/go-criu/test/main.go
new file mode 100644
index 000000000..418ebb843
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/test/main.go
@@ -0,0 +1,133 @@
+package main
+
+import (
+ "fmt"
+ "github.com/checkpoint-restore/go-criu"
+ "github.com/checkpoint-restore/go-criu/rpc"
+ "github.com/golang/protobuf/proto"
+ "os"
+ "strconv"
+)
+
+// TestNfy struct
+type TestNfy struct {
+ criu.NoNotify
+}
+
+// PreDump test function
+func (c TestNfy) PreDump() error {
+ fmt.Printf("TEST PRE DUMP\n")
+ return nil
+}
+
+func doDump(c *criu.Criu, pidS string, imgDir string, pre bool, prevImg string) error {
+ fmt.Printf("Dumping\n")
+ pid, _ := strconv.Atoi(pidS)
+ img, err := os.Open(imgDir)
+ if err != nil {
+ return fmt.Errorf("can't open image dir (%s)", err)
+ }
+ defer img.Close()
+
+ opts := rpc.CriuOpts{
+ Pid: proto.Int32(int32(pid)),
+ ImagesDirFd: proto.Int32(int32(img.Fd())),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("dump.log"),
+ }
+
+ if prevImg != "" {
+ opts.ParentImg = proto.String(prevImg)
+ opts.TrackMem = proto.Bool(true)
+ }
+
+ if pre {
+ err = c.PreDump(opts, TestNfy{})
+ } else {
+ err = c.Dump(opts, TestNfy{})
+ }
+ if err != nil {
+ return fmt.Errorf("dump fail (%s)", err)
+ }
+
+ return nil
+}
+
+// Usage: test $act $pid $images_dir
+func main() {
+ c := criu.MakeCriu()
+ // Read out CRIU version
+ version, err := c.GetCriuVersion()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ fmt.Println("CRIU version", version)
+ // Check if version at least 3.2
+ result, err := c.IsCriuAtLeast(30200)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ if !result {
+ fmt.Println("CRIU too old")
+ os.Exit(1)
+ }
+ act := os.Args[1]
+ switch act {
+ case "dump":
+ err := doDump(c, os.Args[2], os.Args[3], false, "")
+ if err != nil {
+ fmt.Print(err)
+ os.Exit(1)
+ }
+ case "dump2":
+ err := c.Prepare()
+ if err != nil {
+ fmt.Print(err)
+ os.Exit(1)
+ }
+
+ err = doDump(c, os.Args[2], os.Args[3]+"/pre", true, "")
+ if err != nil {
+ fmt.Printf("pre-dump failed")
+ fmt.Print(err)
+ os.Exit(1)
+ }
+ err = doDump(c, os.Args[2], os.Args[3], false, "./pre")
+ if err != nil {
+ fmt.Printf("dump failed")
+ fmt.Print(err)
+ os.Exit(1)
+ }
+
+ c.Cleanup()
+ case "restore":
+ fmt.Printf("Restoring\n")
+ img, err := os.Open(os.Args[2])
+ if err != nil {
+ fmt.Printf("can't open image dir")
+ os.Exit(1)
+ }
+ defer img.Close()
+
+ opts := rpc.CriuOpts{
+ ImagesDirFd: proto.Int32(int32(img.Fd())),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("restore.log"),
+ }
+
+ err = c.Restore(opts, nil)
+ if err != nil {
+ fmt.Printf("Error:")
+ fmt.Print(err)
+ fmt.Printf("\n")
+ os.Exit(1)
+ }
+ default:
+ fmt.Printf("unknown action\n")
+ os.Exit(1)
+ }
+
+ fmt.Printf("Success\n")
+}
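
The vendored test above drives the full go-criu surface (version probe, dump, pre-dump, restore). As a condensed sketch of just the checkpoint path it exercises (not podman's own checkpoint code), the minimal dump call looks like this; the pid and image directory are placeholders supplied by the caller:

```go
package sketch

import (
	"fmt"
	"os"

	"github.com/checkpoint-restore/go-criu"
	"github.com/checkpoint-restore/go-criu/rpc"
	"github.com/golang/protobuf/proto"
)

// dump checkpoints the given process into imgDir, mirroring what the
// vendored test does in doDump but without the pre-dump/restore variants.
func dump(pid int32, imgDir string) error {
	c := criu.MakeCriu()
	ok, err := c.IsCriuAtLeast(30200) // CRIU 3.2 or newer, as in the test
	if err != nil || !ok {
		return fmt.Errorf("criu missing or too old: %v", err)
	}
	img, err := os.Open(imgDir) // directory that will receive the image files
	if err != nil {
		return err
	}
	defer img.Close()
	opts := rpc.CriuOpts{
		Pid:         proto.Int32(pid),
		ImagesDirFd: proto.Int32(int32(img.Fd())),
		LogLevel:    proto.Int32(4),
		LogFile:     proto.String("dump.log"),
	}
	// NoNotify provides no-op notification callbacks, like the test's TestNfy.
	return c.Dump(opts, criu.NoNotify{})
}
```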
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go b/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go
new file mode 100644
index 000000000..f1bec2c55
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/test/phaul-main.go
@@ -0,0 +1,192 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/checkpoint-restore/go-criu"
+ "github.com/checkpoint-restore/go-criu/phaul"
+ "github.com/checkpoint-restore/go-criu/rpc"
+ "github.com/golang/protobuf/proto"
+)
+
+type testLocal struct {
+ criu.NoNotify
+ r *testRemote
+}
+
+type testRemote struct {
+ srv *phaul.Server
+}
+
+/* Dir where test will put dump images */
+const imagesDir = "image"
+
+func prepareImages() error {
+ err := os.Mkdir(imagesDir, 0700)
+ if err != nil {
+ return err
+ }
+
+ /* Work dir for PhaulClient */
+ err = os.Mkdir(imagesDir+"/local", 0700)
+ if err != nil {
+ return err
+ }
+
+ /* Work dir for PhaulServer */
+ err = os.Mkdir(imagesDir+"/remote", 0700)
+ if err != nil {
+ return err
+ }
+
+ /* Work dir for DumpCopyRestore */
+ err = os.Mkdir(imagesDir+"/test", 0700)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func mergeImages(dumpDir, lastPreDumpDir string) error {
+ idir, err := os.Open(dumpDir)
+ if err != nil {
+ return err
+ }
+
+ defer idir.Close()
+
+ imgs, err := idir.Readdirnames(0)
+ if err != nil {
+ return err
+ }
+
+ for _, fname := range imgs {
+ if !strings.HasSuffix(fname, ".img") {
+ continue
+ }
+
+ fmt.Printf("\t%s -> %s/\n", fname, lastPreDumpDir)
+ err = syscall.Link(dumpDir+"/"+fname, lastPreDumpDir+"/"+fname)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *testRemote) doRestore() error {
+ lastSrvImagesDir := r.srv.LastImagesDir()
+ /*
+ * In imagesDir we have images from dump, in the
+ * lastSrvImagesDir -- where server-side images
+ * (from page server, with pages and pagemaps) are.
+ * Need to put former into latter and restore from
+ * them.
+ */
+ err := mergeImages(imagesDir+"/test", lastSrvImagesDir)
+ if err != nil {
+ return err
+ }
+
+ imgDir, err := os.Open(lastSrvImagesDir)
+ if err != nil {
+ return err
+ }
+ defer imgDir.Close()
+
+ opts := rpc.CriuOpts{
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("restore.log"),
+ ImagesDirFd: proto.Int32(int32(imgDir.Fd())),
+ }
+
+ cr := r.srv.GetCriu()
+ fmt.Printf("Do restore\n")
+ return cr.Restore(opts, nil)
+}
+
+func (l *testLocal) PostDump() error {
+ return l.r.doRestore()
+}
+
+func (l *testLocal) DumpCopyRestore(cr *criu.Criu, cfg phaul.Config, lastClnImagesDir string) error {
+ fmt.Printf("Final stage\n")
+
+ imgDir, err := os.Open(imagesDir + "/test")
+ if err != nil {
+ return err
+ }
+ defer imgDir.Close()
+
+ psi := rpc.CriuPageServerInfo{
+ Fd: proto.Int32(int32(cfg.Memfd)),
+ }
+
+ opts := rpc.CriuOpts{
+ Pid: proto.Int32(int32(cfg.Pid)),
+ LogLevel: proto.Int32(4),
+ LogFile: proto.String("dump.log"),
+ ImagesDirFd: proto.Int32(int32(imgDir.Fd())),
+ TrackMem: proto.Bool(true),
+ ParentImg: proto.String(lastClnImagesDir),
+ Ps: &psi,
+ }
+
+ fmt.Printf("Do dump\n")
+ return cr.Dump(opts, l)
+}
+
+func main() {
+ pid, _ := strconv.Atoi(os.Args[1])
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
+ if err != nil {
+ fmt.Printf("Can't make socketpair: %v\n", err)
+ os.Exit(1)
+ }
+
+ err = prepareImages()
+ if err != nil {
+ fmt.Printf("Can't prepare dirs for images: %v\n", err)
+ os.Exit(1)
+ return
+ }
+
+ fmt.Printf("Make server part (socket %d)\n", fds[1])
+ srv, err := phaul.MakePhaulServer(phaul.Config{
+ Pid: pid,
+ Memfd: fds[1],
+ Wdir: imagesDir + "/remote"})
+ if err != nil {
+ fmt.Printf("Unable to run a server: %v", err)
+ os.Exit(1)
+ return
+ }
+
+ r := &testRemote{srv}
+
+ fmt.Printf("Make client part (socket %d)\n", fds[0])
+ cln, err := phaul.MakePhaulClient(&testLocal{r: r}, srv,
+ phaul.Config{
+ Pid: pid,
+ Memfd: fds[0],
+ Wdir: imagesDir + "/local"})
+ if err != nil {
+ fmt.Printf("Unable to run a client: %v\n", err)
+ os.Exit(1)
+ }
+
+ fmt.Printf("Migrate\n")
+ err = cln.Migrate()
+ if err != nil {
+ fmt.Printf("Failed: %v\n", err)
+ os.Exit(1)
+ }
+
+ fmt.Printf("SUCCESS!\n")
+}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c b/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c
new file mode 100644
index 000000000..1dc0801c0
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/test/piggie.c
@@ -0,0 +1,57 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sched.h>
+
+#define STKS (4*4096)
+
+#ifndef CLONE_NEWPID
+#define CLONE_NEWPID 0x20000000
+#endif
+
+static int do_test(void *logf)
+{
+ int fd, i = 0;
+
+ setsid();
+
+ close(0);
+ close(1);
+ close(2);
+
+ fd = open("/dev/null", O_RDONLY);
+ if (fd != 0) {
+ dup2(fd, 0);
+ close(fd);
+ }
+
+ fd = open(logf, O_WRONLY | O_TRUNC | O_CREAT, 0600);
+ dup2(fd, 1);
+ dup2(fd, 2);
+ if (fd != 1 && fd != 2)
+ close(fd);
+
+ while (1) {
+ sleep(1);
+ printf("%d\n", i++);
+ fflush(stdout);
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int pid;
+ void *stk;
+
+ stk = mmap(NULL, STKS, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN, 0, 0);
+ pid = clone(do_test, stk + STKS, SIGCHLD | CLONE_NEWPID, argv[1]);
+ printf("Child forked, pid %d\n", pid);
+
+ return 0;
+}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index f87ea15be..0a125331d 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -133,6 +133,20 @@ func copyContainer(c *Container) *Container {
}
}
+func (c *Container) MountLabel() string {
+ if label, ok := c.Flags["MountLabel"].(string); ok {
+ return label
+ }
+ return ""
+}
+
+func (c *Container) ProcessLabel() string {
+ if label, ok := c.Flags["ProcessLabel"].(string); ok {
+ return label
+ }
+ return ""
+}
+
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -297,7 +311,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
- Flags: make(map[string]interface{}),
+ Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
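
The two accessors added above read the SELinux labels that the reworked CreateContainer (later in this diff, in store.go) records in the container's Flags map. A minimal sketch of a caller consuming them, assuming an already-initialized storage.Store and a placeholder container ID:

```go
package sketch

import (
	"fmt"

	"github.com/containers/storage"
)

// printLabels looks up a container and prints the labels stored at
// creation time; both accessors return "" when no label was recorded.
func printLabels(store storage.Store, id string) error {
	ctr, err := store.Container(id) // id is a placeholder container ID or name
	if err != nil {
		return err
	}
	fmt.Println("process label:", ctr.ProcessLabel())
	fmt.Println("mount label:", ctr.MountLabel())
	return nil
}
```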
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index bee4a598e..f14ba24b9 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -416,7 +416,7 @@ func atomicRemove(source string) error {
// Get returns the rootfs path for the id.
// This will mount the dir at its given path
-func (a *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
parents, err := a.getParentLayerPaths(id)
@@ -441,7 +441,7 @@ func (a *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (s
// If a dir does not have a parent ( no layers )do not try to mount
// just return the diff path to the data
if len(parents) > 0 {
- if err := a.mount(id, m, mountLabel, parents); err != nil {
+ if err := a.mount(id, m, options.MountLabel, parents); err != nil {
return "", err
}
}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 2dd81b0c0..adc34d209 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -634,7 +634,7 @@ func (d *Driver) Remove(id string) error {
}
// Get the requested filesystem id.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index 168bb7e34..4d4011ee0 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -114,7 +114,10 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
// same "container" IDs.
func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
driver := n.ProtoDriver
- layerFs, err := driver.Get(id, mountLabel, nil, nil)
+ options := MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := driver.Get(id, options)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index 4aaca6508..9fc082d7d 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -163,7 +163,7 @@ func (d *Driver) Remove(id string) error {
}
// Get mounts a device with given id into the root filesystem
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id)
@@ -189,7 +189,7 @@ func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (s
}
// Mount the device
- if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
+ if err := d.DeviceSet.MountDevice(id, mp, options.MountLabel); err != nil {
d.ctr.Decrement(mp)
return "", err
}
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index 40b911ab7..4569c7b59 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -42,6 +42,15 @@ type CreateOpts struct {
StorageOpt map[string]string
}
+// MountOpts contains optional arguments for LayerStore.Mount() methods.
+type MountOpts struct {
+	// MountLabel is the MAC (SELinux) label to assign to the mount point
+	MountLabel string
+	// UidMaps & GidMaps are the user-namespace mappings to be assigned to content in the mount point
+	UidMaps []idtools.IDMap
+	GidMaps []idtools.IDMap
+}
+
// InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
@@ -68,7 +77,7 @@ type ProtoDriver interface {
// to by this id. You can optionally specify a mountLabel or "".
// Optionally it gets the mappings used to create the layer.
// Returns the absolute path to the mounted layered filesystem.
- Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (dir string, err error)
+ Get(id string, options MountOpts) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
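
The hunk above changes the ProtoDriver.Get contract: the mount label and user-namespace ID maps now travel in a single MountOpts struct instead of positional arguments, which is why every driver and caller in the rest of this diff is updated in lockstep. A minimal sketch of the new calling convention, with all inputs assumed to come from the caller:

```go
package sketch

import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/idtools"
)

// mountLayer shows the post-change calling convention: instead of
// Get(id, mountLabel, uidMaps, gidMaps), the optional arguments are
// bundled into a graphdriver.MountOpts value.
func mountLayer(d graphdriver.Driver, id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
	opts := graphdriver.MountOpts{
		MountLabel: mountLabel,
		UidMaps:    uidMaps,
		GidMaps:    gidMaps,
	}
	return d.Get(id, opts)
}
```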
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index 64541e269..19da7d101 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -51,7 +51,10 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
parentMappings = &idtools.IDMappings{}
}
- layerFs, err := driver.Get(id, mountLabel, nil, nil)
+ options := MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := driver.Get(id, options)
if err != nil {
return nil, err
}
@@ -78,7 +81,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
}), nil
}
- parentFs, err := driver.Get(parent, mountLabel, nil, nil)
+ parentFs, err := driver.Get(parent, options)
if err != nil {
return nil, err
}
@@ -119,7 +122,10 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
parentMappings = &idtools.IDMappings{}
}
- layerFs, err := driver.Get(id, mountLabel, nil, nil)
+ options := MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := driver.Get(id, options)
if err != nil {
return nil, err
}
@@ -128,7 +134,10 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
parentFs := ""
if parent != "" {
- parentFs, err = driver.Get(parent, mountLabel, nil, nil)
+ options := MountOpts{
+ MountLabel: mountLabel,
+ }
+ parentFs, err = driver.Get(parent, options)
if err != nil {
return nil, err
}
@@ -149,7 +158,10 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}
// Mount the root filesystem so we can apply the diff/layer.
- layerFs, err := driver.Get(id, mountLabel, nil, nil)
+ mountOpts := MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := driver.Get(id, mountOpts)
if err != nil {
return
}
@@ -189,7 +201,10 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
return
}
- layerFs, err := driver.Get(id, mountLabel, nil, nil)
+ options := MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := driver.Get(id, options)
if err != nil {
return
}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 323d7c274..66ccc6a63 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -642,11 +642,11 @@ func (d *Driver) Remove(id string) error {
}
// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (_ string, retErr error) {
- return d.get(id, mountLabel, false, uidMaps, gidMaps)
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
+ return d.get(id, false, options)
}
-func (d *Driver) get(id, mountLabel string, disableShifting bool, uidMaps, gidMaps []idtools.IDMap) (_ string, retErr error) {
+func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
dir := d.dir(id)
@@ -740,7 +740,7 @@ func (d *Driver) get(id, mountLabel string, disableShifting bool, uidMaps, gidMa
if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
}
- mountData := label.FormatMountLabel(opts, mountLabel)
+ mountData := label.FormatMountLabel(opts, options.MountLabel)
mountFunc := unix.Mount
mountTarget := mergedDir
@@ -753,7 +753,7 @@ func (d *Driver) get(id, mountLabel string, disableShifting bool, uidMaps, gidMa
if d.options.mountProgram != "" {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
if !disableShifting {
- label = d.optsAppendMappings(label, uidMaps, gidMaps)
+ label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps)
}
mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
@@ -763,7 +763,7 @@ func (d *Driver) get(id, mountLabel string, disableShifting bool, uidMaps, gidMa
} else if len(mountData) > pageSize {
//FIXME: We need to figure out to get this to work with additional stores
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work"))
- mountData = label.FormatMountLabel(opts, mountLabel)
+ mountData = label.FormatMountLabel(opts, options.MountLabel)
if len(mountData) > pageSize {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
}
@@ -952,7 +952,10 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
}
// Mount the new layer and handle ownership changes and possible copy_ups in it.
- layerFs, err := d.get(id, mountLabel, true, nil, nil)
+ options := graphdriver.MountOpts{
+ MountLabel: mountLabel,
+ }
+ layerFs, err := d.get(id, true, options)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index 115afb814..d10fb2607 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -137,7 +137,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
label.SetFileLabel(dir, mountLabel)
}
if parent != "" {
- parentDir, err := d.Get(parent, "", nil, nil)
+ parentDir, err := d.Get(parent, graphdriver.MountOpts{})
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
@@ -179,7 +179,7 @@ func (d *Driver) Remove(id string) error {
}
// Get returns the directory for the given id.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
if st, err := os.Stat(dir); err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 9d9aac701..4ccf657dc 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -362,9 +362,9 @@ func (d *Driver) Remove(id string) error {
}
// Get returns the rootfs path for the id. This will mount the dir at its given path.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
panicIfUsedByLcow()
- logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
+ logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel)
var dir string
rID, err := d.resolveID(id)
@@ -620,7 +620,7 @@ func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent stri
return
}
- layerFs, err := d.Get(id, "", nil, nil)
+ layerFs, err := d.Get(id, graphdriver.MountOpts{})
if err != nil {
return
}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index b8ae59a61..cb4424f2d 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -360,15 +360,15 @@ func (d *Driver) Remove(id string) error {
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}
filesystem := d.zfsPath(id)
- options := label.FormatMountLabel(d.options.mountOptions, mountLabel)
- logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
+ opts := label.FormatMountLabel(d.options.mountOptions, options.MountLabel)
+ logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
@@ -381,7 +381,7 @@ func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (s
return "", err
}
- if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
+ if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
}
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index fe263ba63..1275ab47c 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -21,6 +21,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
@@ -210,7 +211,7 @@ type LayerStore interface {
// layers, it should not be written to. An SELinux label to be applied to the
// mount can be specified to override the one configured for the layer.
// The mappings used by the container can be specified.
- Mount(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error)
+ Mount(id string, options drivers.MountOpts) (string, error)
// Unmount unmounts a layer when it is no longer in use.
Unmount(id string, force bool) (bool, error)
@@ -294,6 +295,9 @@ func (r *layerStore) Load() error {
mounts := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
+ if r.lockfile.IsReadWrite() {
+ label.ClearLabels()
+ }
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(layers))
for n, layer := range layers {
@@ -312,6 +316,9 @@ func (r *layerStore) Load() error {
if layer.UncompressedDigest != "" {
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
}
+ if layer.MountLabel != "" {
+ label.ReserveLabel(layer.MountLabel)
+ }
}
}
if shouldSave && !r.IsReadWrite() {
@@ -552,6 +559,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
} else {
parentMappings = &idtools.IDMappings{}
}
+ if mountLabel != "" {
+ label.ReserveLabel(mountLabel)
+ }
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
@@ -649,7 +659,7 @@ func (r *layerStore) Mounted(id string) (int, error) {
return layer.MountCount, nil
}
-func (r *layerStore) Mount(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
@@ -661,16 +671,16 @@ func (r *layerStore) Mount(id, mountLabel string, uidMaps, gidMaps []idtools.IDM
layer.MountCount++
return layer.MountPoint, r.Save()
}
- if mountLabel == "" {
- mountLabel = layer.MountLabel
+ if options.MountLabel == "" {
+ options.MountLabel = layer.MountLabel
}
- if (uidMaps != nil || gidMaps != nil) && !r.driver.SupportsShifting() {
- if !reflect.DeepEqual(uidMaps, layer.UIDMap) || !reflect.DeepEqual(gidMaps, layer.GIDMap) {
+ if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() {
+ if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) {
return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID)
}
}
- mountpoint, err := r.driver.Get(id, mountLabel, uidMaps, gidMaps)
+ mountpoint, err := r.driver.Get(id, options)
if mountpoint != "" && err == nil {
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
@@ -839,6 +849,7 @@ func (r *layerStore) Delete(id string) error {
os.Remove(r.tspath(id))
delete(r.byid, id)
r.idindex.Delete(id)
+ mountLabel := layer.MountLabel
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
@@ -857,6 +868,18 @@ func (r *layerStore) Delete(id string) error {
r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
}
}
+ if mountLabel != "" {
+ var found bool
+ for _, candidate := range r.layers {
+ if candidate.MountLabel == mountLabel {
+ found = true
+ break
+ }
+ }
+ if !found {
+ label.ReleaseLabel(mountLabel)
+ }
+ }
if err = r.Save(); err != nil {
return err
}
@@ -957,7 +980,7 @@ func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
return getter.DiffGetter(id)
}
- path, err := r.Mount(id, "", nil, nil)
+ path, err := r.Mount(id, drivers.MountOpts{})
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
new file mode 100644
index 000000000..70f9c5564
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "storage-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 94cf1f0a7..7eaa82910 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -25,6 +25,7 @@ import (
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@@ -251,6 +252,8 @@ type Store interface {
// Mount attempts to mount a layer, image, or container for access, and
// returns the pathname if it succeeds.
+	// Note that if mountLabel == "", the default label for the container
+	// will be used.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
@@ -497,6 +500,8 @@ type ContainerOptions struct {
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
IDMappingOptions
+ LabelOpts []string
+ Flags map[string]interface{}
}
type store struct {
@@ -1175,7 +1180,26 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
},
}
}
- clayer, err := rlstore.Create(layer, imageTopLayer, nil, "", nil, layerOptions, true)
+ if options.Flags == nil {
+ options.Flags = make(map[string]interface{})
+ }
+ plabel, _ := options.Flags["ProcessLabel"].(string)
+ mlabel, _ := options.Flags["MountLabel"].(string)
+ if (plabel == "" && mlabel != "") ||
+ (plabel != "" && mlabel == "") {
+		return nil, errors.Errorf("ProcessLabel and MountLabel must either both be specified or both be unspecified")
+ }
+
+ if plabel == "" {
+ processLabel, mountLabel, err := label.InitLabels(options.LabelOpts)
+ if err != nil {
+ return nil, err
+ }
+ options.Flags["ProcessLabel"] = processLabel
+ options.Flags["MountLabel"] = mountLabel
+ }
+
+ clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true)
if err != nil {
return nil, err
}
@@ -1189,13 +1213,11 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
- options = &ContainerOptions{
- IDMappingOptions: IDMappingOptions{
- HostUIDMapping: len(options.UIDMap) == 0,
- HostGIDMapping: len(options.GIDMap) == 0,
- UIDMap: copyIDMap(options.UIDMap),
- GIDMap: copyIDMap(options.GIDMap),
- },
+ options.IDMappingOptions = IDMappingOptions{
+ HostUIDMapping: len(options.UIDMap) == 0,
+ HostGIDMapping: len(options.GIDMap) == 0,
+ UIDMap: copyIDMap(options.UIDMap),
+ GIDMap: copyIDMap(options.GIDMap),
}
container, err := rcstore.Create(id, names, imageID, layer, metadata, options)
if err != nil || container == nil {
@@ -2273,7 +2295,12 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
rlstore.Load()
}
if rlstore.Exists(id) {
- return rlstore.Mount(id, mountLabel, uidMap, gidMap)
+ options := drivers.MountOpts{
+ MountLabel: mountLabel,
+ UidMaps: uidMap,
+ GidMaps: gidMap,
+ }
+ return rlstore.Mount(id, options)
}
return "", ErrLayerUnknown
}
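
With the store.go changes above, ContainerOptions gains LabelOpts and Flags, and CreateContainer either honors an explicit ProcessLabel/MountLabel pair or generates one through label.InitLabels. A minimal sketch of a caller relying on the generated labels; the image ID and name are placeholders, and "disable" is just one example label option:

```go
package sketch

import (
	"github.com/containers/storage"
)

// createLabeledContainer passes LabelOpts so CreateContainer generates an
// SELinux process/mount label pair via label.InitLabels and stores both in
// the container's Flags. Supplying only one of the "ProcessLabel" and
// "MountLabel" flags explicitly is rejected by the new validation above.
func createLabeledContainer(store storage.Store) (*storage.Container, error) {
	opts := &storage.ContainerOptions{
		LabelOpts: []string{"disable"}, // example option; any label.InitLabels option works
	}
	// Empty id lets the store generate one; "some-image-id" and "demo" are placeholders.
	return store.CreateContainer("", []string{"demo"}, "some-image-id", "", "", opts)
}
```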
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index c0498a02d..2276d5531 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -8,7 +8,7 @@ github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
-github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
+github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/pmezard/go-difflib v1.0.0
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 000000000..1b1b1921e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
new file mode 100644
index 000000000..037fc7c8e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -0,0 +1,241 @@
+# Go support for Protocol Buffers
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates require at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --go_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --go_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+ test := &example.Test {
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &example.Test_OptionalGroup {
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+
+ protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+ all imports. Useful for things like generating protos in a
+ subdirectory, or regenerating vendored protobufs in-place.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`. If it contains slashes, everything up to the
+ rightmost slash is ignored.
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
+ load. The only plugin in this repo is `grpc`.
+- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
+ associated with Go package quux/shme. This is subject to the
+ import_prefix parameter.
+
+## gRPC Support ##
+
+If a proto file specifies RPC services, protoc-gen-go can be instructed to
+generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
+the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
+the --go_out argument to protoc:
+
+ protoc --go_out=plugins=grpc:. *.proto
+
+## Compatibility ##
+
+The library and the generated code are expected to be stable over time.
+However, we reserve the right to make breaking changes without notice for the
+following reasons:
+
+- Security. A security issue in the specification or implementation may come to
+ light whose resolution requires breaking compatibility. We reserve the right
+ to address such security issues.
+- Unspecified behavior. There are some aspects of the Protocol Buffers
+ specification that are undefined. Programs that depend on such unspecified
+ behavior may break in future releases.
+- Specification errors or changes. If it becomes necessary to address an
+ inconsistency, incompleteness, or change in the Protocol Buffers
+ specification, resolving the issue could affect the meaning or legality of
+ existing programs. We reserve the right to address such issues, including
+ updating the implementations.
+- Bugs. If the library has a bug that violates the specification, a program
+ that depends on the buggy behavior may break if the bug is fixed. We reserve
+ the right to fix such bugs.
+- Adding methods or fields to generated structs. These may conflict with field
+ names that already exist in a schema, causing applications to break. When the
+ code generator encounters a field in the schema that would collide with a
+ generated field or method name, the code generator will append an underscore
+ to the generated field or method name.
+- Adding, removing, or changing methods or fields in generated structs that
+ start with `XXX`. These parts of the generated code are exported out of
+ necessity, but should not be considered part of the public API.
+- Adding, removing, or changing unexported symbols in generated code.
+
+Any breaking changes outside of these will be announced 6 months in advance to
+protobuf@googlegroups.com.
+
+You should, whenever possible, use generated code created by the `protoc-gen-go`
+tool built at the same commit as the `proto` package. The `proto` package
+declares package-level constants in the form `ProtoPackageIsVersionX`.
+Application code and generated code may depend on one of these constants to
+ensure that compilation will fail if the available version of the proto library
+is too old. Whenever we make a change to the generated code that requires newer
+library support, in the same commit we will increment the version number of the
+generated code and declare a new package-level constant whose name incorporates
+the latest version number. Removing a compatibility constant is considered a
+breaking change and would be subject to the announcement policy stated above.
+
+The `protoc-gen-go/generator` package exposes a plugin interface,
+which is used by the gRPC code generation. This interface is not
+supported and is subject to incompatible changes without notice.
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 000000000..e392575b3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
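
Clone and Merge above define the deep-copy and merge semantics for generated messages. A minimal sketch using the hypothetical pb.Test message from the README earlier in this diff (the import path is the README's placeholder, not a real package):

```go
package sketch

import (
	"github.com/golang/protobuf/proto"

	pb "path/to/example" // hypothetical package generated by protoc-gen-go, as in the README
)

// cloneAndMerge illustrates the semantics implemented above: Clone returns a
// deep copy, and Merge overwrites set scalar fields and appends repeated ones.
func cloneAndMerge(src *pb.Test) *pb.Test {
	dst := proto.Clone(src).(*pb.Test)              // deep copy, asserted back to the concrete type
	proto.Merge(dst, &pb.Test{Reps: []int64{4, 5}}) // appends 4, 5 to dst.Reps
	return dst
}
```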
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 000000000..aa207298f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
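The decoder above consumes protobuf's base-128 varint format: an integer is stored seven bits at a time, least-significant group first, with the high bit of each byte set while more bytes follow. As a minimal, illustrative round-trip sketch using the exported EncodeVarint/DecodeVarint helpers that these vendored files define (assuming the vendored import path github.com/golang/protobuf/proto):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 (binary 1 0010 1100) splits into two 7-bit groups:
	// 0101100 then 0000010, emitted low group first with the
	// continuation bit set on all but the last byte -> 0xAC 0x02.
	buf := proto.EncodeVarint(300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}

Note that the package-level DecodeVarint reports a truncated or oversized varint as (0, 0) rather than an error, while the Buffer method above distinguishes io.ErrUnexpectedEOF from errOverflow.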
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 000000000..68b9b30cf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1355 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+ if err := encodeExtensions(exts); err != nil {
+ return err
+ }
+ v, _ := exts.extensionsRead()
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
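
The enc_len_thing helper above reserves four bytes for the length varint and shifts the payload once the real length is known. A minimal standalone sketch of that trick, using hypothetical appendVarint/sizeVarint helpers instead of the Buffer internals:

package main

import "fmt"

// appendVarint appends v in protobuf varint encoding (illustrative helper).
func appendVarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// sizeVarint reports how many bytes appendVarint would use for v.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// appendLenPrefixed encodes payload preceded by its varint length,
// reserving four bytes up front and fixing up the gap afterwards,
// mirroring the logic of enc_len_thing.
func appendLenPrefixed(buf, payload []byte) []byte {
	iLen := len(buf)
	buf = append(buf, 0, 0, 0, 0) // reserve four bytes for the length
	iMsg := len(buf)
	buf = append(buf, payload...)
	lMsg := len(buf) - iMsg
	lLen := sizeVarint(uint64(lMsg))
	switch x := lLen - (iMsg - iLen); {
	case x > 0: // length needs more than four bytes: shift payload right
		buf = append(buf, make([]byte, x)...)
		copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
	case x < 0: // length needs fewer bytes: shift payload left
		copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
		buf = buf[:len(buf)+x] // x is negative
	}
	// Write the length into the reserved space, then re-expose the payload.
	buf = buf[:iLen]
	buf = appendVarint(buf, uint64(lMsg))
	return buf[:len(buf)+lMsg]
}

func main() {
	out := appendLenPrefixed(nil, []byte("hello"))
	fmt.Printf("% x\n", out) // 05 68 65 6c 6c 6f
}

Reserving a fixed-size slot avoids encoding the message twice just to learn its length; the extra copy only happens when the four-byte guess turns out to be wrong.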
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 000000000..2ed1cf596
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
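
A minimal usage sketch for Equal, assuming the hypothetical generated pb.Test message shown in the package documentation in lib.go further below:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package from the lib.go doc
)

func main() {
	a := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
	b := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
	c := &pb.Test{Label: proto.String("y")}

	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal
	fmt.Println(proto.Equal(a, c)) // false: Label differs, Reps set vs. unset
}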
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 000000000..6b9b36374
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,586 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing Elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encode encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr; we can directly return sf.value.

+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
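
A short usage sketch for the extension accessors above (SetExtension, HasExtension, GetExtension, ClearExtension), assuming a hypothetical extendable generated message pb.Base with a registered *string extension descriptor pb.E_Note; both names are illustrative:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package
)

func main() {
	msg := &pb.Base{}

	// SetExtension checks that the value's type matches the descriptor.
	if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
		log.Fatal(err)
	}
	if proto.HasExtension(msg, pb.E_Note) {
		// GetExtension returns interface{}; assert to the extension's Go type.
		v, err := proto.GetExtension(msg, pb.E_Note)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("note: %s", *v.(*string))
	}
	proto.ClearExtension(msg, pb.E_Note)
}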
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 000000000..ac4ddbc07
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a slice of the fields,
+ // with its scalar fields set to their proto-declared non-zero default values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
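
A brief sketch of the optional-field helpers and SetDefaults defined above, again assuming the hypothetical pb.Test from the package documentation; its Type field declares default=77, so SetDefaults fills it in when unset:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "./example.pb" // hypothetical generated package
)

func main() {
	msg := &pb.Test{
		Label: proto.String("hello"), // optional string -> *string, set via helper
		Reps:  []int64{1, 2, 3},      // repeated field -> plain slice
	}

	proto.SetDefaults(msg)      // Type is unset, so its declared default is applied
	fmt.Println(msg.GetType())  // 77
	fmt.Println(msg.GetLabel()) // "hello"
}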
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 000000000..fd982decd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 000000000..fb512e2e1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
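As a minimal standalone sketch (not part of the vendored file; the msg type and its fields are invented for illustration), the reflect-based pattern used by structPointer_field and structPointer_ifield — locate a field by its index path, then take its address through reflect — looks like this:

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  *string
	Count int32
}

func main() {
	m := &msg{Count: 41}
	f, _ := reflect.TypeOf(m).Elem().FieldByName("Count")

	// f.Index plays the role of the `field` type in this file.
	v := reflect.ValueOf(m).Elem().FieldByIndex(f.Index)
	p := v.Addr().Interface().(*int32) // analogous to structPointer_ifield
	*p = 42
	fmt.Println(m.Count) // 42
}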
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(int64(x)))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 000000000..6b5567d47
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
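For comparison, a hypothetical standalone sketch (not part of the vendored file; the msg type is invented) of the offset arithmetic the accessors below rely on — add the field's byte offset to the struct's base address and cast back to the field's concrete pointer type:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	Name  string
	Count int32
}

func main() {
	m := msg{Name: "x", Count: 41}
	f, _ := reflect.TypeOf(m).FieldByName("Count")

	// Equivalent to the field(f.Offset)-based access performed below.
	p := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&m)) + f.Offset))
	*p = 42
	fmt.Println(m.Count) // 42
}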
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *v.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 000000000..ec2289c00
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
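Illustrative aside (not part of the vendored file): every value on the wire is preceded by a key formed as tag<<3 | wireType, which is also what the tagcode precomputation later in this file encodes. A quick check for field 16 with length-delimited encoding:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	key := uint64(16)<<3 | proto.WireBytes      // field 16, wire type 2
	fmt.Printf("% x\n", proto.EncodeVarint(key)) // 82 01
}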
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
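The comment above explains the design; as a self-contained sketch (not part of the vendored file, names invented), the same dense-slice-with-map-fallback structure in isolation:

package main

import "fmt"

// fastMap mirrors tagMap's idea: small positive keys index a dense slice,
// everything else falls back to a map.
type fastMap struct {
	fast []int
	slow map[int]int
}

const fastLimit = 1024

func (m *fastMap) put(k, v int) {
	if k > 0 && k < fastLimit {
		for len(m.fast) <= k {
			m.fast = append(m.fast, -1) // -1 means "unset"
		}
		m.fast[k] = v
		return
	}
	if m.slow == nil {
		m.slow = make(map[int]int)
	}
	m.slow[k] = v
}

func (m *fastMap) get(k int) (int, bool) {
	if k > 0 && k < fastLimit {
		if k >= len(m.fast) || m.fast[k] < 0 {
			return 0, false
		}
		return m.fast[k], true
	}
	v, ok := m.slow[k]
	return v, ok
}

func main() {
	var m fastMap
	m.put(3, 0)
	m.put(5000, 1)
	fmt.Println(m.get(3))    // 0 true
	fmt.Println(m.get(5000)) // 1 true
	fmt.Println(m.get(4))    // 0 false
}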
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+	s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
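For context, a hypothetical example (not part of the vendored file; the loginRequest type and its field are invented) of the struct tags Parse consumes, read back through reflect the same way init does below:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type loginRequest struct {
	User string `protobuf:"bytes,1,opt,name=user,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(loginRequest{}).FieldByName("User")
	tag := f.Tag.Get("protobuf")
	fmt.Println(tag)                     // bytes,1,opt,name=user,proto3
	fmt.Println(strings.Split(tag, ",")) // [bytes 1 opt name=user proto3]
}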
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
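A generic sketch (not tied to this package; names invented) of the same read-mostly caching pattern GetProperties applies — look up under RLock, recompute under the write lock, and re-check for a concurrent fill, as getPropertiesLocked does below:

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = make(map[string]int)
)

func lookup(key string, compute func() int) int {
	mu.RLock()
	v, ok := cache[key]
	mu.RUnlock()
	if ok {
		return v
	}

	mu.Lock()
	defer mu.Unlock()
	if v, ok := cache[key]; ok { // re-check: another goroutine may have filled it
		return v
	}
	v = compute()
	cache[key] = v
	return v
}

func main() {
	fmt.Println(lookup("a", func() int { return 1 }))
	fmt.Println(lookup("a", func() int { return 2 })) // cached: still 1
}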
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 000000000..965876bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character other than [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
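Taken together, text.go exposes both the package-level helpers above (MarshalText, MarshalTextString, CompactText, CompactTextString) and the configurable TextMarshaler. A short sketch of the caller-facing surface; the Greeting message is a hand-rolled illustrative stand-in, not a real generated type.

package main

import (
    "fmt"
    "os"

    "github.com/golang/protobuf/proto"
)

// Greeting is an illustrative stand-in for a protoc-generated message.
type Greeting struct {
    Name string `protobuf:"bytes,1,opt,name=name"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
    msg := &Greeting{Name: "world"}

    // Multi-line text format, written to any io.Writer.
    _ = proto.MarshalText(os.Stdout, msg) // name: "world"

    // One-line (compact) form as a string.
    fmt.Println(proto.CompactTextString(msg)) // name:"world"

    // A configured marshaler; ExpandAny only affects google.protobuf.Any fields.
    tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
    fmt.Println(tm.Text(msg))
}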
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 000000000..61f83c1e1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("expected ']' or ',', found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
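text_parser.go is the inverse of text.go: UnmarshalText tokenizes the text form (quoted strings, key/value pairs, '#' comments, optional ';' or ',' separators) and fills the message via reflection. A round-trip sketch, again using a hypothetical hand-rolled Greeting message rather than a real generated type.

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"
)

// Greeting is an illustrative stand-in for a protoc-generated message.
type Greeting struct {
    Name string `protobuf:"bytes,1,opt,name=name"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
    var msg Greeting
    if err := proto.UnmarshalText(`name: "world"`, &msg); err != nil {
        log.Fatal(err)
    }
    fmt.Println(msg.Name) // world

    // Round-trip back through the text marshaler from text.go.
    fmt.Print(proto.MarshalTextString(&msg)) // name: "world"
}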
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 6cfc5fded..2a31cd3c5 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -48,6 +48,11 @@ func GetPidLabel(pid int) (string, error) {
func Init() {
}
+// ClearLabels clears all reserved labels
+func ClearLabels() {
+ return
+}
+
func ReserveLabel(label string) error {
return nil
}
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index f0a055b87..63c4edd05 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -24,17 +24,22 @@ var ErrIncompatibleLabel = fmt.Errorf("Bad SELinux option z and Z can not be use
// the container. A list of options can be passed into this function to alter
// the labels. The labels returned will include a random MCS String, that is
// guaranteed to be unique.
-func InitLabels(options []string) (string, string, error) {
+func InitLabels(options []string) (plabel string, mlabel string, Err error) {
if !selinux.GetEnabled() {
return "", "", nil
}
processLabel, mountLabel := selinux.ContainerLabels()
if processLabel != "" {
+ defer func() {
+ if Err != nil {
+ ReleaseLabel(mountLabel)
+ }
+ }()
pcon := selinux.NewContext(processLabel)
mcon := selinux.NewContext(mountLabel)
for _, opt := range options {
if opt == "disable" {
- return "", "", nil
+ return "", mountLabel, nil
}
if i := strings.Index(opt, ":"); i == -1 {
return "", "", fmt.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type' followed by ':' and a value", opt)
@@ -156,6 +161,11 @@ func Init() {
selinux.GetEnabled()
}
+// ClearLabels will clear all reserved labels
+func ClearLabels() {
+ selinux.ClearLabels()
+}
+
// ReserveLabel will record the fact that the MCS label has already been used.
// This will prevent InitLabels from using the MCS label in a newly created
// container
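The InitLabels change above does two things: the named return values let the new deferred ReleaseLabel undo the MCS reservation if a later option fails to parse, and the "disable" option now returns the mount label instead of dropping it. A rough caller-side sketch on an SELinux-enabled host (error handling trimmed):

package main

import (
    "fmt"
    "log"

    "github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
    // With this patch, "disable" returns ("", mountLabel, nil) rather than
    // ("", "", nil), so the caller still receives the reserved mount label
    // and can release or reuse it later.
    processLabel, mountLabel, err := label.InitLabels([]string{"disable"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("process=%q mount=%q\n", processLabel, mountLabel)

    // The caller remains responsible for releasing the label when done.
    label.ReleaseLabel(mountLabel)
}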
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
index 5dc09a51e..2cd54eac1 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
@@ -409,6 +409,13 @@ func NewContext(label string) Context {
return c
}
+// ClearLabels clears all reserved labels
+func ClearLabels() {
+ state.Lock()
+ state.mcsList = make(map[string]bool)
+ state.Unlock()
+}
+
// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
if len(label) != 0 {
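ClearLabels empties the in-memory MCS reservation list under the state lock; the label front end above and the non-SELinux stubs below gain matching pass-through or no-op versions, so callers can reset reservations in one call regardless of build. A minimal sketch, with purely illustrative label values:

package main

import (
    "github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
    // Record a couple of MCS levels as in use (values are made up).
    label.ReserveLabel("system_u:system_r:container_t:s0:c1,c2")
    label.ReserveLabel("system_u:system_r:container_t:s0:c3,c4")

    // Forget every reservation at once; later InitLabels calls may hand
    // out those MCS pairs again.
    label.ClearLabels()
}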
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
index 4dbfd83ed..5abf8a362 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
@@ -107,6 +107,11 @@ func NewContext(label string) Context {
return c
}
+// ClearLabels clears all reserved MLS/MCS levels
+func ClearLabels() {
+ return
+}
+
// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
return