aboutsummaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/containers/buildah/add.go45
-rw-r--r--vendor/github.com/containers/buildah/buildah.go8
-rw-r--r--vendor/github.com/containers/buildah/chroot/run.go2
-rw-r--r--vendor/github.com/containers/buildah/commit.go46
-rw-r--r--vendor/github.com/containers/buildah/common.go2
-rw-r--r--vendor/github.com/containers/buildah/image.go10
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go651
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/util.go12
-rw-r--r--vendor/github.com/containers/buildah/info.go2
-rw-r--r--vendor/github.com/containers/buildah/pkg/cli/common.go16
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go12
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare.c (renamed from vendor/github.com/containers/buildah/unshare/unshare.c)20
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare.go (renamed from vendor/github.com/containers/buildah/unshare/unshare.go)152
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go (renamed from vendor/github.com/containers/buildah/unshare/unshare_cgo.go)4
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go (renamed from vendor/github.com/containers/buildah/unshare/unshare_gccgo.go)4
-rw-r--r--vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go (renamed from vendor/github.com/containers/buildah/unshare/unshare_unsupported.go)4
-rw-r--r--vendor/github.com/containers/buildah/run.go63
-rw-r--r--vendor/github.com/containers/buildah/vendor.conf3
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/README.md1
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go26
-rw-r--r--vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go251
-rw-r--r--vendor/github.com/docker/libnetwork/types/types.go653
-rw-r--r--vendor/github.com/ishidawataru/sctp/LICENSE201
-rw-r--r--vendor/github.com/ishidawataru/sctp/README.md18
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp.go656
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp_linux.go227
-rw-r--r--vendor/github.com/ishidawataru/sctp/sctp_unsupported.go47
27 files changed, 2801 insertions, 335 deletions
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 250d75b24..d42246d53 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -209,6 +209,10 @@ func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore {
}
func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
+ dirsInDockerignore, err := getDirsInDockerignore(options.ContextDir, excludes)
+ if err != nil {
+ return errors.Wrapf(err, "error checking directories in .dockerignore")
+ }
for _, src := range source {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// We assume that source is a file, and we're copying
@@ -274,10 +278,15 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
if err != nil {
return err
}
- if !match {
+ prefix, exist := dirsInDockerignore[exclude.ExcludePath]
+ hasPrefix := false
+ if exist {
+ hasPrefix = filepath.HasPrefix(path, prefix)
+ }
+ if !(match || hasPrefix) {
continue
}
- if exclude.IsExcluded {
+ if (hasPrefix && exclude.IsExcluded) || (match && exclude.IsExcluded) {
return nil
}
break
@@ -333,3 +342,35 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
}
return nil
}
+
+func getDirsInDockerignore(srcAbsPath string, excludes []DockerIgnore) (map[string]string, error) {
+ visitedDir := make(map[string]string)
+ if len(excludes) == 0 {
+ return visitedDir, nil
+ }
+ err := filepath.Walk(srcAbsPath, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ for _, exclude := range excludes {
+ match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
+ if err != nil {
+ return err
+ }
+ if !match {
+ continue
+ }
+ if _, exist := visitedDir[exclude.ExcludePath]; exist {
+ continue
+ }
+ visitedDir[exclude.ExcludePath] = path
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return visitedDir, err
+ }
+ return visitedDir, nil
+}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 9625fff96..8b076630f 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -26,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.7.2"
+ Version = "1.8-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -284,6 +284,12 @@ type CommonBuildOptions struct {
CPUSetMems string
// Memory is the upper limit (in bytes) on how much memory running containers can use.
Memory int64
+ // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf
+ DNSSearch []string
+ // DNSServers is the list of DNS servers to add to the build container's /etc/resolv.conf
+ DNSServers []string
+ // DNSOptions is the list of DNS
+ DNSOptions []string
// MemorySwap limits the amount of memory and swap together.
MemorySwap int64
// LabelOpts is the a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 9bcac1683..ff39c2f24 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -18,7 +18,7 @@ import (
"unsafe"
"github.com/containers/buildah/bind"
- "github.com/containers/buildah/unshare"
+ "github.com/containers/buildah/pkg/unshare"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 9ab90196c..5e73be881 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "strings"
"time"
"github.com/containers/buildah/pkg/blobcache"
@@ -18,6 +19,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -110,10 +112,28 @@ type PushOptions struct {
// Commit writes the contents of the container, along with its updated
// configuration, to a new image in the specified location, and if we know how,
// add any additional tags that were specified. Returns the ID of the new image
-// if commit was successful and the image destination was local
+// if commit was successful and the image destination was local.
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
var imgID string
+ // If we weren't given a name, build a destination reference using a
+ // temporary name that we'll remove later. The correct thing to do
+ // would be to read the manifest and configuration blob, and ask the
+ // manifest for the ID that we'd give the image, but that computation
+ // requires that we know the digests of the layer blobs, which we don't
+ // want to compute here because we'll have to do it again when
+ // cp.Image() instantiates a source image, and we don't want to do the
+ // work twice.
+ nameToRemove := ""
+ if dest == nil {
+ nameToRemove = stringid.GenerateRandomID() + "-tmp"
+ dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error creating temporary destination reference for image")
+ }
+ dest = dest2
+ }
+
systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath)
blocked, err := isReferenceBlocked(dest, systemContext)
@@ -148,10 +168,13 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
+ // Build an image reference from which we can copy the finished image.
src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp, options.OmitTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
+ // In case we're using caching, decide how to handle compression for a cache.
+ // If we're using blob caching, set it up for the source.
var maybeCachedSrc = types.ImageReference(src)
var maybeCachedDest = types.ImageReference(dest)
if options.BlobDirectory != "" {
@@ -181,6 +204,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(b.store, options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
+ // If we've got more names to attach, and we know how to do that for
+ // the transport that we're writing the new image to, add them now.
if len(options.AdditionalTags) > 0 {
switch dest.Transport().Name() {
case is.Transport.Name():
@@ -201,10 +226,25 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil && err != storage.ErrImageUnknown {
return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest))
}
-
if err == nil {
imgID = img.ID
-
+ prunedNames := make([]string, 0, len(img.Names))
+ for _, name := range img.Names {
+ if !(nameToRemove != "" && strings.Contains(name, nameToRemove)) {
+ prunedNames = append(prunedNames, name)
+ }
+ }
+ if len(prunedNames) < len(img.Names) {
+ if err = b.store.SetNames(imgID, prunedNames); err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "failed to prune temporary name from image %q", imgID)
+ }
+ logrus.Debugf("reassigned names %v to image %q", prunedNames, img.ID)
+ dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error creating unnamed destination reference for image")
+ }
+ dest = dest2
+ }
if options.IIDFile != "" {
if err = ioutil.WriteFile(options.IIDFile, []byte(img.ID), 0644); err != nil {
return imgID, nil, "", errors.Wrapf(err, "failed to write image ID to file %q", options.IIDFile)
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 667a1a484..6b4e2ee90 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -5,7 +5,7 @@ import (
"os"
"path/filepath"
- "github.com/containers/buildah/unshare"
+ "github.com/containers/buildah/pkg/unshare"
cp "github.com/containers/image/copy"
"github.com/containers/image/types"
"github.com/containers/storage"
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index b0876fb6d..1cd329c85 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -9,6 +9,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "strings"
"time"
"github.com/containers/buildah/docker"
@@ -661,6 +662,13 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
if historyTimestamp != nil {
created = historyTimestamp.UTC()
}
+ createdBy := b.CreatedBy()
+ if createdBy == "" {
+ createdBy = strings.Join(b.Shell(), " ")
+ if createdBy == "" {
+ createdBy = "/bin/sh"
+ }
+ }
if omitTimestamp {
created = time.Unix(0, 0)
@@ -677,7 +685,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
oconfig: oconfig,
dconfig: dconfig,
created: created,
- createdBy: b.CreatedBy(),
+ createdBy: createdBy,
historyComment: b.HistoryComment(),
annotations: b.Annotations(),
preferredManifestType: manifestType,
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index b1e30ca6a..b692d3bcf 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -10,7 +10,6 @@ import (
"os"
"os/exec"
"path/filepath"
- "regexp"
"sort"
"strconv"
"strings"
@@ -28,7 +27,6 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/stringid"
docker "github.com/fsouza/go-dockerclient"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -215,7 +213,8 @@ type Executor struct {
useCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
- imageMap map[string]string // Used to map images that we create to handle the AS construct.
+ imageMap map[string]string // Used to map images that we create to handle the AS construct.
+ containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
blobDirectory string
excludes []string
unusedArgs map[string]struct{}
@@ -496,6 +495,8 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
} else if len(copy.From) > 0 {
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
sources = append(sources, filepath.Join(other.mountPoint, src))
+ } else if builder, ok := s.executor.containerMap[copy.From]; ok {
+ sources = append(sources, filepath.Join(builder.MountPoint, src))
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
@@ -654,6 +655,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
removeIntermediateCtrs: options.RemoveIntermediateCtrs,
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
imageMap: make(map[string]string),
+ containerMap: make(map[string]*buildah.Builder),
blobDirectory: options.BlobDirectory,
unusedArgs: make(map[string]struct{}),
}
@@ -680,18 +682,18 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
return &exec, nil
}
-// Prepare creates a working container based on the specified image, or if one
+// prepare creates a working container based on the specified image, or if one
// isn't specified, the first argument passed to the first FROM instruction we
// can find in the stage's parsed tree.
-func (s *StageExecutor) Prepare(ctx context.Context, stage imagebuilder.Stage, from string) error {
+func (s *StageExecutor) prepare(ctx context.Context, stage imagebuilder.Stage, from string, initializeIBConfig, rebase bool) (builder *buildah.Builder, err error) {
ib := stage.Builder
node := stage.Node
if from == "" {
base, err := ib.From(node)
if err != nil {
- logrus.Debugf("Prepare(node.Children=%#v)", node.Children)
- return errors.Wrapf(err, "error determining starting point for build")
+ logrus.Debugf("prepare(node.Children=%#v)", node.Children)
+ return nil, errors.Wrapf(err, "error determining starting point for build")
}
from = base
}
@@ -707,9 +709,11 @@ func (s *StageExecutor) Prepare(ctx context.Context, stage imagebuilder.Stage, f
}
}
- logrus.Debugf("FROM %#v", displayFrom)
- if !s.executor.quiet {
- s.executor.log("FROM %s", displayFrom)
+ if initializeIBConfig && rebase {
+ logrus.Debugf("FROM %#v", displayFrom)
+ if !s.executor.quiet {
+ s.executor.log("FROM %s", displayFrom)
+ }
}
builderOptions := buildah.BuilderOptions{
@@ -737,74 +741,79 @@ func (s *StageExecutor) Prepare(ctx context.Context, stage imagebuilder.Stage, f
if asImageFound, ok := s.executor.imageMap[from]; ok {
builderOptions.FromImage = asImageFound
}
- builder, err := buildah.NewBuilder(ctx, s.executor.store, builderOptions)
- if err != nil {
- return errors.Wrapf(err, "error creating build container")
- }
-
- volumes := map[string]struct{}{}
- for _, v := range builder.Volumes() {
- volumes[v] = struct{}{}
- }
- ports := map[docker.Port]struct{}{}
- for _, p := range builder.Ports() {
- ports[docker.Port(p)] = struct{}{}
- }
- dConfig := docker.Config{
- Hostname: builder.Hostname(),
- Domainname: builder.Domainname(),
- User: builder.User(),
- Env: builder.Env(),
- Cmd: builder.Cmd(),
- Image: from,
- Volumes: volumes,
- WorkingDir: builder.WorkDir(),
- Entrypoint: builder.Entrypoint(),
- Labels: builder.Labels(),
- Shell: builder.Shell(),
- StopSignal: builder.StopSignal(),
- OnBuild: builder.OnBuild(),
- ExposedPorts: ports,
- }
- var rootfs *docker.RootFS
- if builder.Docker.RootFS != nil {
- rootfs = &docker.RootFS{
- Type: builder.Docker.RootFS.Type,
- }
- for _, id := range builder.Docker.RootFS.DiffIDs {
- rootfs.Layers = append(rootfs.Layers, id.String())
- }
- }
- dImage := docker.Image{
- Parent: builder.FromImage,
- ContainerConfig: dConfig,
- Container: builder.Container,
- Author: builder.Maintainer(),
- Architecture: builder.Architecture(),
- RootFS: rootfs,
- }
- dImage.Config = &dImage.ContainerConfig
- err = ib.FromImage(&dImage, node)
+ builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
- if err2 := builder.Delete(); err2 != nil {
- logrus.Debugf("error deleting container which we failed to update: %v", err2)
+ return nil, errors.Wrapf(err, "error creating build container")
+ }
+
+ if initializeIBConfig {
+ volumes := map[string]struct{}{}
+ for _, v := range builder.Volumes() {
+ volumes[v] = struct{}{}
+ }
+ ports := map[docker.Port]struct{}{}
+ for _, p := range builder.Ports() {
+ ports[docker.Port(p)] = struct{}{}
+ }
+ dConfig := docker.Config{
+ Hostname: builder.Hostname(),
+ Domainname: builder.Domainname(),
+ User: builder.User(),
+ Env: builder.Env(),
+ Cmd: builder.Cmd(),
+ Image: from,
+ Volumes: volumes,
+ WorkingDir: builder.WorkDir(),
+ Entrypoint: builder.Entrypoint(),
+ Labels: builder.Labels(),
+ Shell: builder.Shell(),
+ StopSignal: builder.StopSignal(),
+ OnBuild: builder.OnBuild(),
+ ExposedPorts: ports,
+ }
+ var rootfs *docker.RootFS
+ if builder.Docker.RootFS != nil {
+ rootfs = &docker.RootFS{
+ Type: builder.Docker.RootFS.Type,
+ }
+ for _, id := range builder.Docker.RootFS.DiffIDs {
+ rootfs.Layers = append(rootfs.Layers, id.String())
+ }
+ }
+ dImage := docker.Image{
+ Parent: builder.FromImage,
+ ContainerConfig: dConfig,
+ Container: builder.Container,
+ Author: builder.Maintainer(),
+ Architecture: builder.Architecture(),
+ RootFS: rootfs,
+ }
+ dImage.Config = &dImage.ContainerConfig
+ err = ib.FromImage(&dImage, node)
+ if err != nil {
+ if err2 := builder.Delete(); err2 != nil {
+ logrus.Debugf("error deleting container which we failed to update: %v", err2)
+ }
+ return nil, errors.Wrapf(err, "error updating build context")
}
- return errors.Wrapf(err, "error updating build context")
}
mountPoint, err := builder.Mount(builder.MountLabel)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
- return errors.Wrapf(err, "error mounting new container")
+ return nil, errors.Wrapf(err, "error mounting new container")
+ }
+ if rebase {
+ // Make this our "current" working container.
+ s.mountPoint = mountPoint
+ s.builder = builder
+ // Add the top layer of this image to b.topLayers so we can
+ // keep track of them when building with cached images.
+ s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer)
}
- s.mountPoint = mountPoint
- s.builder = builder
- // Add the top layer of this image to b.topLayers so we can keep track of them
- // when building with cached images.
- s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer)
logrus.Debugln("Container ID:", builder.ContainerID)
- return nil
+ return builder, nil
}
// Delete deletes the stage's working container, if we have one.
@@ -816,47 +825,118 @@ func (s *StageExecutor) Delete() (err error) {
return err
}
-// resolveNameToImageRef creates a types.ImageReference from b.output
+// resolveNameToImageRef creates a types.ImageReference for the output name in local storage
func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
- var (
- imageRef types.ImageReference
- err error
- )
- if output != "" {
- imageRef, err = alltransports.ParseImageName(output)
+ imageRef, err := alltransports.ParseImageName(output)
+ if err != nil {
+ candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store)
if err != nil {
- candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store)
- if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", output)
- }
- if len(candidates) == 0 {
- return nil, errors.Errorf("error parsing target image name %q", output)
- }
- imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
- if err2 != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", output)
- }
- return imageRef2, nil
+ return nil, errors.Wrapf(err, "error parsing target image name %q", output)
}
- return imageRef, nil
+ if len(candidates) == 0 {
+ return nil, errors.Errorf("error parsing target image name %q", output)
+ }
+ imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
+ if err2 != nil {
+ return nil, errors.Wrapf(err, "error parsing target image name %q", output)
+ }
+ return imageRef2, nil
+ }
+ return imageRef, nil
+}
+
+// stepRequiresCommit indicates whether or not the step should be followed by
+// committing the in-progress container to create an intermediate image.
+func (*StageExecutor) stepRequiresCommit(step *imagebuilder.Step) bool {
+ switch strings.ToUpper(step.Command) {
+ case "ADD", "COPY", "RUN":
+ return true
+ }
+ return false
+}
+
+// getImageRootfs checks for an image matching the passed-in name in local
+// storage. If it isn't found, it pulls down a copy. Then, if we don't have a
+// working container root filesystem based on the image, it creates one. Then
+// it returns that root filesystem's location.
+func (s *StageExecutor) getImageRootfs(ctx context.Context, stage imagebuilder.Stage, image string) (mountPoint string, err error) {
+ if builder, ok := s.executor.containerMap[image]; ok {
+ return builder.MountPoint, nil
}
- imageRef, err = is.Transport.ParseStoreReference(b.store, "@"+stringid.GenerateRandomID())
+ builder, err := s.prepare(ctx, stage, image, false, false)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing reference for image to be written")
+ return "", err
}
- return imageRef, nil
+ s.executor.containerMap[image] = builder
+ return builder.MountPoint, nil
}
// Execute runs each of the steps in the stage's parsed tree, in turn.
-func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (imgID string, ref reference.Canonical, err error) {
+func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) {
ib := stage.Builder
- node := stage.Node
- checkForLayers := true
- children := node.Children
- commitName := s.output
+ checkForLayers := s.executor.layers && s.executor.useCache
+
+ // If the base image's name corresponds to the result of an earlier
+ // stage, substitute that image's ID for the base image's name here.
+ // If not, then go on assuming that it's just a regular image that's
+ // either in local storage, or one that we have to pull from a
+ // registry.
+ if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
+ base = stageImage
+ }
+
+ // Create the (first) working container for this stage. Reinitializing
+ // the imagebuilder configuration may alter the list of steps we have,
+ // so take a snapshot of them *after* that.
+ if _, err := s.prepare(ctx, stage, base, true, true); err != nil {
+ return "", nil, err
+ }
+ children := stage.Node.Children
+
+ // A helper function to only log "COMMIT" as an explicit step if it's
+ // the very last step of a (possibly multi-stage) build.
+ logCommit := func(output string, instruction int) {
+ if instruction < len(children)-1 || s.index < s.stages-1 {
+ return
+ }
+ commitMessage := "COMMIT"
+ if output != "" {
+ commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
+ }
+ logrus.Debugf(commitMessage)
+ if !s.executor.quiet {
+ s.executor.log(commitMessage)
+ }
+ }
+ logImageID := func(imgID string) {
+ if s.executor.iidfile == "" {
+ fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
+ }
+ }
+
+ if len(children) == 0 {
+ // There are no steps.
+ if s.builder.FromImageID == "" || s.executor.squash {
+ // We either don't have a base image, or we need to
+ // squash the contents of the base image. Whichever is
+ // the case, we need to commit() to create a new image.
+ logCommit(s.output, -1)
+ if imgID, ref, err = s.commit(ctx, ib, getCreatedBy(nil), s.output); err != nil {
+ return "", nil, errors.Wrapf(err, "error committing base container")
+ }
+ } else {
+ // We don't need to squash the base image, so just
+ // reuse the base image.
+ logCommit(s.output, -1)
+ if imgID, ref, err = s.copyExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
+ return "", nil, err
+ }
+ }
+ logImageID(imgID)
+ }
- for i, node := range node.Children {
- // Resolve any arguments in this instruction so that we don't have to.
+ for i, node := range children {
+ // Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
@@ -868,7 +948,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (
// If this instruction declares an argument, remove it from the
// set of arguments that we were passed but which we haven't
- // seen used by the Dockerfile.
+ // yet seen used by the Dockerfile.
if step.Command == "arg" {
for _, Arg := range step.Args {
list := strings.SplitN(Arg, "=", 2)
@@ -884,12 +964,17 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (
s.copyFrom = s.executor.contextDir
for _, n := range step.Flags {
if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+ var mountPoint string
arr := strings.Split(n, "=")
- stage, ok := s.executor.stages[arr[1]]
+ otherStage, ok := s.executor.stages[arr[1]]
if !ok {
- return "", nil, errors.Errorf("%s --from=%s: no stage found with that name", step.Command, arr[1])
+ if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", step.Command, arr[1])
+ }
+ } else {
+ mountPoint = otherStage.mountPoint
}
- s.copyFrom = stage.mountPoint
+ s.copyFrom = mountPoint
break
}
}
@@ -903,101 +988,159 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (
noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
}
- // If we're doing a single-layer build and not looking to take
- // shortcuts using the cache, make a note of the instruction,
- // process it, and then move on to the next instruction.
- if !s.executor.layers && s.executor.useCache {
+ // If we're doing a single-layer build, just process the
+ // instruction.
+ if !s.executor.layers {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- return "", nil, errors.Wrapf(err, "error building at step %+v", *step)
+ logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
+ }
+ if i < len(children)-1 {
+ // There are still more instructions to process
+ // for this stage. Make a note of the
+ // instruction in the history that we'll write
+ // for the image when we eventually commit it.
+ now := time.Now()
+ s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "")
+ continue
+ } else {
+ // This is the last instruction for this stage,
+ // so we should commit this container to create
+ // an image.
+ logCommit(s.output, i)
+ imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), s.output)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+ }
+ logImageID(imgID)
+ break
}
- continue
- }
-
- if i < len(children)-1 {
- commitName = ""
- } else {
- commitName = s.output
}
- // TODO: this makes the tests happy, but it shouldn't be
- // necessary unless this is the final stage.
- commitName = s.executor.output
-
+ // We're in a multi-layered build.
var (
- cacheID string
- err error
+ commitName string
+ cacheID string
+ err error
+ rebase bool
)
+ // If we have to commit for this instruction, only assign the
+ // stage's configured output name to the last layer.
+ if i == len(children)-1 {
+ commitName = s.output
+ }
+
// If we're using the cache, and we've managed to stick with
// cached images so far, look for one that matches what we
// expect to produce for this instruction.
- if checkForLayers && s.executor.useCache {
+ // Only check at steps where we commit, so that we don't
+ // abandon the cache at this step just because we can't find an
+ // image with a history entry in it that we wouldn't have
+ // committed.
+ if checkForLayers && (s.stepRequiresCommit(step) || i == len(children)-1) && !(s.executor.squash && i == len(children)-1 && s.index == s.stages-1) {
cacheID, err = s.layerExists(ctx, node, children[:i])
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
- }
- if cacheID != "" {
- fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID)
- }
-
- // If a cache is found and we're on the last step, that means
- // nothing in this phase changed. Just create a copy of the
- // existing image and save it with the name that we were going
- // to assign to the one that we were building, and make sure
- // that the builder's root fs matches it.
- if cacheID != "" && i == len(children)-1 {
- if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
- return "", nil, err
+ if cacheID != "" {
+ // Note the cache hit.
+ fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID)
+ } else {
+ // We're not going to find any more cache hits.
+ checkForLayers = false
}
- break
}
- // If we didn't find a cached step that we could just reuse,
- // process the instruction and commit the layer.
- if cacheID == "" || !checkForLayers {
- checkForLayers = false
- err := ib.Run(step, s, noRunsRemaining)
- if err != nil {
- return "", nil, errors.Wrapf(err, "error building at step %+v", *step)
+ if cacheID != "" {
+ // A suitable cached image was found, so just reuse it.
+ // If we need to name the resulting image because it's
+ // the last step in this stage, add the name to the
+ // image.
+ imgID = cacheID
+ if commitName != "" && (s.stepRequiresCommit(step) || i == len(children)-1) {
+ logCommit(s.output, i)
+ if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
+ return "", nil, err
+ }
+ logImageID(imgID)
}
- }
-
- // Commit if no cache is found
- if cacheID == "" {
- imgID, ref, err = s.Commit(ctx, ib, getCreatedBy(node), commitName)
+ // Update our working container to be based off of the
+ // cached image, in case we need to read content from
+ // its root filesystem.
+ rebase = true
+ } else {
+ // If we didn't find a cached image that we could just reuse,
+ // process the instruction directly.
+ err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+ logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
+ return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
- if i == len(children)-1 {
- s.executor.log("COMMIT %s", commitName)
+ if s.stepRequiresCommit(step) || i == len(children)-1 {
+ // Either this is the last instruction, or
+ // there are more instructions and we need to
+ // create a layer from this one before
+ // continuing.
+ // TODO: only commit for the last instruction
+ // case if we need to use this stage's image as
+ // a base image later, or if we're the final
+ // stage.
+ logCommit(s.output, i)
+ imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), commitName)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
+ }
+ logImageID(imgID)
+ // We only need to build a new container rootfs
+ // using this image if we plan on making
+ // further changes to it. Subsequent stages
+ // that just want to use the rootfs as a source
+ // for COPY or ADD will be content with what we
+ // already have.
+ rebase = i < len(children)-1
+ } else {
+ // There are still more instructions to process
+ // for this stage, and we don't need to commit
+ // here. Make a note of the instruction in the
+ // history for the next commit.
+ now := time.Now()
+ s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "")
}
- } else {
- // If we did find a cache, reuse the cached image's ID
- // as the basis for the container for the next step.
- imgID = cacheID
}
- // Prepare for the next step with imgID as the new base image.
- if i < len(children)-1 {
+ if rebase {
+ // Since we either committed the working container or
+ // are about to replace it with one based on a cached
+ // image, add the current working container's ID to the
+ // list of successful intermediate containers that
+ // we'll clean up later.
s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
- if err := s.Prepare(ctx, stage, imgID); err != nil {
+
+ // Prepare for the next step or subsequent phases by
+ // creating a new working container with the
+ // just-committed or updated cached image as its new
+ // base image.
+ // TODO: only create a new container if we know that
+ // we'll need the updated root filesystem.
+ if _, err := s.prepare(ctx, stage, imgID, false, true); err != nil {
return "", nil, errors.Wrap(err, "error preparing container for next step")
}
}
}
- if s.executor.layers { // print out the final imageID if we're using layers flag
- fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
- }
-
return imgID, ref, nil
}
// copyExistingImage creates a copy of an image already in the store
func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
- // Get the destination Image Reference
+ // If we don't need to attach a name to the image, just return the cache ID.
+ if output == "" {
+ return cacheID, nil, nil
+ }
+
+ // Get the destination image reference.
dest, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
@@ -1026,7 +1169,6 @@ func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output s
if err != nil {
return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
}
- s.executor.log("COMMIT %s", s.output)
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
@@ -1094,6 +1236,9 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi
// getCreatedBy returns the command the image at node will be created by.
func getCreatedBy(node *parser.Node) string {
+ if node == nil {
+ return "/bin/sh"
+ }
if node.Value == "run" {
return "/bin/sh -c " + node.Original[4:]
}
@@ -1201,12 +1346,16 @@ func urlContentModified(url string, historyTime *time.Time) (bool, error) {
return true, nil
}
-// Commit writes the container's contents to an image, using a passed-in tag as
+// commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
-func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) {
- imageRef, err := s.executor.resolveNameToImageRef(output)
- if err != nil {
- return "", nil, err
+func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) {
+ var imageRef types.ImageReference
+ if output != "" {
+ imageRef2, err := s.executor.resolveNameToImageRef(output)
+ if err != nil {
+ return "", nil, err
+ }
+ imageRef = imageRef2
}
if ib.Author != "" {
@@ -1274,14 +1423,8 @@ func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, cr
if imageRef != nil {
logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName)
- if !s.executor.quiet && !s.executor.layers && s.executor.useCache {
- s.executor.log("COMMIT %s", logName)
- }
} else {
logrus.Debugf("COMMIT")
- if !s.executor.quiet && !s.executor.layers && s.executor.useCache {
- s.executor.log("COMMIT")
- }
}
writer := s.executor.reportWriter
if s.executor.layers || !s.executor.useCache {
@@ -1294,7 +1437,6 @@ func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, cr
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
- IIDFile: s.executor.iidfile,
Squash: s.executor.squash,
BlobDirectory: s.executor.blobDirectory,
Parent: s.builder.FromImageID,
@@ -1303,13 +1445,12 @@ func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, cr
if err != nil {
return "", nil, err
}
- if options.IIDFile == "" && imgID != "" {
- fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
- }
var ref reference.Canonical
- if dref := imageRef.DockerReference(); dref != nil {
- if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
- return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
+ if imageRef != nil {
+ if dref := imageRef.DockerReference(); dref != nil {
+ if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
+ return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
+ }
}
}
return imgID, ref, nil
@@ -1321,10 +1462,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
if len(stages) == 0 {
return "", nil, errors.New("error building: no stages to build")
}
- var (
- stageExecutor *StageExecutor
- cleanupImages []string
- )
+ var cleanupImages []string
cleanupStages := make(map[int]*StageExecutor)
cleanup := func() error {
@@ -1339,6 +1477,14 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
}
cleanupStages = nil
+ // Clean up any builders that we used to get data from images.
+ for _, builder := range b.containerMap {
+ if err := builder.Delete(); err != nil {
+ logrus.Debugf("Failed to cleanup image containers: %v", err)
+ lastErr = err
+ }
+ }
+ b.containerMap = nil
// Clean up any intermediate containers associated with stages,
// since we're not keeping them for debugging.
if b.removeIntermediateCtrs {
@@ -1382,37 +1528,44 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
output = b.output
}
- stageExecutor = b.startStage(stage.Name, stage.Position, len(stages), base, output)
- if err := stageExecutor.Prepare(ctx, stage, base); err != nil {
- return "", nil, err
- }
+ stageExecutor := b.startStage(stage.Name, stage.Position, len(stages), base, output)
- // Always remove the intermediate/build containers, even if the build was unsuccessful.
- // If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
- // is true.
+ // If this a single-layer build, or if it's a multi-layered
+ // build and b.forceRmIntermediateCtrs is set, make sure we
+ // remove the intermediate/build containers, regardless of
+ // whether or not the stage's build fails.
if b.forceRmIntermediateCtrs || !b.layers {
cleanupStages[stage.Position] = stageExecutor
}
- if imageID, ref, err = stageExecutor.Execute(ctx, stage); err != nil {
+
+ // Build this stage.
+ if imageID, ref, err = stageExecutor.Execute(ctx, stage, base); err != nil {
lastErr = err
}
if lastErr != nil {
return "", nil, lastErr
}
- if !b.forceRmIntermediateCtrs && b.removeIntermediateCtrs {
+
+ // The stage succeeded, so remove its build container if we're
+ // told to delete successful intermediate/build containers for
+ // multi-layered builds.
+ if b.removeIntermediateCtrs {
cleanupStages[stage.Position] = stageExecutor
}
- // If this is an intermediate stage, make a note to remove its
- // image later.
- if _, err := strconv.Atoi(stage.Name); err != nil {
- if imageID, ref, err = stageExecutor.Commit(ctx, stages[stageIndex].Builder, "", output); err != nil {
- return "", nil, err
- }
+ // If this is an intermediate stage, make a note of the ID, so
+ // that we can look it up later.
+ if stageIndex < len(stages)-1 {
b.imageMap[stage.Name] = imageID
- cleanupImages = append(cleanupImages, imageID)
+ // We're not populating the cache with intermediate
+ // images, so add this one to the list of images that
+ // we'll remove later.
+ if !b.layers {
+ cleanupImages = append(cleanupImages, imageID)
+ }
}
}
+
if len(b.unusedArgs) > 0 {
unusedList := make([]string, 0, len(b.unusedArgs))
for k := range b.unusedArgs {
@@ -1422,25 +1575,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
}
- // Check if we have a one line Dockerfile (i.e., single phase, no
- // actual steps) making layers irrelevant, or the user told us to
- // ignore layers.
- singleLineDockerfile := (len(stages) < 2 && len(stages[0].Node.Children) < 1)
- ignoreLayers := singleLineDockerfile || !b.layers && b.useCache
-
- if ignoreLayers {
- if imageID, ref, err = stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "", b.output); err != nil {
- return "", nil, err
- }
- if singleLineDockerfile {
- b.log("COMMIT %s", ref)
- }
- }
-
if err := cleanup(); err != nil {
return "", nil, err
}
+ if b.iidfile != "" {
+ if err = ioutil.WriteFile(b.iidfile, []byte(imageID), 0644); err != nil {
+ return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile)
+ }
+ }
+
return imageID, ref, nil
}
@@ -1516,8 +1660,6 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
dockerfiles = append(dockerfiles, data)
}
- dockerfiles = processCopyFrom(dockerfiles)
-
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
@@ -1548,79 +1690,6 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return exec.Build(ctx, stages)
}
-// processCopyFrom goes through the Dockerfiles and handles any 'COPY --from' instances
-// prepending a new FROM statement the Dockerfile that do not already have a corresponding
-// FROM command within them.
-func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
- var newDockerfiles []io.ReadCloser
- // fromMap contains the names of the images seen in a FROM
- // line in the Dockerfiles. The boolean value just completes the map object.
- fromMap := make(map[string]bool)
- // asMap contains the names of the images seen after a "FROM image AS"
- // line in the Dockefiles. The boolean value just completes the map object.
- asMap := make(map[string]bool)
-
- copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
- fromRE := regexp.MustCompile(`\s*FROM\s+`)
- asRE := regexp.MustCompile(`(?i)\s+as\s+`)
- for _, dfile := range dockerfiles {
- if dfileBinary, err := ioutil.ReadAll(dfile); err == nil {
- dfileString := fmt.Sprintf("%s", dfileBinary)
- copyFromContent := copyRE.Split(dfileString, -1)
- // no "COPY --from=", just continue
- if len(copyFromContent) < 2 {
- newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
- continue
- }
- // Load all image names in our Dockerfiles into a map
- // for easy reference later.
- fromContent := fromRE.Split(dfileString, -1)
- for i := 0; i < len(fromContent); i++ {
- imageName := strings.Split(fromContent[i], " ")
- if len(imageName) > 0 {
- finalImage := strings.Split(imageName[0], "\n")
- if finalImage[0] != "" {
- fromMap[strings.TrimSpace(finalImage[0])] = true
- }
- }
- }
- logrus.Debug("fromMap: ", fromMap)
-
- // Load all image names associated with an 'as' or 'AS' in
- // our Dockerfiles into a map for easy reference later.
- asContent := asRE.Split(dfileString, -1)
- // Skip the first entry in the array as it's stuff before
- // the " as " and we don't care.
- for i := 1; i < len(asContent); i++ {
- asName := strings.Split(asContent[i], " ")
- if len(asName) > 0 {
- finalAsImage := strings.Split(asName[0], "\n")
- if finalAsImage[0] != "" {
- asMap[strings.TrimSpace(finalAsImage[0])] = true
- }
- }
- }
- logrus.Debug("asMap: ", asMap)
-
- for i := 1; i < len(copyFromContent); i++ {
- fromArray := strings.Split(copyFromContent[i], " ")
- // If the image isn't a stage number or already declared,
- // add a FROM statement for it to the top of our Dockerfile.
- trimmedFrom := strings.TrimSpace(fromArray[0])
- _, okFrom := fromMap[trimmedFrom]
- _, okAs := asMap[trimmedFrom]
- _, err := strconv.Atoi(trimmedFrom)
- if !okFrom && !okAs && err != nil {
- from := "FROM " + trimmedFrom
- newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(from)))
- }
- }
- newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
- } // End if dfileBinary, err := ioutil.ReadAll(dfile); err == nil
- } // End for _, dfile := range dockerfiles {
- return newDockerfiles
-}
-
// deleteSuccessfulIntermediateCtrs goes through the container IDs in each
// stage's containerIDs list and deletes the containers associated with those
// IDs.
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 35dc5438a..f982fcebf 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -105,6 +105,18 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
return "", "", errors.Errorf("unreachable code reached")
}
+func dedupeStringSlice(slice []string) []string {
+ done := make([]string, 0, len(slice))
+ m := make(map[string]struct{})
+ for _, s := range slice {
+ if _, present := m[s]; !present {
+ m[s] = struct{}{}
+ done = append(done, s)
+ }
+ }
+ return done
+}
+
// InitReexec is a wrapper for buildah.InitReexec(). It should be called at
// the start of main(), and if it returns true, main() should return
// immediately.
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
index 7c73da87e..32b54f257 100644
--- a/vendor/github.com/containers/buildah/info.go
+++ b/vendor/github.com/containers/buildah/info.go
@@ -11,7 +11,7 @@ import (
"strings"
"time"
- "github.com/containers/buildah/unshare"
+ "github.com/containers/buildah/pkg/unshare"
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index da07545c7..6c4d14303 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -86,6 +86,9 @@ type FromAndBudResults struct {
CPUSetCPUs string
CPUSetMems string
CPUShares uint64
+ DNSSearch []string
+ DNSServers []string
+ DNSOptions []string
Isolation string
Memory string
MemorySwap string
@@ -132,9 +135,9 @@ func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
// GetBudFlags returns common bud flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
- fs.StringSliceVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
+ fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
- fs.StringSliceVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
+ fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image")
@@ -144,7 +147,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
- fs.StringSliceVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
+ fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
@@ -157,7 +160,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
fs.BoolVar(&flags.Squash, "squash", false, "Squash newly built layers into a single new layer.")
- fs.StringSliceVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
+ fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.BoolVar(&flags.TlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
return fs
@@ -176,10 +179,13 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
+ fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers")
+ fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
- fs.StringSliceVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
+ fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
fs.StringVar(&flags.ShmSize, "shm-size", "65536k", "size of '/dev/shm'. The format is `<number><unit>`.")
fs.StringSliceVar(&flags.Ulimit, "ulimit", []string{}, "ulimit options (default [])")
fs.StringSliceVarP(&flags.Volume, "volume", "v", []string{}, "bind mount a volume into the container (default [])")
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 50318315f..c4e3e4264 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -6,7 +6,6 @@ package parse
import (
"fmt"
- "github.com/spf13/cobra"
"net"
"os"
"path/filepath"
@@ -21,6 +20,7 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
)
@@ -71,6 +71,11 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
}
}
}
+
+ dnsServers, _ := c.Flags().GetStringSlice("dns")
+ dnsSearch, _ := c.Flags().GetStringSlice("dns-search")
+ dnsOptions, _ := c.Flags().GetStringSlice("dns-option")
+
if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
return nil, errors.Wrapf(err, "invalid --shm-size")
}
@@ -90,13 +95,16 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(),
CPUSetMems: c.Flag("cpuset-mems").Value.String(),
CPUShares: cpuShares,
+ DNSSearch: dnsSearch,
+ DNSServers: dnsServers,
+ DNSOptions: dnsOptions,
Memory: memoryLimit,
MemorySwap: memorySwap,
ShmSize: c.Flag("shm-size").Value.String(),
Ulimit: append(defaultLimits, ulimit...),
Volumes: volumes,
}
- securityOpts, _ := c.Flags().GetStringSlice("security-opt")
+ securityOpts, _ := c.Flags().GetStringArray("security-opt")
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
index 8eefae41b..67a3e0e4d 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.c
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c
@@ -39,7 +39,7 @@
static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
-static int _buildah_unshare_parse_envint(const char *envname) {
+static int _containers_unshare_parse_envint(const char *envname) {
char *p, *q;
long l;
@@ -138,7 +138,7 @@ static char **parse_proc_stringlist(const char *list) {
return ret;
}
-static int buildah_reexec(void) {
+static int containers_reexec(void) {
char **argv, *exename;
int fd, mmfd, n_read, n_written;
struct stat st;
@@ -196,12 +196,12 @@ static int buildah_reexec(void) {
return 0;
}
-void _buildah_unshare(void)
+void _containers_unshare(void)
{
int flags, pidfd, continuefd, n, pgrp, sid, ctty;
char buf[2048];
- flags = _buildah_unshare_parse_envint("_Buildah-unshare");
+ flags = _containers_unshare_parse_envint("_Containers-unshare");
if (flags == -1) {
return;
}
@@ -213,7 +213,7 @@ void _buildah_unshare(void)
_exit(1);
}
}
- pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe");
+ pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
if (pidfd != -1) {
snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
size_t size = write(pidfd, buf, strlen(buf));
@@ -223,7 +223,7 @@ void _buildah_unshare(void)
}
close(pidfd);
}
- continuefd = _buildah_unshare_parse_envint("_Buildah-continue-pipe");
+ continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
if (continuefd != -1) {
n = read(continuefd, buf, sizeof(buf));
if (n > 0) {
@@ -232,21 +232,21 @@ void _buildah_unshare(void)
}
close(continuefd);
}
- sid = _buildah_unshare_parse_envint("_Buildah-setsid");
+ sid = _containers_unshare_parse_envint("_Containers-setsid");
if (sid == 1) {
if (setsid() == -1) {
fprintf(stderr, "Error during setsid: %m\n");
_exit(1);
}
}
- pgrp = _buildah_unshare_parse_envint("_Buildah-setpgrp");
+ pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
if (pgrp == 1) {
if (setpgrp() == -1) {
fprintf(stderr, "Error during setpgrp: %m\n");
_exit(1);
}
}
- ctty = _buildah_unshare_parse_envint("_Buildah-ctty");
+ ctty = _containers_unshare_parse_envint("_Containers-ctty");
if (ctty != -1) {
if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
@@ -269,7 +269,7 @@ void _buildah_unshare(void)
_exit(1);
}
}
- if (buildah_reexec() != 0) {
+ if (containers_reexec() != 0) {
_exit(1);
}
return;
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
index 77aee282f..5b2e7d7d1 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go
@@ -8,6 +8,7 @@ import (
"io"
"os"
"os/exec"
+ "os/user"
"runtime"
"strconv"
"strings"
@@ -18,6 +19,8 @@ import (
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/syndtr/gocapability/capability"
)
// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
@@ -54,7 +57,7 @@ func (c *Cmd) Start() error {
if c.Env == nil {
c.Env = os.Environ()
}
- c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags))
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))
// Please the libpod "rootless" package to find the expected env variables.
if os.Geteuid() != 0 {
@@ -67,7 +70,7 @@ func (c *Cmd) Start() error {
if err != nil {
return errors.Wrapf(err, "error creating pid pipe")
}
- c.Env = append(c.Env, fmt.Sprintf("_Buildah-pid-pipe=%d", len(c.ExtraFiles)+3))
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
// Create the pipe for letting the child know to proceed.
@@ -77,18 +80,18 @@ func (c *Cmd) Start() error {
pidWrite.Close()
return errors.Wrapf(err, "error creating pid pipe")
}
- c.Env = append(c.Env, fmt.Sprintf("_Buildah-continue-pipe=%d", len(c.ExtraFiles)+3))
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, continueRead)
// Pass along other instructions.
if c.Setsid {
- c.Env = append(c.Env, "_Buildah-setsid=1")
+ c.Env = append(c.Env, "_Containers-setsid=1")
}
if c.Setpgrp {
- c.Env = append(c.Env, "_Buildah-setpgrp=1")
+ c.Env = append(c.Env, "_Containers-setpgrp=1")
}
if c.Ctty != nil {
- c.Env = append(c.Env, fmt.Sprintf("_Buildah-ctty=%d", len(c.ExtraFiles)+3))
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
}
@@ -306,3 +309,140 @@ func GetRootlessUID() int {
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=done")
}
+
+type Runnable interface {
+ Run() error
+}
+
+func bailOnError(err error, format string, a ...interface{}) {
+ if err != nil {
+ if format != "" {
+ logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
+ } else {
+ logrus.Errorf("%v", err)
+ }
+ os.Exit(1)
+ }
+}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+ // If we've already been through this once, no need to try again.
+ if os.Geteuid() == 0 && IsRootless() {
+ return
+ }
+
+ var uidNum, gidNum uint64
+ // Figure out who we are.
+ me, err := user.Current()
+ if !os.IsNotExist(err) {
+ bailOnError(err, "error determining current user")
+ uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
+ bailOnError(err, "error parsing current UID %s", me.Uid)
+ gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
+ bailOnError(err, "error parsing current GID %s", me.Gid)
+ }
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // ID mappings to use to reexec ourselves.
+ var uidmap, gidmap []specs.LinuxIDMapping
+ if uidNum != 0 || evenForRoot {
+ // Read the set of ID mappings that we're allowed to use. Each
+ // range in /etc/subuid and /etc/subgid file is a starting host
+ // ID and a range size.
+ uidmap, gidmap, err = util.GetSubIDMappings(me.Username, me.Username)
+ bailOnError(err, "error reading allowed ID mappings")
+ if len(uidmap) == 0 {
+ logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
+ }
+ if len(gidmap) == 0 {
+ logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
+ }
+ // Map our UID and GID, then the subuid and subgid ranges,
+ // consecutively, starting at 0, to get the mappings to use for
+ // a copy of ourselves.
+ uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
+ gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
+ var rangeStart uint32
+ for i := range uidmap {
+ uidmap[i].ContainerID = rangeStart
+ rangeStart += uidmap[i].Size
+ }
+ rangeStart = 0
+ for i := range gidmap {
+ gidmap[i].ContainerID = rangeStart
+ rangeStart += gidmap[i].Size
+ }
+ } else {
+ // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
+ // to use unshare(), so don't bother creating a new user namespace at this point.
+ capabilities, err := capability.NewPid(0)
+ bailOnError(err, "error reading the current capabilities sets")
+ if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
+ return
+ }
+ // Read the set of ID mappings that we're currently using.
+ uidmap, gidmap, err = util.GetHostIDMappings("")
+ bailOnError(err, "error reading current ID mappings")
+ // Just reuse them.
+ for i := range uidmap {
+ uidmap[i].HostID = uidmap[i].ContainerID
+ }
+ for i := range gidmap {
+ gidmap[i].HostID = gidmap[i].ContainerID
+ }
+ }
+
+ // Unlike most uses of reexec or unshare, we're using a name that
+ // _won't_ be recognized as a registered reexec handler, since we
+ // _want_ to fall through reexec.Init() to the normal main().
+ cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)
+
+ // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
+ err = os.Setenv(UsernsEnvName, "1")
+ bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
+
+ // Reuse our stdio.
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+
+ // Set up a new user namespace with the ID mapping.
+ cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
+ cmd.UseNewuidmap = uidNum != 0
+ cmd.UidMappings = uidmap
+ cmd.UseNewgidmap = uidNum != 0
+ cmd.GidMappings = gidmap
+ cmd.GidMappingsEnableSetgroups = true
+
+ // Finish up.
+ logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+ ExecRunnable(cmd)
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable) {
+ if err := cmd.Run(); err != nil {
+ if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+ if exitError.ProcessState.Exited() {
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+ if waitStatus.Exited() {
+ logrus.Errorf("%v", exitError)
+ os.Exit(waitStatus.ExitStatus())
+ }
+ if waitStatus.Signaled() {
+ logrus.Errorf("%v", exitError)
+ os.Exit(int(waitStatus.Signal()) + 128)
+ }
+ }
+ }
+ }
+ logrus.Errorf("%v", err)
+ logrus.Errorf("(unable to determine exit status)")
+ os.Exit(1)
+ }
+ os.Exit(0)
+}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare_cgo.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go
index 26a0b2c20..b3f8099f6 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare_cgo.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go
@@ -3,8 +3,8 @@
package unshare
// #cgo CFLAGS: -Wall
-// extern void _buildah_unshare(void);
+// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
-// _buildah_unshare();
+// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/buildah/unshare/unshare_gccgo.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go
index c4811782a..2f95da7d8 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare_gccgo.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go
@@ -3,9 +3,9 @@
package unshare
// #cgo CFLAGS: -Wall -Wextra
-// extern void _buildah_unshare(void);
+// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
-// _buildah_unshare();
+// _containers_unshare();
// }
import "C"
diff --git a/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go
index 3336fdad9..d8d5f6f7a 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go
@@ -25,3 +25,7 @@ func GetRootlessUID() int {
func RootlessEnv() []string {
return append(os.Environ(), UsernsEnvName+"=")
}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index cd6568b66..5d28644d7 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -22,13 +22,15 @@ import (
"github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot"
"github.com/containers/buildah/pkg/secrets"
- "github.com/containers/buildah/unshare"
+ "github.com/containers/buildah/pkg/unshare"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
units "github.com/docker/go-units"
+ "github.com/docker/libnetwork/resolvconf"
+ "github.com/docker/libnetwork/types"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
@@ -593,13 +595,51 @@ func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts
}
// addNetworkConfig copies files from host and sets them up to bind mount into container
-func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair) (string, error) {
- copyFileWithTar := b.copyFileWithTar(chownOpts, nil)
+func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string) (string, error) {
+ stat, err := os.Stat(hostPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID)
+ }
+ contents, err := ioutil.ReadFile(hostPath)
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to read %s", hostPath)
+ }
+
+ search := resolvconf.GetSearchDomains(contents)
+ nameservers := resolvconf.GetNameservers(contents, types.IP)
+ options := resolvconf.GetOptions(contents)
+
+ if len(dnsSearch) > 0 {
+ search = dnsSearch
+ }
+ if len(dnsServers) != 0 {
+ dns, err := getDNSIP(dnsServers)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting dns servers")
+ }
+ nameservers = []string{}
+ for _, server := range dns {
+ nameservers = append(nameservers, server.String())
+ }
+ }
+
+ if len(dnsOptions) != 0 {
+ options = dnsOptions
+ }
cfile := filepath.Join(rdir, filepath.Base(hostPath))
+ if _, err = resolvconf.Build(cfile, nameservers, search, options); err != nil {
+ return "", errors.Wrapf(err, "error building resolv.conf for container %s", b.ContainerID)
+ }
- if err := copyFileWithTar(hostPath, cfile); err != nil {
- return "", errors.Wrapf(err, "error copying %q for container %q", cfile, b.ContainerID)
+ uid := int(stat.Sys().(*syscall.Stat_t).Uid)
+ gid := int(stat.Sys().(*syscall.Stat_t).Gid)
+ if chownOpts != nil {
+ uid = chownOpts.UID
+ gid = chownOpts.GID
+ }
+ if err = os.Chown(cfile, uid, gid); err != nil {
+ return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID)
}
if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
@@ -609,6 +649,17 @@ func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDP
return cfile, nil
}
+func getDNSIP(dnsServers []string) (dns []net.IP, err error) {
+ for _, i := range dnsServers {
+ result := net.ParseIP(i)
+ if result == nil {
+ return dns, errors.Errorf("invalid IP address %s", i)
+ }
+ dns = append(dns, result)
+ }
+ return dns, nil
+}
+
// generateHosts creates a containers hosts file
func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) {
hostPath := "/etc/hosts"
@@ -1113,7 +1164,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
if !contains(volumes, "/etc/resolv.conf") {
- resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair)
+ resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index 327de39b2..a77130acb 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -8,7 +8,7 @@ github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1
-github.com/containers/storage v1.12.1
+github.com/containers/storage v1.12.2
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
@@ -65,3 +65,4 @@ github.com/klauspost/cpuid v1.2.0
github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
+github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md
new file mode 100644
index 000000000..cdda554ba
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/resolvconf/README.md
@@ -0,0 +1 @@
+Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
new file mode 100644
index 000000000..e348bc57f
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
@@ -0,0 +1,26 @@
+package dns
+
+import (
+ "regexp"
+)
+
+// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
+const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
+
+// IPv4Localhost is a regex pattern for IPv4 localhost address range.
+const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})`
+
+var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
+var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost)
+
+// IsLocalhost returns true if ip matches the localhost IP regular expression.
+// Used for determining if nameserver settings are being passed which are
+// localhost addresses
+func IsLocalhost(ip string) bool {
+ return localhostIPRegexp.MatchString(ip)
+}
+
+// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression.
+func IsIPv4Localhost(ip string) bool {
+ return localhostIPv4Regexp.MatchString(ip)
+}
diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
new file mode 100644
index 000000000..23caf7f12
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
@@ -0,0 +1,251 @@
+// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
+package resolvconf
+
+import (
+ "bytes"
+ "io/ioutil"
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/libnetwork/resolvconf/dns"
+ "github.com/docker/libnetwork/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // DefaultResolvConf points to the default file used for dns configuration on a linux machine
+ DefaultResolvConf = "/etc/resolv.conf"
+)
+
+var (
+ // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
+ defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
+ defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
+ ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
+ ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
+ // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
+ // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
+ // -- e.g. other link-local types -- either won't work in containers or are unnecessary.
+ // For readability and sufficiency for Docker purposes this seemed more reasonable than a
+ // 1000+ character regexp with exact and complete IPv6 validation
+ ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?`
+
+ localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
+ nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
+ nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
+ nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`)
+ nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`)
+ searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
+ optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`)
+)
+
+var lastModified struct {
+ sync.Mutex
+ sha256 string
+ contents []byte
+}
+
+// File contains the resolv.conf content and its hash
+type File struct {
+ Content []byte
+ Hash string
+}
+
+// Get returns the contents of /etc/resolv.conf and its hash
+func Get() (*File, error) {
+ return GetSpecific(DefaultResolvConf)
+}
+
+// GetSpecific returns the contents of the user specified resolv.conf file and its hash
+func GetSpecific(path string) (*File, error) {
+ resolv, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ hash, err := ioutils.HashData(bytes.NewReader(resolv))
+ if err != nil {
+ return nil, err
+ }
+ return &File{Content: resolv, Hash: hash}, nil
+}
+
+// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash
+// and, if modified since last check, returns the bytes and new hash.
+// This feature is used by the resolv.conf updater for containers
+func GetIfChanged() (*File, error) {
+ lastModified.Lock()
+ defer lastModified.Unlock()
+
+ resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ return nil, err
+ }
+ newHash, err := ioutils.HashData(bytes.NewReader(resolv))
+ if err != nil {
+ return nil, err
+ }
+ if lastModified.sha256 != newHash {
+ lastModified.sha256 = newHash
+ lastModified.contents = resolv
+ return &File{Content: resolv, Hash: newHash}, nil
+ }
+ // nothing changed, so return no data
+ return nil, nil
+}
+
+// GetLastModified retrieves the last used contents and hash of the host resolv.conf.
+// Used by containers updating on restart
+func GetLastModified() *File {
+ lastModified.Lock()
+ defer lastModified.Unlock()
+
+ return &File{Content: lastModified.contents, Hash: lastModified.sha256}
+}
+
+// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs:
+// 1. It looks for localhost (127.*|::1) entries in the provided
+// resolv.conf, removing local nameserver entries, and, if the resulting
+// cleaned config has no defined nameservers left, adds default DNS entries
+// 2. Given the caller provides the enable/disable state of IPv6, the filter
+// code will remove all IPv6 nameservers if it is not enabled for containers
+//
+func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {
+ cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
+ // if IPv6 is not enabled, also clean out any IPv6 address nameserver
+ if !ipv6Enabled {
+ cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})
+ }
+ // if the resulting resolvConf has no more nameservers defined, add appropriate
+ // default DNS servers for IPv4 and (optionally) IPv6
+ if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 {
+ logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns)
+ dns := defaultIPv4Dns
+ if ipv6Enabled {
+ logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns)
+ dns = append(dns, defaultIPv6Dns...)
+ }
+ cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
+ }
+ hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf))
+ if err != nil {
+ return nil, err
+ }
+ return &File{Content: cleanedResolvConf, Hash: hash}, nil
+}
+
+// getLines parses input into lines and strips away comments.
+func getLines(input []byte, commentMarker []byte) [][]byte {
+ lines := bytes.Split(input, []byte("\n"))
+ var output [][]byte
+ for _, currentLine := range lines {
+ var commentIndex = bytes.Index(currentLine, commentMarker)
+ if commentIndex == -1 {
+ output = append(output, currentLine)
+ } else {
+ output = append(output, currentLine[:commentIndex])
+ }
+ }
+ return output
+}
+
+// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
+func GetNameservers(resolvConf []byte, kind int) []string {
+ nameservers := []string{}
+ for _, line := range getLines(resolvConf, []byte("#")) {
+ var ns [][]byte
+ if kind == types.IP {
+ ns = nsRegexp.FindSubmatch(line)
+ } else if kind == types.IPv4 {
+ ns = nsIPv4Regexpmatch.FindSubmatch(line)
+ } else if kind == types.IPv6 {
+ ns = nsIPv6Regexpmatch.FindSubmatch(line)
+ }
+ if len(ns) > 0 {
+ nameservers = append(nameservers, string(ns[1]))
+ }
+ }
+ return nameservers
+}
+
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func GetNameserversAsCIDR(resolvConf []byte) []string {
+ nameservers := []string{}
+ for _, nameserver := range GetNameservers(resolvConf, types.IP) {
+ var address string
+ // If IPv6, strip zone if present
+ if strings.Contains(nameserver, ":") {
+ address = strings.Split(nameserver, "%")[0] + "/128"
+ } else {
+ address = nameserver + "/32"
+ }
+ nameservers = append(nameservers, address)
+ }
+ return nameservers
+}
+
+// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
+// If more than one search line is encountered, only the contents of the last
+// one is returned.
+func GetSearchDomains(resolvConf []byte) []string {
+ domains := []string{}
+ for _, line := range getLines(resolvConf, []byte("#")) {
+ match := searchRegexp.FindSubmatch(line)
+ if match == nil {
+ continue
+ }
+ domains = strings.Fields(string(match[1]))
+ }
+ return domains
+}
+
+// GetOptions returns options (if any) listed in /etc/resolv.conf
+// If more than one options line is encountered, only the contents of the last
+// one is returned.
+func GetOptions(resolvConf []byte) []string {
+ options := []string{}
+ for _, line := range getLines(resolvConf, []byte("#")) {
+ match := optionsRegexp.FindSubmatch(line)
+ if match == nil {
+ continue
+ }
+ options = strings.Fields(string(match[1]))
+ }
+ return options
+}
+
+// Build writes a configuration file to path containing a "nameserver" entry
+// for every element in dns, a "search" entry for every element in
+// dnsSearch, and an "options" entry for every element in dnsOptions.
+func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) {
+ content := bytes.NewBuffer(nil)
+ if len(dnsSearch) > 0 {
+ if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
+ if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ }
+ for _, dns := range dns {
+ if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ if len(dnsOptions) > 0 {
+ if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" {
+ if _, err := content.WriteString("options " + optsString + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
+ if err != nil {
+ return nil, err
+ }
+
+ return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
+}
diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go
new file mode 100644
index 000000000..b102ba4c3
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/types/types.go
@@ -0,0 +1,653 @@
+// Package types contains types that are common across libnetwork project
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/ishidawataru/sctp"
+)
+
+// constants for the IP address type
+const (
+ IP = iota // IPv4 and IPv6
+ IPv4
+ IPv6
+)
+
+// EncryptionKey is the libnetwork representation of the key distributed by the lead
+// manager.
+type EncryptionKey struct {
+ Subsystem string
+ Algorithm int32
+ Key []byte
+ LamportTime uint64
+}
+
+// UUID represents a globally unique ID of various resources like network and endpoint
+type UUID string
+
+// QosPolicy represents a quality of service policy on an endpoint
+type QosPolicy struct {
+ MaxEgressBandwidth uint64
+}
+
+// TransportPort represents a local Layer 4 endpoint
+type TransportPort struct {
+ Proto Protocol
+ Port uint16
+}
+
+// Equal checks if this instance of Transportport is equal to the passed one
+func (t *TransportPort) Equal(o *TransportPort) bool {
+ if t == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if t.Proto != o.Proto || t.Port != o.Port {
+ return false
+ }
+
+ return true
+}
+
+// GetCopy returns a copy of this TransportPort structure instance
+func (t *TransportPort) GetCopy() TransportPort {
+ return TransportPort{Proto: t.Proto, Port: t.Port}
+}
+
+// String returns the TransportPort structure in string form
+func (t *TransportPort) String() string {
+ return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port)
+}
+
+// FromString reads the TransportPort structure from string
+func (t *TransportPort) FromString(s string) error {
+ ps := strings.Split(s, "/")
+ if len(ps) == 2 {
+ t.Proto = ParseProtocol(ps[0])
+ if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil {
+ t.Port = uint16(p)
+ return nil
+ }
+ }
+ return BadRequestErrorf("invalid format for transport port: %s", s)
+}
+
+// PortBinding represents a port binding between the container and the host
+type PortBinding struct {
+ Proto Protocol
+ IP net.IP
+ Port uint16
+ HostIP net.IP
+ HostPort uint16
+ HostPortEnd uint16
+}
+
+// HostAddr returns the host side transport address
+func (p PortBinding) HostAddr() (net.Addr, error) {
+ switch p.Proto {
+ case UDP:
+ return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ case TCP:
+ return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ case SCTP:
+ return &sctp.SCTPAddr{IP: []net.IP{p.HostIP}, Port: int(p.HostPort)}, nil
+ default:
+ return nil, ErrInvalidProtocolBinding(p.Proto.String())
+ }
+}
+
+// ContainerAddr returns the container side transport address
+func (p PortBinding) ContainerAddr() (net.Addr, error) {
+ switch p.Proto {
+ case UDP:
+ return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil
+ case TCP:
+ return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil
+ case SCTP:
+ return &sctp.SCTPAddr{IP: []net.IP{p.IP}, Port: int(p.Port)}, nil
+ default:
+ return nil, ErrInvalidProtocolBinding(p.Proto.String())
+ }
+}
+
+// GetCopy returns a copy of this PortBinding structure instance
+func (p *PortBinding) GetCopy() PortBinding {
+ return PortBinding{
+ Proto: p.Proto,
+ IP: GetIPCopy(p.IP),
+ Port: p.Port,
+ HostIP: GetIPCopy(p.HostIP),
+ HostPort: p.HostPort,
+ HostPortEnd: p.HostPortEnd,
+ }
+}
+
+// String returns the PortBinding structure in string form
+func (p *PortBinding) String() string {
+ ret := fmt.Sprintf("%s/", p.Proto)
+ if p.IP != nil {
+ ret += p.IP.String()
+ }
+ ret = fmt.Sprintf("%s:%d/", ret, p.Port)
+ if p.HostIP != nil {
+ ret += p.HostIP.String()
+ }
+ ret = fmt.Sprintf("%s:%d", ret, p.HostPort)
+ return ret
+}
+
+// FromString reads the PortBinding structure from string s.
+// String s is a triple of "protocol/containerIP:port/hostIP:port"
+// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form.
+// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported.
+// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString
+// returns an error.
+func (p *PortBinding) FromString(s string) error {
+ ps := strings.Split(s, "/")
+ if len(ps) != 3 {
+ return BadRequestErrorf("invalid format for port binding: %s", s)
+ }
+
+ p.Proto = ParseProtocol(ps[0])
+
+ var err error
+ if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil {
+ return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error())
+ }
+
+ if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil {
+ return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error())
+ }
+
+ return nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ hoststr, portstr, err := net.SplitHostPort(s)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ ip := net.ParseIP(hoststr)
+ if ip == nil {
+ return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr)
+ }
+
+ port, err := strconv.ParseUint(portstr, 10, 16)
+ if err != nil {
+ return nil, 0, BadRequestErrorf("invalid port: %s", portstr)
+ }
+
+ return ip, uint16(port), nil
+}
+
+// Equal checks if this instance of PortBinding is equal to the passed one
+func (p *PortBinding) Equal(o *PortBinding) bool {
+ if p == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if p.Proto != o.Proto || p.Port != o.Port ||
+ p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd {
+ return false
+ }
+
+ if p.IP != nil {
+ if !p.IP.Equal(o.IP) {
+ return false
+ }
+ } else {
+ if o.IP != nil {
+ return false
+ }
+ }
+
+ if p.HostIP != nil {
+ if !p.HostIP.Equal(o.HostIP) {
+ return false
+ }
+ } else {
+ if o.HostIP != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid.
+type ErrInvalidProtocolBinding string
+
+func (ipb ErrInvalidProtocolBinding) Error() string {
+ return fmt.Sprintf("invalid transport protocol: %s", string(ipb))
+}
+
+const (
+ // ICMP is for the ICMP ip protocol
+ ICMP = 1
+ // TCP is for the TCP ip protocol
+ TCP = 6
+ // UDP is for the UDP ip protocol
+ UDP = 17
+ // SCTP is for the SCTP ip protocol
+ SCTP = 132
+)
+
+// Protocol represents an IP protocol number
+type Protocol uint8
+
+func (p Protocol) String() string {
+ switch p {
+ case ICMP:
+ return "icmp"
+ case TCP:
+ return "tcp"
+ case UDP:
+ return "udp"
+ case SCTP:
+ return "sctp"
+ default:
+ return fmt.Sprintf("%d", p)
+ }
+}
+
+// ParseProtocol returns the respective Protocol type for the passed string
+func ParseProtocol(s string) Protocol {
+ switch strings.ToLower(s) {
+ case "icmp":
+ return ICMP
+ case "udp":
+ return UDP
+ case "tcp":
+ return TCP
+ case "sctp":
+ return SCTP
+ default:
+ return 0
+ }
+}
+
+// GetMacCopy returns a copy of the passed MAC address
+func GetMacCopy(from net.HardwareAddr) net.HardwareAddr {
+ if from == nil {
+ return nil
+ }
+ to := make(net.HardwareAddr, len(from))
+ copy(to, from)
+ return to
+}
+
+// GetIPCopy returns a copy of the passed IP address
+func GetIPCopy(from net.IP) net.IP {
+ if from == nil {
+ return nil
+ }
+ to := make(net.IP, len(from))
+ copy(to, from)
+ return to
+}
+
+// GetIPNetCopy returns a copy of the passed IP Network
+func GetIPNetCopy(from *net.IPNet) *net.IPNet {
+ if from == nil {
+ return nil
+ }
+ bm := make(net.IPMask, len(from.Mask))
+ copy(bm, from.Mask)
+ return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm}
+}
+
+// GetIPNetCanonical returns the canonical form for the passed network
+func GetIPNetCanonical(nw *net.IPNet) *net.IPNet {
+ if nw == nil {
+ return nil
+ }
+ c := GetIPNetCopy(nw)
+ c.IP = c.IP.Mask(nw.Mask)
+ return c
+}
+
+// CompareIPNet returns equal if the two IP Networks are equal
+func CompareIPNet(a, b *net.IPNet) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask)
+}
+
+// GetMinimalIP returns the address in its shortest form
+// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned.
+// Otherwise ip is returned unchanged.
+func GetMinimalIP(ip net.IP) net.IP {
+ if ip != nil && ip.To4() != nil {
+ return ip.To4()
+ }
+ return ip
+}
+
+// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation
+func GetMinimalIPNet(nw *net.IPNet) *net.IPNet {
+ if nw == nil {
+ return nil
+ }
+ if len(nw.IP) == 16 && nw.IP.To4() != nil {
+ m := nw.Mask
+ if len(m) == 16 {
+ m = m[12:16]
+ }
+ return &net.IPNet{IP: nw.IP.To4(), Mask: m}
+ }
+ return nw
+}
+
+// IsIPNetValid returns true if the ipnet is a valid network/mask
+// combination. Otherwise returns false.
+func IsIPNetValid(nw *net.IPNet) bool {
+ return nw.String() != "0.0.0.0/0"
+}
+
+var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// compareIPMask checks if the passed ip and mask are semantically compatible.
+// It returns the byte indexes for the address and mask so that caller can
+// do bitwise operations without modifying address representation.
+func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) {
+ // Find the effective starting of address and mask
+ if len(ip) == net.IPv6len && ip.To4() != nil {
+ is = 12
+ }
+ if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) {
+ ms = 12
+ }
+ // Check if address and mask are semantically compatible
+ if len(ip[is:]) != len(mask[ms:]) {
+ err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask)
+ }
+ return
+}
+
+// GetHostPartIP returns the host portion of the ip address identified by the mask.
+// IP address representation is not modified. If address and mask are not compatible
+// an error is returned.
+func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) {
+ // Find the effective starting of address and mask
+ is, ms, err := compareIPMask(ip, mask)
+ if err != nil {
+ return nil, fmt.Errorf("cannot compute host portion ip address because %s", err)
+ }
+
+ // Compute host portion
+ out := GetIPCopy(ip)
+ for i := 0; i < len(mask[ms:]); i++ {
+ out[is+i] &= ^mask[ms+i]
+ }
+
+ return out, nil
+}
+
+// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask).
+// IP address representation is not modified. If address and mask are not compatible
+// an error is returned.
+func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) {
+ // Find the effective starting of address and mask
+ is, ms, err := compareIPMask(ip, mask)
+ if err != nil {
+ return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err)
+ }
+
+ // Compute broadcast address
+ out := GetIPCopy(ip)
+ for i := 0; i < len(mask[ms:]); i++ {
+ out[is+i] |= ^mask[ms+i]
+ }
+
+ return out, nil
+}
+
+// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation
+func ParseCIDR(cidr string) (n *net.IPNet, e error) {
+ var i net.IP
+ if i, n, e = net.ParseCIDR(cidr); e == nil {
+ n.IP = i
+ }
+ return
+}
+
+const (
+ // NEXTHOP indicates a StaticRoute with an IP next hop.
+ NEXTHOP = iota
+
+ // CONNECTED indicates a StaticRoute with an interface for directly connected peers.
+ CONNECTED
+)
+
+// StaticRoute is a statically-provisioned IP route.
+type StaticRoute struct {
+ Destination *net.IPNet
+
+ RouteType int // NEXTHOP or CONNECTED
+
+ // NextHop will be resolved by the kernel (i.e. as a loose hop).
+ NextHop net.IP
+}
+
+// GetCopy returns a copy of this StaticRoute structure
+func (r *StaticRoute) GetCopy() *StaticRoute {
+ d := GetIPNetCopy(r.Destination)
+ nh := GetIPCopy(r.NextHop)
+ return &StaticRoute{Destination: d,
+ RouteType: r.RouteType,
+ NextHop: nh,
+ }
+}
+
+// InterfaceStatistics represents the interface's statistics
+type InterfaceStatistics struct {
+ RxBytes uint64
+ RxPackets uint64
+ RxErrors uint64
+ RxDropped uint64
+ TxBytes uint64
+ TxPackets uint64
+ TxErrors uint64
+ TxDropped uint64
+}
+
+func (is *InterfaceStatistics) String() string {
+ return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d",
+ is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped)
+}
+
+/******************************
+ * Well-known Error Interfaces
+ ******************************/
+
+// MaskableError is an interface for errors which can be ignored by caller
+type MaskableError interface {
+ // Maskable makes implementer into MaskableError type
+ Maskable()
+}
+
+// RetryError is an interface for errors which might get resolved through retry
+type RetryError interface {
+ // Retry makes implementer into RetryError type
+ Retry()
+}
+
+// BadRequestError is an interface for errors originated by a bad request
+type BadRequestError interface {
+ // BadRequest makes implementer into BadRequestError type
+ BadRequest()
+}
+
+// NotFoundError is an interface for errors raised because a needed resource is not available
+type NotFoundError interface {
+ // NotFound makes implementer into NotFoundError type
+ NotFound()
+}
+
+// ForbiddenError is an interface for errors which denote a valid request that cannot be honored
+type ForbiddenError interface {
+ // Forbidden makes implementer into ForbiddenError type
+ Forbidden()
+}
+
+// NoServiceError is an interface for errors returned when the required service is not available
+type NoServiceError interface {
+ // NoService makes implementer into NoServiceError type
+ NoService()
+}
+
+// TimeoutError is an interface for errors raised because of timeout
+type TimeoutError interface {
+ // Timeout makes implementer into TimeoutError type
+ Timeout()
+}
+
+// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented
+type NotImplementedError interface {
+ // NotImplemented makes implementer into NotImplementedError type
+ NotImplemented()
+}
+
+// InternalError is an interface for errors raised because of an internal error
+type InternalError interface {
+ // Internal makes implementer into InternalError type
+ Internal()
+}
+
+/******************************
+ * Well-known Error Formatters
+ ******************************/
+
+// BadRequestErrorf creates an instance of BadRequestError
+func BadRequestErrorf(format string, params ...interface{}) error {
+ return badRequest(fmt.Sprintf(format, params...))
+}
+
+// NotFoundErrorf creates an instance of NotFoundError
+func NotFoundErrorf(format string, params ...interface{}) error {
+ return notFound(fmt.Sprintf(format, params...))
+}
+
+// ForbiddenErrorf creates an instance of ForbiddenError
+func ForbiddenErrorf(format string, params ...interface{}) error {
+ return forbidden(fmt.Sprintf(format, params...))
+}
+
+// NoServiceErrorf creates an instance of NoServiceError
+func NoServiceErrorf(format string, params ...interface{}) error {
+ return noService(fmt.Sprintf(format, params...))
+}
+
+// NotImplementedErrorf creates an instance of NotImplementedError
+func NotImplementedErrorf(format string, params ...interface{}) error {
+ return notImpl(fmt.Sprintf(format, params...))
+}
+
+// TimeoutErrorf creates an instance of TimeoutError
+func TimeoutErrorf(format string, params ...interface{}) error {
+ return timeout(fmt.Sprintf(format, params...))
+}
+
+// InternalErrorf creates an instance of InternalError
+func InternalErrorf(format string, params ...interface{}) error {
+ return internal(fmt.Sprintf(format, params...))
+}
+
+// InternalMaskableErrorf creates an instance of InternalError and MaskableError
+func InternalMaskableErrorf(format string, params ...interface{}) error {
+ return maskInternal(fmt.Sprintf(format, params...))
+}
+
+// RetryErrorf creates an instance of RetryError
+func RetryErrorf(format string, params ...interface{}) error {
+ return retry(fmt.Sprintf(format, params...))
+}
+
+/***********************
+ * Internal Error Types
+ ***********************/
+type badRequest string
+
+func (br badRequest) Error() string {
+ return string(br)
+}
+func (br badRequest) BadRequest() {}
+
+type maskBadRequest string
+
+type notFound string
+
+func (nf notFound) Error() string {
+ return string(nf)
+}
+func (nf notFound) NotFound() {}
+
+type forbidden string
+
+func (frb forbidden) Error() string {
+ return string(frb)
+}
+func (frb forbidden) Forbidden() {}
+
+type noService string
+
+func (ns noService) Error() string {
+ return string(ns)
+}
+func (ns noService) NoService() {}
+
+type maskNoService string
+
+type timeout string
+
+func (to timeout) Error() string {
+ return string(to)
+}
+func (to timeout) Timeout() {}
+
+type notImpl string
+
+func (ni notImpl) Error() string {
+ return string(ni)
+}
+func (ni notImpl) NotImplemented() {}
+
+type internal string
+
+func (nt internal) Error() string {
+ return string(nt)
+}
+func (nt internal) Internal() {}
+
+type maskInternal string
+
+func (mnt maskInternal) Error() string {
+ return string(mnt)
+}
+func (mnt maskInternal) Internal() {}
+func (mnt maskInternal) Maskable() {}
+
+type retry string
+
+func (r retry) Error() string {
+ return string(r)
+}
+func (r retry) Retry() {}
diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/ishidawataru/sctp/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/ishidawataru/sctp/README.md
new file mode 100644
index 000000000..574ececa8
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/README.md
@@ -0,0 +1,18 @@
+Stream Control Transmission Protocol (SCTP)
+----
+
+[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds)
+
+Examples
+----
+
+See `example/sctp.go`
+
+```console
+$ cd example
+$ go build
+$ # run example SCTP server
+$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1
+$ # run example SCTP client
+$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1
+```
diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go
new file mode 100644
index 000000000..cac1a889c
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/sctp.go
@@ -0,0 +1,656 @@
+package sctp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+const (
+ SOL_SCTP = 132
+
+ SCTP_BINDX_ADD_ADDR = 0x01
+ SCTP_BINDX_REM_ADDR = 0x02
+
+ MSG_NOTIFICATION = 0x8000
+)
+
+const (
+ SCTP_RTOINFO = iota
+ SCTP_ASSOCINFO
+ SCTP_INITMSG
+ SCTP_NODELAY
+ SCTP_AUTOCLOSE
+ SCTP_SET_PEER_PRIMARY_ADDR
+ SCTP_PRIMARY_ADDR
+ SCTP_ADAPTATION_LAYER
+ SCTP_DISABLE_FRAGMENTS
+ SCTP_PEER_ADDR_PARAMS
+ SCTP_DEFAULT_SENT_PARAM
+ SCTP_EVENTS
+ SCTP_I_WANT_MAPPED_V4_ADDR
+ SCTP_MAXSEG
+ SCTP_STATUS
+ SCTP_GET_PEER_ADDR_INFO
+ SCTP_DELAYED_ACK_TIME
+ SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME
+ SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME
+
+ SCTP_SOCKOPT_BINDX_ADD = 100
+ SCTP_SOCKOPT_BINDX_REM = 101
+ SCTP_SOCKOPT_PEELOFF = 102
+ SCTP_GET_PEER_ADDRS = 108
+ SCTP_GET_LOCAL_ADDRS = 109
+ SCTP_SOCKOPT_CONNECTX = 110
+ SCTP_SOCKOPT_CONNECTX3 = 111
+)
+
+const (
+ SCTP_EVENT_DATA_IO = 1 << iota
+ SCTP_EVENT_ASSOCIATION
+ SCTP_EVENT_ADDRESS
+ SCTP_EVENT_SEND_FAILURE
+ SCTP_EVENT_PEER_ERROR
+ SCTP_EVENT_SHUTDOWN
+ SCTP_EVENT_PARTIAL_DELIVERY
+ SCTP_EVENT_ADAPTATION_LAYER
+ SCTP_EVENT_AUTHENTICATION
+ SCTP_EVENT_SENDER_DRY
+
+ SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY
+)
+
+type SCTPNotificationType int
+
+const (
+ SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15))
+ SCTP_ASSOC_CHANGE
+ SCTP_PEER_ADDR_CHANGE
+ SCTP_SEND_FAILED
+ SCTP_REMOTE_ERROR
+ SCTP_SHUTDOWN_EVENT
+ SCTP_PARTIAL_DELIVERY_EVENT
+ SCTP_ADAPTATION_INDICATION
+ SCTP_AUTHENTICATION_INDICATION
+ SCTP_SENDER_DRY_EVENT
+)
+
+type NotificationHandler func([]byte) error
+
+type EventSubscribe struct {
+ DataIO uint8
+ Association uint8
+ Address uint8
+ SendFailure uint8
+ PeerError uint8
+ Shutdown uint8
+ PartialDelivery uint8
+ AdaptationLayer uint8
+ Authentication uint8
+ SenderDry uint8
+}
+
+const (
+ SCTP_CMSG_INIT = iota
+ SCTP_CMSG_SNDRCV
+ SCTP_CMSG_SNDINFO
+ SCTP_CMSG_RCVINFO
+ SCTP_CMSG_NXTINFO
+)
+
+const (
+ SCTP_UNORDERED = 1 << iota
+ SCTP_ADDR_OVER
+ SCTP_ABORT
+ SCTP_SACK_IMMEDIATELY
+ SCTP_EOF
+)
+
+const (
+ SCTP_MAX_STREAM = 0xffff
+)
+
+type InitMsg struct {
+ NumOstreams uint16
+ MaxInstreams uint16
+ MaxAttempts uint16
+ MaxInitTimeout uint16
+}
+
+type SndRcvInfo struct {
+ Stream uint16
+ SSN uint16
+ Flags uint16
+ _ uint16
+ PPID uint32
+ Context uint32
+ TTL uint32
+ TSN uint32
+ CumTSN uint32
+ AssocID int32
+}
+
+type SndInfo struct {
+ SID uint16
+ Flags uint16
+ PPID uint32
+ Context uint32
+ AssocID int32
+}
+
+type GetAddrsOld struct {
+ AssocID int32
+ AddrNum int32
+ Addrs uintptr
+}
+
+type NotificationHeader struct {
+ Type uint16
+ Flags uint16
+ Length uint32
+}
+
+type SCTPState uint16
+
+const (
+ SCTP_COMM_UP = SCTPState(iota)
+ SCTP_COMM_LOST
+ SCTP_RESTART
+ SCTP_SHUTDOWN_COMP
+ SCTP_CANT_STR_ASSOC
+)
+
+var nativeEndian binary.ByteOrder
+var sndRcvInfoSize uintptr
+
+func init() {
+ i := uint16(1)
+ if *(*byte)(unsafe.Pointer(&i)) == 0 {
+ nativeEndian = binary.BigEndian
+ } else {
+ nativeEndian = binary.LittleEndian
+ }
+ info := SndRcvInfo{}
+ sndRcvInfoSize = unsafe.Sizeof(info)
+}
+
+func toBuf(v interface{}) []byte {
+ var buf bytes.Buffer
+ binary.Write(&buf, nativeEndian, v)
+ return buf.Bytes()
+}
+
+func htons(h uint16) uint16 {
+ if nativeEndian == binary.LittleEndian {
+ return (h << 8 & 0xff00) | (h >> 8 & 0xff)
+ }
+ return h
+}
+
+var ntohs = htons
+
+func setNumOstreams(fd, num int) error {
+ param := InitMsg{
+ NumOstreams: uint16(num),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&param)), uintptr(optlen))
+ return err
+}
+
+type SCTPAddr struct {
+ IP []net.IP
+ Port int
+}
+
+func (a *SCTPAddr) ToRawSockAddrBuf() []byte {
+ buf := []byte{}
+ p := htons(uint16(a.Port))
+ for _, ip := range a.IP {
+ if ip.To4() != nil {
+ s := syscall.RawSockaddrInet4{
+ Family: syscall.AF_INET,
+ Port: p,
+ }
+ copy(s.Addr[:], ip.To4())
+ buf = append(buf, toBuf(s)...)
+ } else {
+ s := syscall.RawSockaddrInet6{
+ Family: syscall.AF_INET6,
+ Port: p,
+ }
+ copy(s.Addr[:], ip)
+ buf = append(buf, toBuf(s)...)
+ }
+ }
+ return buf
+}
+
+func (a *SCTPAddr) String() string {
+ var b bytes.Buffer
+
+ for n, i := range a.IP {
+ if a.IP[n].To4() != nil {
+ b.WriteString(i.String())
+ } else if a.IP[n].To16() != nil {
+ b.WriteRune('[')
+ b.WriteString(i.String())
+ b.WriteRune(']')
+ }
+ if n < len(a.IP)-1 {
+ b.WriteRune('/')
+ }
+ }
+ b.WriteRune(':')
+ b.WriteString(strconv.Itoa(a.Port))
+ return b.String()
+}
+
+func (a *SCTPAddr) Network() string { return "sctp" }
+
+func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) {
+ tcpnet := ""
+ switch network {
+ case "", "sctp":
+ case "sctp4":
+ tcpnet = "tcp4"
+ case "sctp6":
+ tcpnet = "tcp6"
+ default:
+ return nil, fmt.Errorf("invalid net: %s", network)
+ }
+ elems := strings.Split(addrs, "/")
+ if len(elems) == 0 {
+ return nil, fmt.Errorf("invalid input: %s", addrs)
+ }
+ ipaddrs := make([]net.IP, 0, len(elems))
+ for _, e := range elems[:len(elems)-1] {
+ tcpa, err := net.ResolveTCPAddr(tcpnet, e+":")
+ if err != nil {
+ return nil, err
+ }
+ ipaddrs = append(ipaddrs, tcpa.IP)
+ }
+ tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1])
+ if err != nil {
+ return nil, err
+ }
+ if tcpa.IP != nil {
+ ipaddrs = append(ipaddrs, tcpa.IP)
+ } else {
+ ipaddrs = nil
+ }
+ return &SCTPAddr{
+ IP: ipaddrs,
+ Port: tcpa.Port,
+ }, nil
+}
+
+func SCTPConnect(fd int, addr *SCTPAddr) (int, error) {
+ buf := addr.ToRawSockAddrBuf()
+ param := GetAddrsOld{
+ AddrNum: int32(len(buf)),
+ Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+ if err == nil {
+ return int(param.AssocID), nil
+ } else if err != syscall.ENOPROTOOPT {
+ return 0, err
+ }
+ r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+ return int(r0), err
+}
+
+func SCTPBind(fd int, addr *SCTPAddr, flags int) error {
+ var option uintptr
+ switch flags {
+ case SCTP_BINDX_ADD_ADDR:
+ option = SCTP_SOCKOPT_BINDX_ADD
+ case SCTP_BINDX_REM_ADDR:
+ option = SCTP_SOCKOPT_BINDX_REM
+ default:
+ return syscall.EINVAL
+ }
+
+ buf := addr.ToRawSockAddrBuf()
+ _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+ return err
+}
+
+type SCTPConn struct {
+ _fd int32
+ notificationHandler NotificationHandler
+}
+
+func (c *SCTPConn) fd() int {
+ return int(atomic.LoadInt32(&c._fd))
+}
+
+func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn {
+ conn := &SCTPConn{
+ _fd: int32(fd),
+ notificationHandler: handler,
+ }
+ return conn
+}
+
+func (c *SCTPConn) Write(b []byte) (int, error) {
+ return c.SCTPWrite(b, nil)
+}
+
+func (c *SCTPConn) Read(b []byte) (int, error) {
+ n, _, err := c.SCTPRead(b)
+ if n < 0 {
+ n = 0
+ }
+ return n, err
+}
+
+func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error {
+ param := InitMsg{
+ NumOstreams: uint16(numOstreams),
+ MaxInstreams: uint16(maxInstreams),
+ MaxAttempts: uint16(maxAttempts),
+ MaxInitTimeout: uint16(maxInitTimeout),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := setsockopt(c.fd(), SCTP_INITMSG, uintptr(unsafe.Pointer(&param)), uintptr(optlen))
+ return err
+}
+
+func (c *SCTPConn) SubscribeEvents(flags int) error {
+ var d, a, ad, sf, p, sh, pa, ada, au, se uint8
+ if flags&SCTP_EVENT_DATA_IO > 0 {
+ d = 1
+ }
+ if flags&SCTP_EVENT_ASSOCIATION > 0 {
+ a = 1
+ }
+ if flags&SCTP_EVENT_ADDRESS > 0 {
+ ad = 1
+ }
+ if flags&SCTP_EVENT_SEND_FAILURE > 0 {
+ sf = 1
+ }
+ if flags&SCTP_EVENT_PEER_ERROR > 0 {
+ p = 1
+ }
+ if flags&SCTP_EVENT_SHUTDOWN > 0 {
+ sh = 1
+ }
+ if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 {
+ pa = 1
+ }
+ if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 {
+ ada = 1
+ }
+ if flags&SCTP_EVENT_AUTHENTICATION > 0 {
+ au = 1
+ }
+ if flags&SCTP_EVENT_SENDER_DRY > 0 {
+ se = 1
+ }
+ param := EventSubscribe{
+ DataIO: d,
+ Association: a,
+ Address: ad,
+ SendFailure: sf,
+ PeerError: p,
+ Shutdown: sh,
+ PartialDelivery: pa,
+ AdaptationLayer: ada,
+ Authentication: au,
+ SenderDry: se,
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(optlen))
+ return err
+}
+
+func (c *SCTPConn) SubscribedEvents() (int, error) {
+ param := EventSubscribe{}
+ optlen := unsafe.Sizeof(param)
+ _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+ if err != nil {
+ return 0, err
+ }
+ var flags int
+ if param.DataIO > 0 {
+ flags |= SCTP_EVENT_DATA_IO
+ }
+ if param.Association > 0 {
+ flags |= SCTP_EVENT_ASSOCIATION
+ }
+ if param.Address > 0 {
+ flags |= SCTP_EVENT_ADDRESS
+ }
+ if param.SendFailure > 0 {
+ flags |= SCTP_EVENT_SEND_FAILURE
+ }
+ if param.PeerError > 0 {
+ flags |= SCTP_EVENT_PEER_ERROR
+ }
+ if param.Shutdown > 0 {
+ flags |= SCTP_EVENT_SHUTDOWN
+ }
+ if param.PartialDelivery > 0 {
+ flags |= SCTP_EVENT_PARTIAL_DELIVERY
+ }
+ if param.AdaptationLayer > 0 {
+ flags |= SCTP_EVENT_ADAPTATION_LAYER
+ }
+ if param.Authentication > 0 {
+ flags |= SCTP_EVENT_AUTHENTICATION
+ }
+ if param.SenderDry > 0 {
+ flags |= SCTP_EVENT_SENDER_DRY
+ }
+ return flags, nil
+}
+
+func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error {
+ optlen := unsafe.Sizeof(*info)
+ _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen))
+ return err
+}
+
+func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) {
+ info := &SndRcvInfo{}
+ optlen := unsafe.Sizeof(*info)
+ _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen)))
+ return info, err
+}
+
+func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) {
+ addr := &SCTPAddr{
+ IP: make([]net.IP, n),
+ }
+
+ switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family {
+ case syscall.AF_INET:
+ addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
+ tmp := syscall.RawSockaddrInet4{}
+ size := unsafe.Sizeof(tmp)
+ for i := 0; i < n; i++ {
+ a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer(
+ uintptr(ptr) + size*uintptr(i)))
+ addr.IP[i] = a.Addr[:]
+ }
+ case syscall.AF_INET6:
+ addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
+ tmp := syscall.RawSockaddrInet6{}
+ size := unsafe.Sizeof(tmp)
+ for i := 0; i < n; i++ {
+ a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer(
+ uintptr(ptr) + size*uintptr(i)))
+ addr.IP[i] = a.Addr[:]
+ }
+ default:
+ return nil, fmt.Errorf("unknown address family: %d", family)
+ }
+ return addr, nil
+}
+
+func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) {
+
+ type getaddrs struct {
+ assocId int32
+ addrNum uint32
+ addrs [4096]byte
+ }
+ param := getaddrs{
+ assocId: int32(id),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+ if err != nil {
+ return nil, err
+ }
+ return resolveFromRawAddr(unsafe.Pointer(&param.addrs), int(param.addrNum))
+}
+
+func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) {
+
+ type sctpGetSetPrim struct {
+ assocId int32
+ addrs [128]byte
+ }
+ param := sctpGetSetPrim{
+ assocId: int32(0),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+ if err != nil {
+ return nil, err
+ }
+ return resolveFromRawAddr(unsafe.Pointer(&param.addrs), 1)
+}
+
+func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) {
+ return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS)
+}
+
+func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) {
+ return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS)
+}
+
+func (c *SCTPConn) LocalAddr() net.Addr {
+ addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS)
+ if err != nil {
+ return nil
+ }
+ return addr
+}
+
+func (c *SCTPConn) RemoteAddr() net.Addr {
+ addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS)
+ if err != nil {
+ return nil
+ }
+ return addr
+}
+
+func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) {
+ type peeloffArg struct {
+ assocId int32
+ sd int
+ }
+ param := peeloffArg{
+ assocId: int32(id),
+ }
+ optlen := unsafe.Sizeof(param)
+ _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+ if err != nil {
+ return nil, err
+ }
+ return &SCTPConn{_fd: int32(param.sd)}, nil
+}
+
+func (c *SCTPConn) SetDeadline(t time.Time) error {
+ return syscall.EOPNOTSUPP
+}
+
+func (c *SCTPConn) SetReadDeadline(t time.Time) error {
+ return syscall.EOPNOTSUPP
+}
+
+func (c *SCTPConn) SetWriteDeadline(t time.Time) error {
+ return syscall.EOPNOTSUPP
+}
+
+type SCTPListener struct {
+ fd int
+ m sync.Mutex
+}
+
+func (ln *SCTPListener) Addr() net.Addr {
+ laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS)
+ if err != nil {
+ return nil
+ }
+ return laddr
+}
+
+type SCTPSndRcvInfoWrappedConn struct {
+ conn *SCTPConn
+}
+
+func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn {
+ conn.SubscribeEvents(SCTP_EVENT_DATA_IO)
+ return &SCTPSndRcvInfoWrappedConn{conn}
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) {
+ if len(b) < int(sndRcvInfoSize) {
+ return 0, syscall.EINVAL
+ }
+ info := (*SndRcvInfo)(unsafe.Pointer(&b[0]))
+ n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info)
+ return n + int(sndRcvInfoSize), err
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) {
+ if len(b) < int(sndRcvInfoSize) {
+ return 0, syscall.EINVAL
+ }
+ n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:])
+ if err != nil {
+ return n, err
+ }
+ copy(b, toBuf(info))
+ return n + int(sndRcvInfoSize), err
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Close() error {
+ return c.conn.Close()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error {
+ return c.conn.SetDeadline(t)
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error {
+ return c.conn.SetWriteDeadline(t)
+}
diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go
new file mode 100644
index 000000000..f93ab8622
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/sctp_linux.go
@@ -0,0 +1,227 @@
+// +build linux,!386
+
+package sctp
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+ // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386
+ r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT,
+ uintptr(fd),
+ SOL_SCTP,
+ optname,
+ optval,
+ optlen,
+ 0)
+ if errno != 0 {
+ return r0, r1, errno
+ }
+ return r0, r1, nil
+}
+
+func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+ // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386
+ r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT,
+ uintptr(fd),
+ SOL_SCTP,
+ optname,
+ optval,
+ optlen,
+ 0)
+ if errno != 0 {
+ return r0, r1, errno
+ }
+ return r0, r1, nil
+}
+
+// SCTPWrite sends b on the connection. When info is non-nil it is
+// serialized as an SCTP_CMSG_SNDRCV control message so the kernel applies
+// its stream/flags settings to the send. Returns the number of payload
+// bytes written.
+func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) {
+ var cbuf []byte
+ if info != nil {
+ cmsgBuf := toBuf(info)
+ hdr := &syscall.Cmsghdr{
+ Level: syscall.IPPROTO_SCTP,
+ Type: SCTP_CMSG_SNDRCV,
+ }
+
+ // bitwidth of hdr.Len is platform-specific,
+ // so we use hdr.SetLen() rather than directly setting hdr.Len
+ hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf)))
+ // NOTE(review): the buffer is built as header bytes + payload bytes
+ // without explicit alignment padding — relies on toBuf/CmsgSpace
+ // agreeing on layout; verify on platforms with stricter alignment.
+ cbuf = append(toBuf(hdr), cmsgBuf...)
+ }
+ return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0)
+}
+
+// parseSndRcvInfo scans the socket control messages in b and returns the
+// first SCTP_CMSG_SNDRCV payload reinterpreted as a *SndRcvInfo. It
+// returns (nil, nil) when no such message is present.
+func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) {
+	msgs, err := syscall.ParseSocketControlMessage(b)
+	if err != nil {
+		return nil, err
+	}
+	for _, msg := range msgs {
+		if msg.Header.Level != syscall.IPPROTO_SCTP {
+			continue
+		}
+		if msg.Header.Type == SCTP_CMSG_SNDRCV {
+			return (*SndRcvInfo)(unsafe.Pointer(&msg.Data[0])), nil
+		}
+	}
+	return nil, nil
+}
+
+// SCTPRead receives the next data message into b, returning the payload
+// length and the SndRcvInfo parsed from the ancillary data (nil when the
+// kernel attached none). SCTP notifications are dispatched to the
+// connection's notificationHandler and the loop continues until a data
+// message arrives. A zero-length receive with no ancillary data is
+// reported as io.EOF.
+func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) {
+ oob := make([]byte, 254)
+ for {
+ n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0)
+ if err != nil {
+ return n, nil, err
+ }
+
+ if n == 0 && oobn == 0 {
+ return 0, nil, io.EOF
+ }
+
+ if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil {
+ // A notification, not user data: hand it to the handler and
+ // keep waiting for a real data message.
+ if err := c.notificationHandler(b[:n]); err != nil {
+ return 0, nil, err
+ }
+ } else {
+ var info *SndRcvInfo
+ if oobn > 0 {
+ info, err = parseSndRcvInfo(oob[:oobn])
+ }
+ return n, info, err
+ }
+ }
+}
+
+// Close gracefully shuts down the association: it atomically claims the
+// descriptor (so concurrent or repeated Close calls see -1), sends an
+// SCTP_EOF message, shuts down both directions, and closes the fd.
+// Returns EBADF when the connection is nil or already closed.
+func (c *SCTPConn) Close() error {
+ if c != nil {
+ // Swap the fd out so only one caller performs the teardown.
+ fd := atomic.SwapInt32(&c._fd, -1)
+ // NOTE(review): the check excludes fd 0, a valid (if unusual)
+ // descriptor — TODO confirm whether fd >= 0 was intended.
+ if fd > 0 {
+ info := &SndRcvInfo{
+ Flags: SCTP_EOF,
+ }
+ // Best-effort graceful EOF; errors are deliberately ignored.
+ c.SCTPWrite(nil, info)
+ syscall.Shutdown(int(fd), syscall.SHUT_RDWR)
+ return syscall.Close(int(fd))
+ }
+ }
+ return syscall.EBADF
+}
+
+// ListenSCTP announces on the local SCTP address laddr. net selects the
+// address family: "sctp" (IPv6 only when laddr contains an IPv6 address),
+// "sctp4", or "sctp6"; any other value is an error. The listening socket
+// is configured with SCTP_MAX_STREAM outbound streams and, when laddr
+// supplies addresses, bound to them via sctp_bindx.
+func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) {
+	af := syscall.AF_INET
+	switch net {
+	case "sctp":
+		// Fall back to IPv6 when any requested local address is IPv6.
+		hasv6 := func(addr *SCTPAddr) bool {
+			if addr == nil {
+				return false
+			}
+			for _, ip := range addr.IP {
+				if ip.To4() == nil {
+					return true
+				}
+			}
+			return false
+		}
+		if hasv6(laddr) {
+			af = syscall.AF_INET6
+		}
+	case "sctp4":
+	case "sctp6":
+		af = syscall.AF_INET6
+	default:
+		return nil, fmt.Errorf("invalid net: %s", net)
+	}
+
+	sock, err := syscall.Socket(
+		af,
+		syscall.SOCK_STREAM,
+		syscall.IPPROTO_SCTP,
+	)
+	if err != nil {
+		return nil, err
+	}
+	err = setNumOstreams(sock, SCTP_MAX_STREAM)
+	if err != nil {
+		// Close the socket on every failure path so the fd is not leaked.
+		syscall.Close(sock)
+		return nil, err
+	}
+	if laddr != nil && len(laddr.IP) != 0 {
+		err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
+		if err != nil {
+			syscall.Close(sock)
+			return nil, err
+		}
+	}
+	err = syscall.Listen(sock, syscall.SOMAXCONN)
+	if err != nil {
+		syscall.Close(sock)
+		return nil, err
+	}
+	return &SCTPListener{
+		fd: sock,
+	}, nil
+}
+
+// Accept waits for and returns the next SCTP connection to the listener,
+// satisfying the net.Listener contract: on error the returned conn is nil
+// rather than a connection wrapping an invalid descriptor.
+func (ln *SCTPListener) Accept() (net.Conn, error) {
+	fd, _, err := syscall.Accept4(ln.fd, 0)
+	if err != nil {
+		return nil, err
+	}
+	return NewSCTPConn(fd, nil), nil
+}
+
+// Close shuts down and closes the listening socket. The Shutdown error is
+// deliberately ignored; the Close result is returned.
+func (ln *SCTPListener) Close() error {
+ syscall.Shutdown(ln.fd, syscall.SHUT_RDWR)
+ return syscall.Close(ln.fd)
+}
+
+// DialSCTP connects to the remote SCTP address raddr, optionally binding
+// the local side to laddr first. net selects the address family: "sctp"
+// (IPv6 when either endpoint has an IPv6 address), "sctp4", or "sctp6";
+// any other value is an error.
+func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
+	af := syscall.AF_INET
+	switch net {
+	case "sctp":
+		// Fall back to IPv6 when either endpoint holds an IPv6 address.
+		hasv6 := func(addr *SCTPAddr) bool {
+			if addr == nil {
+				return false
+			}
+			for _, ip := range addr.IP {
+				if ip.To4() == nil {
+					return true
+				}
+			}
+			return false
+		}
+		if hasv6(laddr) || hasv6(raddr) {
+			af = syscall.AF_INET6
+		}
+	case "sctp4":
+	case "sctp6":
+		af = syscall.AF_INET6
+	default:
+		return nil, fmt.Errorf("invalid net: %s", net)
+	}
+	sock, err := syscall.Socket(
+		af,
+		syscall.SOCK_STREAM,
+		syscall.IPPROTO_SCTP,
+	)
+	if err != nil {
+		return nil, err
+	}
+	err = setNumOstreams(sock, SCTP_MAX_STREAM)
+	if err != nil {
+		// Close the socket on every failure path so the fd is not leaked.
+		syscall.Close(sock)
+		return nil, err
+	}
+	if laddr != nil {
+		err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
+		if err != nil {
+			syscall.Close(sock)
+			return nil, err
+		}
+	}
+	_, err = SCTPConnect(sock, raddr)
+	if err != nil {
+		syscall.Close(sock)
+		return nil, err
+	}
+	return NewSCTPConn(sock, nil), nil
+}
diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
new file mode 100644
index 000000000..adcbf78b4
--- /dev/null
+++ b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
@@ -0,0 +1,47 @@
+// +build !linux linux,386
+
+package sctp
+
+import (
+ "errors"
+ "net"
+ "runtime"
+)
+
+var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH)
+
+// setsockopt is an unsupported-platform stub; it always fails.
+func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+ return 0, 0, ErrUnsupported
+}
+
+// getsockopt is an unsupported-platform stub; it always fails.
+func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+ return 0, 0, ErrUnsupported
+}
+
+// SCTPWrite is an unsupported-platform stub; it always fails.
+func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) {
+ return 0, ErrUnsupported
+}
+
+// SCTPRead is an unsupported-platform stub; it always fails.
+func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) {
+ return 0, nil, ErrUnsupported
+}
+
+// Close is an unsupported-platform stub; it always fails.
+func (c *SCTPConn) Close() error {
+ return ErrUnsupported
+}
+
+// ListenSCTP is an unsupported-platform stub; it always fails.
+func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) {
+ return nil, ErrUnsupported
+}
+
+// Accept is an unsupported-platform stub; it always fails.
+func (ln *SCTPListener) Accept() (net.Conn, error) {
+ return nil, ErrUnsupported
+}
+
+// Close is an unsupported-platform stub; it always fails.
+func (ln *SCTPListener) Close() error {
+ return ErrUnsupported
+}
+
+// DialSCTP is an unsupported-platform stub; it always fails.
+func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
+ return nil, ErrUnsupported
+}