summaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/containers/storage/VERSION2
-rw-r--r--vendor/github.com/containers/storage/drivers/btrfs/btrfs.go7
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go3
-rw-r--r--vendor/github.com/containers/storage/go.mod6
-rw-r--r--vendor/github.com/containers/storage/go.sum10
-rw-r--r--vendor/github.com/containers/storage/pkg/config/config.go2
-rw-r--r--vendor/github.com/coreos/go-iptables/iptables/iptables.go29
-rw-r--r--vendor/github.com/fatih/camelcase/.travis.yml3
-rw-r--r--vendor/github.com/fatih/camelcase/LICENSE.md20
-rw-r--r--vendor/github.com/fatih/camelcase/README.md58
-rw-r--r--vendor/github.com/fatih/camelcase/camelcase.go90
-rw-r--r--vendor/github.com/json-iterator/go/reflect_extension.go2
-rw-r--r--vendor/github.com/json-iterator/go/reflect_map.go10
-rw-r--r--vendor/github.com/json-iterator/go/reflect_struct_encoder.go1
-rw-r--r--vendor/github.com/klauspost/compress/huff0/bitwriter.go13
-rw-r--r--vendor/github.com/klauspost/compress/huff0/compress.go70
-rw-r--r--vendor/github.com/klauspost/compress/huff0/huff0.go7
-rw-r--r--vendor/github.com/klauspost/compress/zstd/README.md4
-rw-r--r--vendor/github.com/klauspost/compress/zstd/blockenc.go33
-rw-r--r--vendor/github.com/klauspost/compress/zstd/decoder.go29
-rw-r--r--vendor/github.com/klauspost/compress/zstd/enc_dfast.go313
-rw-r--r--vendor/github.com/klauspost/compress/zstd/enc_fast.go245
-rw-r--r--vendor/github.com/klauspost/compress/zstd/encoder.go71
-rw-r--r--vendor/github.com/mattn/go-shellwords/.travis.yml1
-rw-r--r--vendor/github.com/mattn/go-shellwords/README.md1
-rw-r--r--vendor/github.com/mattn/go-shellwords/shellwords.go6
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml164
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml328
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml101
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml98
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml219
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml123
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml366
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml70
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml100
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml76
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml144
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml221
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml55
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml141
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml661
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml63
-rw-r--r--vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml88
-rw-r--r--vendor/github.com/openshift/api/config/v1/types.go2
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_apiserver.go43
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_authentication.go4
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_build.go14
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_cluster_operator.go72
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_cluster_version.go32
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_console.go9
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_dns.go7
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_feature.go120
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_image.go40
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_infrastructure.go39
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_ingress.go18
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_network.go27
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_operatorhub.go19
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_project.go7
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_proxy.go5
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_scheduling.go7
-rw-r--r--vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go260
-rw-r--r--vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go199
-rw-r--r--vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go306
-rw-r--r--vendor/github.com/pkg/errors/cause.go29
-rw-r--r--vendor/github.com/pkg/errors/errors.go26
-rw-r--r--vendor/github.com/pkg/errors/go113.go33
-rw-r--r--vendor/github.com/uber/jaeger-client-go/CHANGELOG.md28
-rw-r--r--vendor/github.com/uber/jaeger-client-go/Makefile8
-rw-r--r--vendor/github.com/uber/jaeger-client-go/README.md2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/config/config.go13
-rw-r--r--vendor/github.com/uber/jaeger-client-go/constants.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go25
-rw-r--r--vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/reporter.go33
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span.go77
-rw-r--r--vendor/github.com/uber/jaeger-client-go/span_context.go4
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go272
-rw-r--r--vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go4
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer.go1
-rw-r--r--vendor/github.com/uber/jaeger-client-go/tracer_options.go12
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport/http.go11
-rw-r--r--vendor/github.com/uber/jaeger-client-go/transport_udp.go61
-rw-r--r--vendor/github.com/uber/jaeger-client-go/utils/udp_client.go2
-rw-r--r--vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go1
-rw-r--r--vendor/github.com/vishvananda/netlink/.gitignore1
-rw-r--r--vendor/github.com/vishvananda/netlink/.travis.yml6
-rw-r--r--vendor/github.com/vishvananda/netlink/addr_linux.go105
-rw-r--r--vendor/github.com/vishvananda/netlink/bridge_linux.go9
-rw-r--r--vendor/github.com/vishvananda/netlink/class.go141
-rw-r--r--vendor/github.com/vishvananda/netlink/class_linux.go145
-rw-r--r--vendor/github.com/vishvananda/netlink/conntrack_linux.go119
-rw-r--r--vendor/github.com/vishvananda/netlink/devlink_linux.go272
-rw-r--r--vendor/github.com/vishvananda/netlink/filter.go110
-rw-r--r--vendor/github.com/vishvananda/netlink/filter_linux.go240
-rw-r--r--vendor/github.com/vishvananda/netlink/fou_linux.go6
-rw-r--r--vendor/github.com/vishvananda/netlink/genetlink_linux.go3
-rw-r--r--vendor/github.com/vishvananda/netlink/go.mod8
-rw-r--r--vendor/github.com/vishvananda/netlink/go.sum4
-rw-r--r--vendor/github.com/vishvananda/netlink/handle_linux.go2
-rw-r--r--vendor/github.com/vishvananda/netlink/handle_unspecified.go12
-rw-r--r--vendor/github.com/vishvananda/netlink/ioctl_linux.go10
-rw-r--r--vendor/github.com/vishvananda/netlink/link.go242
-rw-r--r--vendor/github.com/vishvananda/netlink/link_linux.go1120
-rw-r--r--vendor/github.com/vishvananda/netlink/neigh.go7
-rw-r--r--vendor/github.com/vishvananda/netlink/neigh_linux.go189
-rw-r--r--vendor/github.com/vishvananda/netlink/netlink.go3
-rw-r--r--vendor/github.com/vishvananda/netlink/netlink_unspecified.go12
-rw-r--r--vendor/github.com/vishvananda/netlink/netns_linux.go141
-rw-r--r--vendor/github.com/vishvananda/netlink/netns_unspecified.go19
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/bridge_linux.go4
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go40
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/devlink_linux.go40
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/link_linux.go72
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/nl_linux.go66
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go35
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/route_linux.go26
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/seg6_linux.go43
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go76
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/syscall.go11
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/tc_linux.go166
-rw-r--r--vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go62
-rw-r--r--vendor/github.com/vishvananda/netlink/protinfo.go4
-rw-r--r--vendor/github.com/vishvananda/netlink/protinfo_linux.go7
-rw-r--r--vendor/github.com/vishvananda/netlink/qdisc.go48
-rw-r--r--vendor/github.com/vishvananda/netlink/qdisc_linux.go69
-rw-r--r--vendor/github.com/vishvananda/netlink/rdma_link_linux.go264
-rw-r--r--vendor/github.com/vishvananda/netlink/route.go2
-rw-r--r--vendor/github.com/vishvananda/netlink/route_linux.go229
-rw-r--r--vendor/github.com/vishvananda/netlink/rule_linux.go2
-rw-r--r--vendor/github.com/vishvananda/netlink/socket_linux.go5
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go6
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_policy.go26
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go19
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_state.go6
-rw-r--r--vendor/github.com/vishvananda/netlink/xfrm_state_linux.go27
-rw-r--r--vendor/github.com/vishvananda/netns/go.mod3
-rw-r--r--vendor/github.com/vishvananda/netns/go.sum0
-rw-r--r--vendor/gopkg.in/yaml.v2/scannerc.go47
-rw-r--r--vendor/gopkg.in/yaml.v2/yamlh.go1
-rw-r--r--vendor/modules.txt33
140 files changed, 9022 insertions, 1273 deletions
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index d32434904..928b5d437 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.15.5
+1.15.8-dev
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index 1f719fa85..5be1639d0 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -627,7 +627,12 @@ func (d *Driver) Remove(id string) error {
d.updateQuotaStatus()
if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
- return err
+ if d.quotaEnabled {
+ return err
+ }
+ // If quota is not enabled, fallback to rmdir syscall to delete subvolumes.
+ // This would allow unprivileged user to delete their owned subvolumes
+ // in kernel >= 4.18 without user_subvol_rm_alowed mount option.
}
if err := system.EnsureRemoveAll(dir); err != nil {
return err
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 16549e88b..25d885be9 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -142,8 +142,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if opts.mountProgram == "" {
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
- logrus.Errorf("'overlay' is not supported over %s", backingFs)
- return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
+ return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
}
}
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 378b427de..b7a05a65b 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -7,15 +7,15 @@ require (
github.com/Microsoft/hcsshim v0.8.7
github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
github.com/docker/go-units v0.4.0
- github.com/klauspost/compress v1.9.4
+ github.com/klauspost/compress v1.9.7
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/pgzip v1.2.1
- github.com/mattn/go-shellwords v1.0.6
+ github.com/mattn/go-shellwords v1.0.7
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/selinux v1.3.0
- github.com/pkg/errors v0.8.1
+ github.com/pkg/errors v0.9.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
github.com/sirupsen/logrus v1.4.2
github.com/spf13/pflag v1.0.3 // indirect
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index f31828d2a..ffda0c42f 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -73,6 +73,10 @@ github.com/klauspost/compress v1.9.3 h1:hkFELABwacUEgBfiguNeQydKv3M9pawBq8o24Ypw
github.com/klauspost/compress v1.9.3/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -83,6 +87,8 @@ github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9w
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWePHKOs=
+github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
@@ -108,6 +114,10 @@ github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOl
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.0 h1:J8lpUdobwIeCI7OiSxHqEwJUKvJwicL5+3v1oe2Yb4k=
+github.com/pkg/errors v0.9.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index f3f855c32..9e1131823 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -236,7 +236,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
}
- case "overlay":
+ case "overlay", "overlay2":
if options.Overlay.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
} else if options.IgnoreChownErrors != "" {
diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
index 2ed875bb5..1074275b0 100644
--- a/vendor/github.com/coreos/go-iptables/iptables/iptables.go
+++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go
@@ -48,9 +48,13 @@ func (e *Error) Error() string {
// IsNotExist returns true if the error is due to the chain or rule not existing
func (e *Error) IsNotExist() bool {
- return e.ExitStatus() == 1 &&
- (e.msg == fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", getIptablesCommand(e.proto)) ||
- e.msg == fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(e.proto)))
+ if e.ExitStatus() != 1 {
+ return false
+ }
+ cmdIptables := getIptablesCommand(e.proto)
+ msgNoRuleExist := fmt.Sprintf("%s: Bad rule (does a matching rule exist in that chain?).\n", cmdIptables)
+ msgNoChainExist := fmt.Sprintf("%s: No chain/target/match by that name.\n", cmdIptables)
+ return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist)
}
// Protocol to differentiate between IPv4 and IPv6
@@ -101,7 +105,13 @@ func NewWithProtocol(proto Protocol) (*IPTables, error) {
return nil, err
}
vstring, err := getIptablesVersionString(path)
+ if err != nil {
+ return nil, fmt.Errorf("could not get iptables version: %v", err)
+ }
v1, v2, v3, mode, err := extractIptablesVersion(vstring)
+ if err != nil {
+ return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err)
+ }
checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3)
@@ -348,18 +358,6 @@ func (ipt *IPTables) executeList(args []string) ([]string, error) {
rules = rules[:len(rules)-1]
}
- // nftables mode doesn't return an error code when listing a non-existent
- // chain. Patch that up.
- if len(rules) == 0 && ipt.mode == "nf_tables" {
- v := 1
- return nil, &Error{
- cmd: exec.Cmd{Args: args},
- msg: fmt.Sprintf("%s: No chain/target/match by that name.\n", getIptablesCommand(ipt.proto)),
- proto: ipt.proto,
- exitStatus: &v,
- }
- }
-
for i, rule := range rules {
rules[i] = filterRuleOutput(rule)
}
@@ -437,6 +435,7 @@ func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
}
ul, err := fmu.tryLock()
if err != nil {
+ syscall.Close(fmu.fd)
return err
}
defer ul.Unlock()
diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml
deleted file mode 100644
index 3489e3871..000000000
--- a/vendor/github.com/fatih/camelcase/.travis.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-language: go
-go: 1.x
-
diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md
deleted file mode 100644
index aa4a536ca..000000000
--- a/vendor/github.com/fatih/camelcase/LICENSE.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Fatih Arslan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md
deleted file mode 100644
index 105a6ae33..000000000
--- a/vendor/github.com/fatih/camelcase/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase)
-
-CamelCase is a Golang (Go) package to split the words of a camelcase type
-string into a slice of words. It can be used to convert a camelcase word (lower
-or upper case) into any type of word.
-
-## Splitting rules:
-
-1. If string is not valid UTF-8, return it without splitting as
- single item array.
-2. Assign all unicode characters into one of 4 sets: lower case
- letters, upper case letters, numbers, and all other characters.
-3. Iterate through characters of string, introducing splits
- between adjacent characters that belong to different sets.
-4. Iterate through array of split strings, and if a given string
- is upper case:
- * if subsequent string is lower case:
- * move last character of upper case string to beginning of
- lower case string
-
-## Install
-
-```bash
-go get github.com/fatih/camelcase
-```
-
-## Usage and examples
-
-```go
-splitted := camelcase.Split("GolangPackage")
-
-fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package"
-```
-
-Both lower camel case and upper camel case are supported. For more info please
-check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase)
-
-Below are some example cases:
-
-```
-"" => []
-"lowercase" => ["lowercase"]
-"Class" => ["Class"]
-"MyClass" => ["My", "Class"]
-"MyC" => ["My", "C"]
-"HTML" => ["HTML"]
-"PDFLoader" => ["PDF", "Loader"]
-"AString" => ["A", "String"]
-"SimpleXMLParser" => ["Simple", "XML", "Parser"]
-"vimRPCPlugin" => ["vim", "RPC", "Plugin"]
-"GL11Version" => ["GL", "11", "Version"]
-"99Bottles" => ["99", "Bottles"]
-"May5" => ["May", "5"]
-"BFG9000" => ["BFG", "9000"]
-"BöseÜberraschung" => ["Böse", "Überraschung"]
-"Two spaces" => ["Two", " ", "spaces"]
-"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
-```
diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go
deleted file mode 100644
index 02160c9a4..000000000
--- a/vendor/github.com/fatih/camelcase/camelcase.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Package camelcase is a micro package to split the words of a camelcase type
-// string into a slice of words.
-package camelcase
-
-import (
- "unicode"
- "unicode/utf8"
-)
-
-// Split splits the camelcase word and returns a list of words. It also
-// supports digits. Both lower camel case and upper camel case are supported.
-// For more info please check: http://en.wikipedia.org/wiki/CamelCase
-//
-// Examples
-//
-// "" => [""]
-// "lowercase" => ["lowercase"]
-// "Class" => ["Class"]
-// "MyClass" => ["My", "Class"]
-// "MyC" => ["My", "C"]
-// "HTML" => ["HTML"]
-// "PDFLoader" => ["PDF", "Loader"]
-// "AString" => ["A", "String"]
-// "SimpleXMLParser" => ["Simple", "XML", "Parser"]
-// "vimRPCPlugin" => ["vim", "RPC", "Plugin"]
-// "GL11Version" => ["GL", "11", "Version"]
-// "99Bottles" => ["99", "Bottles"]
-// "May5" => ["May", "5"]
-// "BFG9000" => ["BFG", "9000"]
-// "BöseÜberraschung" => ["Böse", "Überraschung"]
-// "Two spaces" => ["Two", " ", "spaces"]
-// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
-//
-// Splitting rules
-//
-// 1) If string is not valid UTF-8, return it without splitting as
-// single item array.
-// 2) Assign all unicode characters into one of 4 sets: lower case
-// letters, upper case letters, numbers, and all other characters.
-// 3) Iterate through characters of string, introducing splits
-// between adjacent characters that belong to different sets.
-// 4) Iterate through array of split strings, and if a given string
-// is upper case:
-// if subsequent string is lower case:
-// move last character of upper case string to beginning of
-// lower case string
-func Split(src string) (entries []string) {
- // don't split invalid utf8
- if !utf8.ValidString(src) {
- return []string{src}
- }
- entries = []string{}
- var runes [][]rune
- lastClass := 0
- class := 0
- // split into fields based on class of unicode character
- for _, r := range src {
- switch true {
- case unicode.IsLower(r):
- class = 1
- case unicode.IsUpper(r):
- class = 2
- case unicode.IsDigit(r):
- class = 3
- default:
- class = 4
- }
- if class == lastClass {
- runes[len(runes)-1] = append(runes[len(runes)-1], r)
- } else {
- runes = append(runes, []rune{r})
- }
- lastClass = class
- }
- // handle upper case -> lower case sequences, e.g.
- // "PDFL", "oader" -> "PDF", "Loader"
- for i := 0; i < len(runes)-1; i++ {
- if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) {
- runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...)
- runes[i] = runes[i][:len(runes[i])-1]
- }
- }
- // construct []string from results
- for _, s := range runes {
- if len(s) > 0 {
- entries = append(entries, string(s))
- }
- }
- return
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index e27e8d191..80320cd64 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -341,7 +341,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
- if tag == "-" {
+ if tag == "-" || field.Name() == "_" {
continue
}
tagParts := strings.Split(tag, ",")
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 08e9a3912..9e2b623fe 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -290,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
mapIter := encoder.mapType.UnsafeIterate(ptr)
subStream := stream.cfg.BorrowStream(nil)
+ subStream.Attachment = stream.Attachment
subIter := stream.cfg.BorrowIterator(nil)
keyValues := encodedKeyValues{}
for mapIter.HasNext() {
- subStream.buf = make([]byte, 0, 64)
key, elem := mapIter.UnsafeNext()
+ subStreamIndex := subStream.Buffered()
encoder.keyEncoder.Encode(key, subStream)
if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
stream.Error = subStream.Error
}
- encodedKey := subStream.Buffer()
+ encodedKey := subStream.Buffer()[subStreamIndex:]
subIter.ResetBytes(encodedKey)
decodedKey := subIter.ReadString()
if stream.indention > 0 {
@@ -310,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.elemEncoder.Encode(elem, subStream)
keyValues = append(keyValues, encodedKV{
key: decodedKey,
- keyValue: subStream.Buffer(),
+ keyValue: subStream.Buffer()[subStreamIndex:],
})
}
sort.Sort(keyValues)
@@ -320,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
stream.Write(keyValue.keyValue)
}
+ if subStream.Error != nil && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
stream.WriteObjectEnd()
stream.cfg.ReturnStream(subStream)
stream.cfg.ReturnIterator(subIter)
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
index d0759cf64..152e3ef5a 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -200,6 +200,7 @@ type stringModeStringEncoder struct {
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
+ tempStream.Attachment = stream.Attachment
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec0c3fc53..bda4021ef 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -38,7 +38,7 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// encSymbol will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
enc := ct[symbol]
@@ -46,6 +46,17 @@ func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
b.nBits += enc.nBits
}
+// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+ encA := ct[av]
+ encB := ct[bv]
+ sh := b.nBits & 63
+ combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+ b.bitContainer |= combined << sh
+ b.nBits += encA.nBits + encB.nBits
+}
+
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 51e00aaeb..0843cb014 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -80,9 +80,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
if s.Reuse == ReusePolicyPrefer && canReuse {
keepTable := s.cTable
+ keepTL := s.actualTableLog
s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
s.Out, err = compressor(in)
s.cTable = keepTable
+ s.actualTableLog = keepTL
if err == nil && len(s.Out) < wantSize {
s.OutData = s.Out
return s.Out, true, nil
@@ -92,7 +95,6 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
}
// Calculate new table.
- s.optimalTableLog()
err = s.buildCTable()
if err != nil {
return nil, false, err
@@ -109,9 +111,15 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
if oldSize <= hSize+newSize || hSize+12 >= wantSize {
// Retain cTable even if we re-use.
keepTable := s.cTable
+ keepTL := s.actualTableLog
+
s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
s.Out, err = compressor(in)
+
+ // Restore ctable.
s.cTable = keepTable
+ s.actualTableLog = keepTL
if err != nil {
return nil, false, err
}
@@ -142,7 +150,7 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
return nil, false, ErrIncompressible
}
// Move current table into previous.
- s.prevTable, s.cTable = s.cTable, s.prevTable[:0]
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
s.OutData = s.Out[len(s.OutTable):]
return s.Out, false, nil
}
@@ -163,28 +171,23 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
for i := len(src) & 3; i > 0; i-- {
bw.encSymbol(cTable, src[n+i-1])
}
+ n -= 4
if s.actualTableLog <= 8 {
- n -= 4
for ; n >= 0; n -= 4 {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encSymbol(cTable, tmp[3])
- bw.encSymbol(cTable, tmp[2])
- bw.encSymbol(cTable, tmp[1])
- bw.encSymbol(cTable, tmp[0])
+ bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+ bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
} else {
- n -= 4
for ; n >= 0; n -= 4 {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encSymbol(cTable, tmp[3])
- bw.encSymbol(cTable, tmp[2])
+ bw.encTwoSymbols(cTable, tmp[3], tmp[2])
bw.flush32()
- bw.encSymbol(cTable, tmp[1])
- bw.encSymbol(cTable, tmp[0])
+ bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
}
err := bw.close()
@@ -322,9 +325,26 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+func (s *Scratch) validateTable(c cTable) bool {
+ if len(c) < int(s.symbolLen) {
+ return false
+ }
+ for i, v := range s.count[:s.symbolLen] {
+ if v != 0 {
+ if c[i].nBits == 0 {
+ return false
+ }
+ if c[i].nBits > s.actualTableLog {
+ return false
+ }
+ }
+ }
+ return true
+}
+
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
- minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1
+ minBitsSrc := highBit32(uint32(s.br.remain())) + 1
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
if minBitsSrc < minBitsSymbols {
return uint8(minBitsSrc)
@@ -336,7 +356,7 @@ func (s *Scratch) minTableLog() uint8 {
func (s *Scratch) optimalTableLog() {
tableLog := s.TableLog
minBits := s.minTableLog()
- maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2
+ maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
if maxBitsSrc < tableLog {
// Accuracy can be reduced
tableLog = maxBitsSrc
@@ -363,6 +383,7 @@ type cTableEntry struct {
const huffNodesMask = huffNodesLen - 1
func (s *Scratch) buildCTable() error {
+ s.optimalTableLog()
s.huffSort()
if cap(s.cTable) < maxSymbolValue+1 {
s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
@@ -439,7 +460,7 @@ func (s *Scratch) buildCTable() error {
return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
}
var nbPerRank [tableLogMax + 1]uint16
- var valPerRank [tableLogMax + 1]uint16
+ var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] {
nbPerRank[v.nbBits]++
}
@@ -455,16 +476,17 @@ func (s *Scratch) buildCTable() error {
}
// push nbBits per symbol, symbol order
- // TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt)
for _, v := range huffNode[:nonNullRank+1] {
s.cTable[v.symbol].nBits = v.nbBits
}
// assign value within rank, symbol order
- for n, val := range s.cTable[:s.symbolLen] {
- v := valPerRank[val.nBits]
- s.cTable[n].val = v
- valPerRank[val.nBits] = v + 1
+ t := s.cTable[:s.symbolLen]
+ for n, val := range t {
+ nbits := val.nBits & 15
+ v := valPerRank[nbits]
+ t[n].val = v
+ valPerRank[nbits] = v + 1
}
return nil
@@ -488,10 +510,12 @@ func (s *Scratch) huffSort() {
r := highBit32(v+1) & 31
rank[r].base++
}
- for n := 30; n > 0; n-- {
+ // maxBitLength is log2(BlockSizeMax) + 1
+ const maxBitLength = 18 + 1
+ for n := maxBitLength; n > 0; n-- {
rank[n-1].base += rank[n].base
}
- for n := range rank[:] {
+ for n := range rank[:maxBitLength] {
rank[n].current = rank[n].base
}
for n, c := range s.count[:s.symbolLen] {
@@ -510,7 +534,7 @@ func (s *Scratch) huffSort() {
}
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
- maxNbBits := s.TableLog
+ maxNbBits := s.actualTableLog
huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen]
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 6bc23bbf0..53249df05 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -83,7 +83,7 @@ type Scratch struct {
MaxSymbolValue uint8
// TableLog will attempt to override the tablelog for the next block.
- // Must be <= 11.
+ // Must be <= 11 and >= 5.
TableLog uint8
// Reuse will specify the reuse policy
@@ -105,6 +105,7 @@ type Scratch struct {
maxCount int // count of the most probable symbol
clearCount bool // clear count
actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table
prevTable cTable // Table used for previous compression.
cTable cTable // compression table
dt dTable // decompression table
@@ -127,8 +128,8 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
if s.TableLog == 0 {
s.TableLog = tableLogDefault
}
- if s.TableLog > tableLogMax {
- return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
+ if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+ return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
}
if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
s.MaxDecodedSize = BlockSizeMax
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index 52dc0aee3..bc977a302 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -36,7 +36,7 @@ so as always, testing is recommended.
For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-The "Default" compression ration is roughly equivalent to zstd level 3 (default).
+The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
@@ -390,4 +390,4 @@ For sending files for reproducing errors use a service like [goobox](https://goo
For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
-This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. \ No newline at end of file
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 99eccda11..507757d52 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -299,6 +299,20 @@ func (b *blockEnc) encodeRaw(a []byte) {
}
}
+// encodeRawTo can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(src)))
+ bh.setType(blockTypeRaw)
+ dst = bh.appendTo(dst)
+ dst = append(dst, src...)
+ if debug {
+ println("Adding RAW block, length", len(src))
+ }
+ return dst
+}
+
// encodeLits can be used if the block is only litLen.
func (b *blockEnc) encodeLits(raw bool) error {
var bh blockHeader
@@ -324,18 +338,10 @@ func (b *blockEnc) encodeLits(raw bool) error {
if len(b.literals) >= 1024 {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
- if len(out) > len(b.literals)-len(b.literals)>>4 {
- // Bail out of compression is too little.
- err = huff0.ErrIncompressible
- }
} else if len(b.literals) > 32 {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
- if len(out) > len(b.literals)-len(b.literals)>>4 {
- // Bail out of compression is too little.
- err = huff0.ErrIncompressible
- }
} else {
err = huff0.ErrIncompressible
}
@@ -437,7 +443,7 @@ func fuzzFseEncoder(data []byte) int {
return 1
}
-// encode will encode the block and put the output in b.output.
+// encode will encode the block and append the output to b.output.
func (b *blockEnc) encode(raw bool) error {
if len(b.sequences) == 0 {
return b.encodeLits(raw)
@@ -451,6 +457,8 @@ func (b *blockEnc) encode(raw bool) error {
var lh literalsHeader
bh.setLast(b.last)
bh.setType(blockTypeCompressed)
+ // Store offset of the block header. Needed when we know the size.
+ bhOffset := len(b.output)
b.output = bh.appendTo(b.output)
var (
@@ -468,6 +476,7 @@ func (b *blockEnc) encode(raw bool) error {
} else {
err = huff0.ErrIncompressible
}
+
switch err {
case huff0.ErrIncompressible:
lh.setType(literalsBlockRaw)
@@ -735,18 +744,18 @@ func (b *blockEnc) encode(raw bool) error {
}
b.output = wr.out
- if len(b.output)-3 >= b.size {
+ if len(b.output)-3-bhOffset >= b.size {
// Maybe even add a bigger margin.
b.litEnc.Reuse = huff0.ReusePolicyNone
return errIncompressible
}
// Size is output minus block header.
- bh.setSize(uint32(len(b.output)) - 3)
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
if debug {
println("Rewriting block header", bh)
}
- _ = bh.appendTo(b.output[:0])
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
b.coders.setPrev(llEnc, mlEnc, ofEnc)
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 1de94eef0..35a3cda91 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -388,6 +388,35 @@ func (d *Decoder) Close() {
d.current.err = ErrDecoderClosed
}
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+ return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+ d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+ return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+ return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+ c.d.Close()
+ return nil
+}
+
type decodeOutput struct {
d *blockDec
b []byte
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 2f41bcd0d..ee3b09b02 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -411,3 +411,316 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur > (1<<30)+e.maxMatchOff {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ stepSize := int32(e.o.targetLength)
+ if stepSize == 0 {
+ stepSize++
+ }
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ for {
+
+ nextHashS := hash5(cv, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if len(blk.sequences) > 2 {
+ if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hash8(cv, dFastLongTableBits)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debug && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debug && s <= t {
+ panic("s <= t")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ //l := e.matchlen(s+4, t+4, src) + 4
+ l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+ e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hash5(cv0, dFastShortTableBits)] = te0
+ e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+ cv = load6432(src, s)
+
+ if len(blk.sequences) <= 2 {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hash5(cv1>>8, dFastShortTableBits)
+ nextHashL := hash8(cv, dFastLongTableBits)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 6f388de04..0bdddac5b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -329,6 +329,246 @@ encodeLoop:
}
}
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debug {
+ if len(src) > maxBlockSize {
+ panic("src too big")
+ }
+ }
+ // Protect against e.cur wraparound.
+ if e.cur > (1<<30)+e.maxMatchOff {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debug {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches, we ensure any repeat offset refers to data within this block.
+
+ for {
+ nextHash := hash6(cv, hashLog)
+ nextHash2 := hash6(cv>>8, hashLog)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ // lenght := 4 + e.matchlen(s+6, repIndex+4, src)
+ lenght := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+
+ seq.matchLen = uint32(lenght - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += lenght + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debug {
+ println("repeat ended", s, lenght)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debug && s <= t {
+ panic("s <= t")
+ }
+ if debug && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debug && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debug && s <= t {
+ panic("s <= t")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ //l := e.matchlenNoHist(s+4, t+4, src) + 4
+ l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+ l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+ // Store this, since we have it.
+ nextHash := hash6(cv, hashLog)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debug {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
func (e *fastEncoder) addBlock(src []byte) int32 {
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
@@ -362,6 +602,11 @@ func (e *fastEncoder) UseBlock(enc *blockEnc) {
e.blk = enc
}
+func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 {
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
+
func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
if debug {
if s < 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index f413042f4..366dd66bd 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -29,6 +29,7 @@ type Encoder struct {
type encoder interface {
Encode(blk *blockEnc, src []byte)
+ EncodeNoHist(blk *blockEnc, src []byte)
Block() *blockEnc
CRC() *xxhash.Digest
AppendCRC([]byte) []byte
@@ -433,7 +434,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}()
enc.Reset()
blk := enc.Block()
- single := len(src) > 1<<20
+ // Use single segments when above minimum window and below 1MB.
+ single := len(src) < 1<<20 && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}
@@ -454,25 +456,22 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
panic(err)
}
- for len(src) > 0 {
- todo := src
- if len(todo) > e.o.blockSize {
- todo = todo[:e.o.blockSize]
- }
- src = src[len(todo):]
+ if len(src) <= e.o.blockSize && len(src) <= maxBlockSize {
+ // Slightly faster with no history and everything in one block.
if e.o.crc {
- _, _ = enc.CRC().Write(todo)
+ _, _ = enc.CRC().Write(src)
}
blk.reset(nil)
- blk.pushOffsets()
- enc.Encode(blk, todo)
- if len(src) == 0 {
- blk.last = true
- }
- err := errIncompressible
+ blk.last = true
+ enc.EncodeNoHist(blk, src)
+
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
- if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+ err := errIncompressible
+ oldout := blk.output
+ if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+ // Output directly to dst
+ blk.output = dst
err = blk.encode(e.o.noEntropy)
}
@@ -481,13 +480,49 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if debug {
println("Storing incompressible block as raw")
}
- blk.encodeRaw(todo)
- blk.popOffsets()
+ dst = blk.encodeRawTo(dst, src)
case nil:
+ dst = blk.output
default:
panic(err)
}
- dst = append(dst, blk.output...)
+ blk.output = oldout
+ } else {
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.reset(nil)
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+ err = blk.encode(e.o.noEntropy)
+ }
+
+ switch err {
+ case errIncompressible:
+ if debug {
+ println("Storing incompressible block as raw")
+ }
+ dst = blk.encodeRawTo(dst, todo)
+ blk.popOffsets()
+ case nil:
+ dst = append(dst, blk.output...)
+ default:
+ panic(err)
+ }
+ }
}
if e.o.crc {
dst = enc.AppendCRC(dst)
diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml
index 6294d337f..b2904bffc 100644
--- a/vendor/github.com/mattn/go-shellwords/.travis.yml
+++ b/vendor/github.com/mattn/go-shellwords/.travis.yml
@@ -11,4 +11,3 @@ script:
after_success:
- bash <(curl -s https://codecov.io/bash)
-
diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md
index 9e1e65045..e91902f40 100644
--- a/vendor/github.com/mattn/go-shellwords/README.md
+++ b/vendor/github.com/mattn/go-shellwords/README.md
@@ -2,6 +2,7 @@
[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords)
[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords)
+[![GoDoc](https://godoc.org/github.com/mattn/go-shellwords?status.svg)](http://godoc.org/github.com/mattn/go-shellwords)
Parse line as shell words.
diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go
index 2dca7f136..ff5e73091 100644
--- a/vendor/github.com/mattn/go-shellwords/shellwords.go
+++ b/vendor/github.com/mattn/go-shellwords/shellwords.go
@@ -144,11 +144,17 @@ loop:
}
case '"':
if !singleQuoted && !dollarQuote {
+ if doubleQuoted && buf == "" {
+ got = true
+ }
doubleQuoted = !doubleQuoted
continue
}
case '\'':
if !doubleQuoted && !dollarQuote {
+ if singleQuoted && buf == "" {
+ got = true
+ }
singleQuoted = !singleQuoted
continue
}
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
new file mode 100644
index 000000000..114db5aec
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
@@ -0,0 +1,164 @@
+kind: CustomResourceDefinition
+apiVersion: apiextensions.k8s.io/v1beta1
+metadata:
+ name: clusteroperators.config.openshift.io
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.versions[?(@.name=="operator")].version
+ description: The version the operator is at.
+ name: Version
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Available")].status
+ description: Whether the operator is running and stable.
+ name: Available
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Progressing")].status
+ description: Whether the operator is processing changes.
+ name: Progressing
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Degraded")].status
+ description: Whether the operator is degraded.
+ name: Degraded
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
+ description: The time the operator's Available status last changed.
+ name: Since
+ type: date
+ group: config.openshift.io
+ names:
+ kind: ClusterOperator
+ listKind: ClusterOperatorList
+ plural: clusteroperators
+ singular: clusteroperator
+ shortNames:
+ - co
+ preserveUnknownFields: false
+ scope: Cluster
+ subresources:
+ status: {}
+ version: v1
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ validation:
+ openAPIV3Schema:
+ description: ClusterOperator is the Custom Resource object which holds the current
+ state of an operator. This object is used by operators to convey their state
+ to the rest of the cluster.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds configuration that could apply to any operator.
+ type: object
+ status:
+ description: status holds the information about the state of an operator. It
+ is consistent with status information across the Kubernetes ecosystem.
+ type: object
+ properties:
+ conditions:
+ description: conditions describes the state of the operator's managed
+ and monitored components.
+ type: array
+ items:
+ description: ClusterOperatorStatusCondition represents the state of
+ the operator's managed and monitored components.
+ type: object
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the time of the last update
+ to the current status property.
+ type: string
+ format: date-time
+ message:
+ description: message provides additional information about the
+ current condition. This is only to be consumed by humans.
+ type: string
+ reason:
+ description: reason is the CamelCase reason for the condition's
+ current status.
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: type specifies the aspect reported by this condition.
+ type: string
+ extension:
+ description: extension contains any additional status information specific
+ to the operator which owns this status object.
+ type: object
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ relatedObjects:
+ description: 'relatedObjects is a list of objects that are "interesting"
+ or related to this operator. Common uses are: 1. the detailed resource
+ driving the operator 2. operator namespaces 3. operand namespaces'
+ type: array
+ items:
+ description: ObjectReference contains enough information to let you
+ inspect or modify the referred object.
+ type: object
+ required:
+ - group
+ - name
+ - resource
+ properties:
+ group:
+ description: group of the referent.
+ type: string
+ name:
+ description: name of the referent.
+ type: string
+ namespace:
+ description: namespace of the referent.
+ type: string
+ resource:
+ description: resource of the referent.
+ type: string
+ versions:
+ description: versions is a slice of operator and operand version tuples. Operators
+ which manage multiple operands will have multiple operand entries
+ in the array. Available operators must report the version of the
+ operator itself with the name "operator". An operator reports a new
+ "operator" version when it has rolled out the new version to all of
+ its operands.
+ type: array
+ items:
+ type: object
+ required:
+ - name
+ - version
+ properties:
+ name:
+ description: name is the name of the particular operand this version
+ is for. It usually matches container images, not operators.
+ type: string
+ version:
+ description: version indicates which version of a particular operand
+ is currently being managed. It must always match the Available
+ operand. If 1.0.0 is Available, then this must indicate 1.0.0
+ even if the operator is trying to rollout 1.1.0
+ type: string
+ versions:
+ - name: v1
+ served: true
+ storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
new file mode 100644
index 000000000..ccde0db23
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
@@ -0,0 +1,328 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterversions.config.openshift.io
+spec:
+ group: config.openshift.io
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ scope: Cluster
+ subresources:
+ status: {}
+ names:
+ plural: clusterversions
+ singular: clusterversion
+ kind: ClusterVersion
+ preserveUnknownFields: false
+ additionalPrinterColumns:
+ - name: Version
+ type: string
+ JSONPath: .status.history[?(@.state=="Completed")].version
+ - name: Available
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Available")].status
+ - name: Progressing
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Progressing")].status
+ - name: Since
+ type: date
+ JSONPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
+ - name: Status
+ type: string
+ JSONPath: .status.conditions[?(@.type=="Progressing")].message
+ validation:
+ openAPIV3Schema:
+ description: ClusterVersion is the configuration for the ClusterVersionOperator.
+ This is where parameters related to automatic updates can be set.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec is the desired state of the cluster version - the operator
+ will work to ensure that the desired version is applied to the cluster.
+ type: object
+ required:
+ - clusterID
+ properties:
+ channel:
+ description: channel is an identifier for explicitly requesting that
+ a non-default set of updates be applied to this cluster. The default
+                channel will contain stable updates that are appropriate for production
+ clusters.
+ type: string
+ clusterID:
+ description: clusterID uniquely identifies this cluster. This is expected
+ to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ in hexadecimal values). This is a required field.
+ type: string
+ desiredUpdate:
+ description: "desiredUpdate is an optional field that indicates the
+ desired value of the cluster version. Setting this value will trigger
+ an upgrade (if the current version does not match the desired version).
+ The set of recommended update values is listed as part of available
+ updates in status, and setting values outside that range may cause
+ the upgrade to fail. You may specify the version field without setting
+ image if an update exists with that version in the availableUpdates
+ or history. \n If an upgrade fails the operator will halt and report
+ status about the failing component. Setting the desired update value
+ back to the previous version will cause a rollback to be attempted.
+ Not all rollbacks will succeed."
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on update.
+ This option should only be used when the authenticity of the provided
+ image has been verified out of band because the provided image
+ will run with full administrative access to the cluster. Do not
+                    use this flag with images that come from unknown or potentially
+ malicious sources. \n This flag does not override other forms
+ of consistency checking that are required before a new update
+ is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains the
+ update. When this field is part of spec, image is optional if
+ version is specified and the availableUpdates field contains a
+ matching version.
+ type: string
+ version:
+ description: version is a semantic versioning identifying the update
+ version. When this field is part of spec, version is optional
+ if image is specified.
+ type: string
+ overrides:
+              description: overrides is a list of overrides for components that are managed
+ by cluster version operator. Marking a component unmanaged will prevent
+ the operator from creating or updating the object.
+ type: array
+ items:
+ description: ComponentOverride allows overriding cluster version operator's
+ behavior for a component.
+ type: object
+ required:
+ - group
+ - kind
+ - name
+ - namespace
+ - unmanaged
+ properties:
+ group:
+ description: group identifies the API group that the kind is in.
+ type: string
+ kind:
+                    description: kind identifies which object to override.
+ type: string
+ name:
+ description: name is the component's name.
+ type: string
+ namespace:
+ description: namespace is the component's namespace. If the resource
+ is cluster scoped, the namespace should be empty.
+ type: string
+ unmanaged:
+ description: 'unmanaged controls if cluster version operator should
+ stop managing the resources in this cluster. Default: false'
+ type: boolean
+ upstream:
+ description: upstream may be used to specify the preferred update server.
+ By default it will use the appropriate update server for the cluster
+ and region.
+ type: string
+ status:
+ description: status contains information about the available updates and
+ any in-progress updates.
+ type: object
+ required:
+ - availableUpdates
+ - desired
+ - observedGeneration
+ - versionHash
+ properties:
+ availableUpdates:
+ description: availableUpdates contains the list of updates that are
+ appropriate for this cluster. This list may be empty if no updates
+ are recommended, if the update service is unavailable, or if an invalid
+ channel has been specified.
+ type: array
+ items:
+ description: Update represents a release of the ClusterVersionOperator,
+ referenced by the Image member.
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on
+ update. This option should only be used when the authenticity
+ of the provided image has been verified out of band because
+ the provided image will run with full administrative access
+                      to the cluster. Do not use this flag with images that come
+ from unknown or potentially malicious sources. \n This flag
+ does not override other forms of consistency checking that are
+ required before a new update is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains
+ the update. When this field is part of spec, image is optional
+ if version is specified and the availableUpdates field contains
+ a matching version.
+ type: string
+ version:
+ description: version is a semantic versioning identifying the
+ update version. When this field is part of spec, version is
+ optional if image is specified.
+ type: string
+ nullable: true
+ conditions:
+ description: conditions provides information about the cluster version.
+ The condition "Available" is set to true if the desiredUpdate has
+ been reached. The condition "Progressing" is set to true if an update
+ is being applied. The condition "Degraded" is set to true if an update
+ is currently blocked by a temporary or permanent error. Conditions
+ are only valid for the current desiredUpdate when metadata.generation
+ is equal to status.generation.
+ type: array
+ items:
+ description: ClusterOperatorStatusCondition represents the state of
+ the operator's managed and monitored components.
+ type: object
+ required:
+ - lastTransitionTime
+ - status
+ - type
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the time of the last update
+ to the current status property.
+ type: string
+ format: date-time
+ message:
+ description: message provides additional information about the
+ current condition. This is only to be consumed by humans.
+ type: string
+ reason:
+ description: reason is the CamelCase reason for the condition's
+ current status.
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: type specifies the aspect reported by this condition.
+ type: string
+ desired:
+ description: desired is the version that the cluster is reconciling
+ towards. If the cluster is not yet fully initialized desired will
+ be set with the information available, which may be an image or a
+ tag.
+ type: object
+ properties:
+ force:
+ description: "force allows an administrator to update to an image
+ that has failed verification, does not appear in the availableUpdates
+ list, or otherwise would be blocked by normal protections on update.
+ This option should only be used when the authenticity of the provided
+ image has been verified out of band because the provided image
+ will run with full administrative access to the cluster. Do not
+                    use this flag with images that come from unknown or potentially
+ malicious sources. \n This flag does not override other forms
+ of consistency checking that are required before a new update
+ is deployed."
+ type: boolean
+ image:
+ description: image is a container image location that contains the
+ update. When this field is part of spec, image is optional if
+ version is specified and the availableUpdates field contains a
+ matching version.
+ type: string
+ version:
+ description: version is a semantic versioning identifying the update
+ version. When this field is part of spec, version is optional
+ if image is specified.
+ type: string
+ history:
+ description: history contains a list of the most recent versions applied
+ to the cluster. This value may be empty during cluster startup, and
+ then will be updated when a new update is being applied. The newest
+ update is first in the list and it is ordered by recency. Updates
+ in the history have state Completed if the rollout completed - if
+ an update was failing or halfway applied the state will be Partial.
+ Only a limited amount of update history is preserved.
+ type: array
+ items:
+ description: UpdateHistory is a single attempted update to the cluster.
+ type: object
+ required:
+ - completionTime
+ - image
+ - startedTime
+ - state
+ - verified
+ properties:
+ completionTime:
+ description: completionTime, if set, is when the update was fully
+ applied. The update that is currently being applied will have
+ a null completion time. Completion time will always be set for
+ entries that are not the current update (usually to the started
+ time of the next update).
+ type: string
+ format: date-time
+ nullable: true
+ image:
+ description: image is a container image location that contains
+ the update. This value is always populated.
+ type: string
+ startedTime:
+ description: startedTime is the time at which the update was started.
+ type: string
+ format: date-time
+ state:
+ description: state reflects whether the update was fully applied.
+ The Partial state indicates the update is not fully applied,
+ while the Completed state indicates the update was successfully
+ rolled out at least once (all parts of the update successfully
+ applied).
+ type: string
+ verified:
+ description: verified indicates whether the provided update was
+ properly verified before it was installed. If this is false
+ the cluster may not be trusted.
+ type: boolean
+ version:
+ description: version is a semantic versioning identifying the
+ update version. If the requested image does not define a version,
+ or if a failure occurs retrieving the image, this value may
+ be empty.
+ type: string
+ observedGeneration:
+ description: observedGeneration reports which version of the spec is
+ being synced. If this value is not equal to metadata.generation, then
+ the desired and conditions fields may represent a previous version.
+ type: integer
+ format: int64
+ versionHash:
+ description: versionHash is a fingerprint of the content that the cluster
+ will be updated with. It is used by the operator to avoid unnecessary
+ work and is for internal use only.
+ type: string
+ versions:
+ - name: v1
+ served: true
+ storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
new file mode 100644
index 000000000..8c857d45a
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
@@ -0,0 +1,101 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: operatorhubs.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: OperatorHub
+ listKind: OperatorHubList
+ plural: operatorhubs
+ singular: operatorhub
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ version: v1
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: OperatorHub is the Schema for the operatorhubs API. It can be used
+ to change the state of the default hub sources for OperatorHub on the cluster
+ from enabled to disabled and vice versa.
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OperatorHubSpec defines the desired state of OperatorHub
+ type: object
+ properties:
+ disableAllDefaultSources:
+ description: disableAllDefaultSources allows you to disable all the
+ default hub sources. If this is true, a specific entry in sources
+ can be used to enable a default source. If this is false, a specific
+ entry in sources can be used to disable or enable a default source.
+ type: boolean
+ sources:
+ description: sources is the list of default hub sources and their configuration.
+ If the list is empty, it implies that the default hub sources are
+ enabled on the cluster unless disableAllDefaultSources is true. If
+ disableAllDefaultSources is true and sources is not empty, the configuration
+ present in sources will take precedence. The list of default hub sources
+ and their current state will always be reflected in the status block.
+ type: array
+ items:
+ description: HubSource is used to specify the hub source and its configuration
+ type: object
+ properties:
+ disabled:
+ description: disabled is used to disable a default hub source
+ on cluster
+ type: boolean
+ name:
+ description: name is the name of one of the default hub sources
+ type: string
+ maxLength: 253
+ minLength: 1
+ status:
+ description: OperatorHubStatus defines the observed state of OperatorHub.
+ The current state of the default hub sources will always be reflected
+ here.
+ type: object
+ properties:
+ sources:
+ description: sources encapsulates the result of applying the configuration
+ for each hub source
+ type: array
+ items:
+ description: HubSourceStatus is used to reflect the current state
+ of applying the configuration to a default source
+ type: object
+ properties:
+ disabled:
+ description: disabled is used to disable a default hub source
+ on cluster
+ type: boolean
+ message:
+ description: message provides more information regarding failures
+ type: string
+ name:
+ description: name is the name of one of the default hub sources
+ type: string
+ maxLength: 253
+ minLength: 1
+ status:
+ description: status indicates success or failure in applying the
+ configuration
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
new file mode 100644
index 000000000..afd076747
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
@@ -0,0 +1,98 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: proxies.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ names:
+ kind: Proxy
+ listKind: ProxyList
+ plural: proxies
+ singular: proxy
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Proxy holds cluster-wide information on how to configure default
+ proxies for the cluster. The canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec holds user-settable values for the proxy configuration
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames and/or CIDRs
+ for which the proxy should not be used. Empty means unset and will
+ not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used to verify
+ readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing a CA
+ certificate bundle used for client egress HTTPS connections. The certificate
+ bundle must be from the CA that signed the proxy's certificate and
+ be signed for everything. The trustedCA field should only be consumed
+ by a proxy validator. The validator is responsible for reading the
+ certificate bundle from required key \"ca-bundle.crt\" and copying
+ it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in yaml): \n
+ apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace:
+ openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE-----
+ \ Custom CA certificate bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames and/or CIDRs
+ for which the proxy should not be used.
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
new file mode 100644
index 000000000..4e1fdac37
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
@@ -0,0 +1,219 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: apiservers.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: APIServer
+ singular: apiserver
+ plural: apiservers
+ listKind: APIServerList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: APIServer holds configuration (like serving certificates, client
+ CA and CORS domains) shared by all API servers in the system, among them especially
+ kube-apiserver and openshift-apiserver. The canonical name of an instance
+ is 'cluster'.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ additionalCORSAllowedOrigins:
+ description: additionalCORSAllowedOrigins lists additional, user-defined
+ regular expressions describing hosts for which the API server allows
+ access using the CORS headers. This may be needed to access the API
+ and the integrated OAuth server from JavaScript applications. The
+ values are regular expressions that correspond to the Golang regular
+ expression language.
+ type: array
+ items:
+ type: string
+ clientCA:
+ description: 'clientCA references a ConfigMap containing a certificate
+ bundle for the signers that will be recognized for incoming client
+ certificates in addition to the operator managed signers. If this
+ is empty, then only operator managed signers are valid. You usually
+ only have to set this if you have your own PKI you wish to honor client
+ certificates from. The ConfigMap must exist in the openshift-config
+ namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
+ - CA bundle.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ encryption:
+ description: encryption allows the configuration of encryption of resources
+ at the datastore layer.
+ type: object
+ properties:
+ type:
+ description: "type defines what encryption type should be used to
+ encrypt resources at the datastore layer. When this field is unset
+ (i.e. when it is set to the empty string), identity is implied.
+ The behavior of unset can and will change over time. Even if
+ encryption is enabled by default, the meaning of unset may change
+ to a different encryption type based on changes in best practices.
+ \n When encryption is enabled, all sensitive resources shipped
+ with the platform are encrypted. This list of sensitive resources
+ can and will change over time. The current authoritative list
+ is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
+ \ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
+ type: string
+ enum:
+ - ""
+ - identity
+ - aescbc
+ servingCerts:
+ description: servingCert is the TLS cert info for serving secure traffic.
+ If not specified, operator managed certificates will be used for serving
+ secure traffic.
+ type: object
+ properties:
+ namedCertificates:
+ description: namedCertificates references secrets containing the
+ TLS cert info for serving secure traffic to specific hostnames.
+ If no named certificates are provided, or no named certificates
+ match the server name as understood by a client, the defaultServingCertificate
+ will be used.
+ type: array
+ items:
+ description: APIServerNamedServingCert maps a server DNS name,
+ as understood by a client, to a certificate.
+ type: object
+ properties:
+ names:
+                        description: names is an optional list of explicit DNS names
+ (leading wildcards allowed) that should use this certificate
+ to serve secure traffic. If no names are provided, the implicit
+ names will be extracted from the certificates. Exact names
+ trump over wildcard names. Explicit names defined here trump
+ over extracted implicit names.
+ type: array
+ items:
+ type: string
+ servingCertificate:
+ description: 'servingCertificate references a kubernetes.io/tls
+ type secret containing the TLS cert info for serving secure
+ traffic. The secret must exist in the openshift-config namespace
+ and contain the following required fields: - Secret.Data["tls.key"]
+ - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsSecurityProfile:
+ description: "tlsSecurityProfile specifies settings for TLS connections
+ for externally exposed servers. \n If unset, a default (which may
+ change between releases) is chosen. Note that only Old and Intermediate
+ profiles are currently supported, and the maximum available MinTLSVersions
+ is VersionTLS12."
+ type: object
+ properties:
+ custom:
+ description: "custom is a user-defined TLS security profile. Be
+ extremely careful using a custom profile as invalid configurations
+ can be catastrophic. An example custom profile looks like this:
+ \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
+ \ minTLSVersion: TLSv1.1"
+ type: object
+ properties:
+ ciphers:
+ description: "ciphers is used to specify the cipher algorithms
+ that are negotiated during the TLS handshake. Operators may
+ remove entries their operands do not support. For example,
+ to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
+ type: array
+ items:
+ type: string
+ minTLSVersion:
+ description: "minTLSVersion is used to specify the minimal version
+ of the TLS protocol that is negotiated during the TLS handshake.
+ For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):
+ \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest
+ minTLSVersion allowed is VersionTLS12"
+ type: string
+ nullable: true
+ intermediate:
+ description: "intermediate is a TLS security profile based on: \n
+ https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ \ minTLSVersion: TLSv1.2"
+ type: object
+ nullable: true
+ modern:
+ description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
+ type: object
+ nullable: true
+ old:
+ description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
+ \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
+ \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
+ \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
+ \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
+ \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
+ \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
+ \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
+ ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
+ \ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA -
+ DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256
+ \ - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256
+ \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion:
+ TLSv1.0"
+ type: object
+ nullable: true
+ type:
+ description: "type is one of Old, Intermediate, Modern or Custom.
+ Custom provides the ability to specify individual TLS security
+ profile parameters. Old, Intermediate and Modern are TLS security
+ profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ \n The profiles are intent based, so they may change over time
+ as new ciphers are developed and existing ciphers are found to
+ be insecure. Depending on precisely which ciphers are available
+ to a process, the list may be reduced. \n Note that the Modern
+ profile is currently not supported because it is not yet well
+ adopted by common software libraries."
+ type: string
+ status:
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
new file mode 100644
index 000000000..f21ac7ea8
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
@@ -0,0 +1,123 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: authentications.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Authentication
+ listKind: AuthenticationList
+ plural: authentications
+ singular: authentication
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Authentication specifies cluster-wide settings for authentication
+ (like OAuth and webhook token authenticators). The canonical name of an instance
+ is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ oauthMetadata:
+ description: 'oauthMetadata contains the discovery endpoint data for
+ OAuth 2.0 Authorization Server Metadata for an external OAuth server.
+ This discovery document can be viewed from its served location: oc
+ get --raw ''/.well-known/oauth-authorization-server'' For further
+ details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ If oauthMetadata.name is non-empty, this value has precedence over
+ any metadata reference stored in status. The key "oauthMetadata" is
+ used to locate the data. If specified and the config map or expected
+ key is not found, no metadata is served. If the specified metadata
+ is not valid, no metadata is served. The namespace for this config
+ map is openshift-config.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ type:
+ description: type identifies the cluster managed, user facing authentication
+ mode in use. Specifically, it manages the component that responds
+ to login attempts. The default is IntegratedOAuth.
+ type: string
+ webhookTokenAuthenticators:
+ description: webhookTokenAuthenticators configures remote token reviewers.
+ These remote authentication webhooks can be used to verify bearer
+ tokens via the tokenreviews.authentication.k8s.io REST API. This
+ is required to honor bearer tokens that are provisioned by an external
+ authentication service. The namespace for these secrets is openshift-config.
+ type: array
+ items:
+ description: webhookTokenAuthenticator holds the necessary configuration
+ options for a remote token authenticator
+ type: object
+ properties:
+ kubeConfig:
+ description: 'kubeConfig contains kube config file data which
+ describes how to access the remote webhook service. For further
+ details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ The key "kubeConfig" is used to locate the data. If the secret
+ or expected key is not found, the webhook is not honored. If
+ the specified kube config data is not valid, the webhook is
+ not honored. The namespace for this secret is determined by
+ the point of use.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ integratedOAuthMetadata:
+ description: 'integratedOAuthMetadata contains the discovery endpoint
+ data for OAuth 2.0 Authorization Server Metadata for the in-cluster
+ integrated OAuth server. This discovery document can be viewed from
+ its served location: oc get --raw ''/.well-known/oauth-authorization-server''
+ For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ This contains the observed value based on cluster state. An explicitly
+ set value in spec.oauthMetadata has precedence over this field. This
+ field has no meaning if authentication spec.type is not set to IntegratedOAuth.
+ The key "oauthMetadata" is used to locate the data. If the config
+ map or expected key is not found, no metadata is served. If the specified
+ metadata is not valid, no metadata is served. The namespace for this
+ config map is openshift-config-managed.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
new file mode 100644
index 000000000..8f7583971
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
@@ -0,0 +1,366 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: builds.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Build
+ singular: build
+ plural: builds
+ listKind: BuildList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: "Build configures the behavior of OpenShift builds for the entire
+ cluster. This includes default settings that can be overridden in BuildConfig
+ objects, and overrides which are applied to all builds. \n The canonical name
+ is \"cluster\""
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: Spec holds user-settable values for the build controller configuration
+ type: object
+ properties:
+ additionalTrustedCA:
+ description: "AdditionalTrustedCA is a reference to a ConfigMap containing
+ additional CAs that should be trusted for image pushes and pulls during
+ builds. The namespace for this config map is openshift-config. \n
+ DEPRECATED: Additional CAs for image pull and push should be set on
+ image.config.openshift.io/cluster instead."
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ buildDefaults:
+ description: BuildDefaults controls the default information for Builds
+ type: object
+ properties:
+ defaultProxy:
+ description: "DefaultProxy contains the default proxy settings for
+ all build operations, including image pull/push and source download.
+ \n Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`,
+ and `NO_PROXY` environment variables in the build config's strategy."
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs for which the proxy should not be used. Empty
+ means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle used for client egress HTTPS connections.
+ The certificate bundle must be from the CA that signed the
+ proxy's certificate and be signed for everything. The trustedCA
+ field should only be consumed by a proxy validator. The validator
+ is responsible for reading the certificate bundle from required
+ key \"ca-bundle.crt\" and copying it to a ConfigMap named
+ \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in
+ yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
+ user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
+ | -----BEGIN CERTIFICATE----- Custom CA certificate
+ bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ env:
+ description: Env is a set of default environment variables that
+ will be applied to the build if the specified variables do not
+ exist on the build
+ type: array
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value.
+ Cannot be used if value is not empty.
+ type: object
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ type: object
+ required:
+ - key
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, metadata.labels, metadata.annotations,
+ spec.nodeName, spec.serviceAccountName, status.hostIP,
+ status.podIP, status.podIPs.'
+ type: object
+ required:
+ - fieldPath
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ type: object
+ required:
+ - resource
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ type: string
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ type: object
+ required:
+ - key
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind,
+ uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ gitProxy:
+ description: "GitProxy contains the proxy settings for git operations
+ only. If set, this will override any Proxy settings for all git
+ commands, such as git clone. \n Values that are not set here will
+ be inherited from DefaultProxy."
+ type: object
+ properties:
+ httpProxy:
+ description: httpProxy is the URL of the proxy for HTTP requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ httpsProxy:
+ description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
+ means unset and will not result in an env var.
+ type: string
+ noProxy:
+ description: noProxy is a comma-separated list of hostnames
+ and/or CIDRs for which the proxy should not be used. Empty
+ means unset and will not result in an env var.
+ type: string
+ readinessEndpoints:
+ description: readinessEndpoints is a list of endpoints used
+ to verify readiness of the proxy.
+ type: array
+ items:
+ type: string
+ trustedCA:
+ description: "trustedCA is a reference to a ConfigMap containing
+ a CA certificate bundle used for client egress HTTPS connections.
+ The certificate bundle must be from the CA that signed the
+ proxy's certificate and be signed for everything. The trustedCA
+ field should only be consumed by a proxy validator. The validator
+ is responsible for reading the certificate bundle from required
+ key \"ca-bundle.crt\" and copying it to a ConfigMap named
+ \"trusted-ca-bundle\" in the \"openshift-config-managed\"
+ namespace. The namespace for the ConfigMap referenced by trustedCA
+ is \"openshift-config\". Here is an example ConfigMap (in
+ yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
+ user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
+ | -----BEGIN CERTIFICATE----- Custom CA certificate
+ bundle. -----END CERTIFICATE-----"
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ imageLabels:
+ description: ImageLabels is a list of docker labels that are applied
+ to the resulting image. User can override a default label by providing
+ a label with the same name in their Build/BuildConfig.
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ description: Name defines the name of the label. It must have
+ non-zero length.
+ type: string
+ value:
+ description: Value defines the literal value of the label.
+ type: string
+ resources:
+ description: Resources defines resource requirements to execute
+ the build.
+ type: object
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ additionalProperties:
+ type: string
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ additionalProperties:
+ type: string
+ buildOverrides:
+ description: BuildOverrides controls override settings for builds
+ type: object
+ properties:
+ imageLabels:
+ description: ImageLabels is a list of docker labels that are applied
+ to the resulting image. If user provided a label in their Build/BuildConfig
+ with the same name as one in this list, the user's label will
+ be overwritten.
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ description: Name defines the name of the label. It must have
+ non-zero length.
+ type: string
+ value:
+ description: Value defines the literal value of the label.
+ type: string
+ nodeSelector:
+ description: NodeSelector is a selector which must be true for the
+ build pod to fit on a node
+ type: object
+ additionalProperties:
+ type: string
+ tolerations:
+ description: Tolerations is a list of Tolerations that will override
+ any existing tolerations set on a build pod.
+ type: array
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
new file mode 100644
index 000000000..b527f7aa3
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
@@ -0,0 +1,70 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: consoles.config.openshift.io
+spec:
+ scope: Cluster
+ preserveUnknownFields: false
+ group: config.openshift.io
+ names:
+ kind: Console
+ listKind: ConsoleList
+ plural: consoles
+ singular: console
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Console holds cluster-wide configuration for the web console, including
+ the logout URL, and reports the public URL of the console. The canonical name
+ is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ authentication:
+ description: ConsoleAuthentication defines a list of optional configuration
+ for console authentication.
+ type: object
+ properties:
+ logoutRedirect:
+ description: 'An optional, absolute URL to redirect web browsers
+ to after logging out of the console. If not specified, it will
+ redirect to the default login page. This is required when using
+ an identity provider that supports single sign-on (SSO) such as:
+ - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML)
+ - OAuth (GitHub, GitLab, Google) Logging out of the console will
+ destroy the user''s token. The logoutRedirect provides the user
+ the option to perform single logout (SLO) through the identity
+ provider to destroy their single sign-on session.'
+ type: string
+ pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ consoleURL:
+ description: The URL for the console. This will be derived from the
+ host for the route that is created for the console.
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
new file mode 100644
index 000000000..c883ee0f0
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
@@ -0,0 +1,100 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: dnses.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: DNS
+ listKind: DNSList
+ plural: dnses
+ singular: dns
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: DNS holds cluster-wide information about DNS. The canonical name
+ is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ baseDomain:
+ description: "baseDomain is the base domain of the cluster. All managed
+ DNS records will be sub-domains of this base. \n For example, given
+ the base domain `openshift.example.com`, an API server DNS record
+ may be created for `cluster-api.openshift.example.com`. \n Once set,
+ this field cannot be changed."
+ type: string
+ privateZone:
+ description: "privateZone is the location where all the DNS records
+ that are only available internally to the cluster exist. \n If this
+ field is nil, no private records should be created. \n Once set, this
+ field cannot be changed."
+ type: object
+ properties:
+ id:
+ description: "id is the identifier that can be used to find the
+ DNS hosted zone. \n on AWS zone can be fetched using `ID` as id
+ in [1] on Azure zone can be fetched using `ID` as a pre-determined
+ name in [2], on GCP zone can be fetched using `ID` as a pre-determined
+ name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
+ type: string
+ tags:
+ description: "tags can be used to query the DNS hosted zone. \n
+ on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
+ using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
+ type: object
+ additionalProperties:
+ type: string
+ publicZone:
+ description: "publicZone is the location where all the DNS records that
+ are publicly accessible to the internet exist. \n If this field is
+ nil, no public records should be created. \n Once set, this field
+ cannot be changed."
+ type: object
+ properties:
+ id:
+ description: "id is the identifier that can be used to find the
+ DNS hosted zone. \n on AWS zone can be fetched using `ID` as id
+ in [1] on Azure zone can be fetched using `ID` as a pre-determined
+ name in [2], on GCP zone can be fetched using `ID` as a pre-determined
+ name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
+ type: string
+ tags:
+ description: "tags can be used to query the DNS hosted zone. \n
+ on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
+ using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
+ type: object
+ additionalProperties:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
new file mode 100644
index 000000000..89084a33f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
@@ -0,0 +1,76 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: featuregates.config.openshift.io
+spec:
+ group: config.openshift.io
+ version: v1
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: FeatureGate
+ singular: featuregate
+ plural: featuregates
+ listKind: FeatureGateList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Feature holds cluster-wide information about feature gates. The
+ canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ customNoUpgrade:
+ description: customNoUpgrade allows the enabling or disabling of any
+ feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE,
+ and PREVENTS UPGRADES. Because of its nature, this setting cannot
+ be validated. If you have any typos or accidentally apply invalid
+ combinations your cluster may fail in an unrecoverable way. featureSet
+ must equal "CustomNoUpgrade" must be set to use this field.
+ type: object
+ properties:
+ disabled:
+ description: disabled is a list of all feature gates that you want
+ to force off
+ type: array
+ items:
+ type: string
+ enabled:
+ description: enabled is a list of all feature gates that you want
+ to force on
+ type: array
+ items:
+ type: string
+ nullable: true
+ featureSet:
+ description: featureSet changes the list of features in the cluster. The
+ default is empty. Be very careful adjusting this setting. Turning
+ on or off features may cause irreversible changes in your cluster
+ which cannot be undone.
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
new file mode 100644
index 000000000..a0fd48709
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
@@ -0,0 +1,144 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: images.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Image
+ singular: image
+ plural: images
+ listKind: ImageList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Image governs policies related to imagestream imports and runtime
+ configuration for external registries. It allows cluster admins to configure
+ which registries OpenShift is allowed to import images from, extra CA trust
+ bundles for external registries, and policies to blacklist/whitelist registry
+ hostnames. When exposing OpenShift's image registry to the public, this also
+ lets cluster admins specify the external hostname.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ additionalTrustedCA:
+ description: additionalTrustedCA is a reference to a ConfigMap containing
+ additional CAs that should be trusted during imagestream import, pod
+ image pull, build image pull, and imageregistry pullthrough. The namespace
+ for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ allowedRegistriesForImport:
+ description: allowedRegistriesForImport limits the container image registries
+ that normal users may import images from. Set this list to the registries
+ that you trust to contain valid Docker images and that you want applications
+ to be able to import from. Users with permission to create Images
+ or ImageStreamMappings via the API are not affected by this policy
+ - typically only administrators or system integrations will have those
+ permissions.
+ type: array
+ items:
+ description: RegistryLocation contains a location of the registry
+ specified by the registry domain name. The domain name might include
+ wildcards, like '*' or '??'.
+ type: object
+ properties:
+ domainName:
+                description: domainName specifies a domain name for the registry.
+                  In case the registry uses a non-standard (80 or 443) port, the
+                  port should be included in the domain name as well.
+ type: string
+ insecure:
+                description: insecure indicates whether the registry is secure
+                  (https) or insecure (http). By default (if not specified) the
+                  registry is assumed to be secure.
+ type: boolean
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames for the
+ default external image registry. The external hostname should be set
+ only when the image registry is exposed externally. The first value
+ is used in 'publicDockerImageRepository' field in ImageStreams. The
+ value must be in "hostname[:port]" format.
+ type: array
+ items:
+ type: string
+ registrySources:
+ description: registrySources contains configuration that determines
+ how the container runtime should treat individual registries when
+ accessing images for builds+pods. (e.g. whether or not to allow insecure
+ access). It does not contain configuration for the internal cluster
+ registry.
+ type: object
+ properties:
+ allowedRegistries:
+ description: "allowedRegistries are whitelisted for image pull/push.
+ All other registries are blocked. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ type: array
+ items:
+ type: string
+ blockedRegistries:
+ description: "blockedRegistries are blacklisted from image pull/push.
+ All other registries are allowed. \n Only one of BlockedRegistries
+ or AllowedRegistries may be set."
+ type: array
+ items:
+ type: string
+ insecureRegistries:
+            description: insecureRegistries are registries which do not have
+              valid TLS certificates or only support HTTP connections.
+ type: array
+ items:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ externalRegistryHostnames:
+ description: externalRegistryHostnames provides the hostnames for the
+ default external image registry. The external hostname should be set
+ only when the image registry is exposed externally. The first value
+ is used in 'publicDockerImageRepository' field in ImageStreams. The
+ value must be in "hostname[:port]" format.
+ type: array
+ items:
+ type: string
+ internalRegistryHostname:
+ description: internalRegistryHostname sets the hostname for the default
+ internal image registry. The value must be in "hostname[:port]" format.
+ This value is set by the image registry operator which controls the
+ internal registry hostname. For backward compatibility, users can
+ still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this
+ setting overrides the environment variable.
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
new file mode 100644
index 000000000..2aba542da
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
@@ -0,0 +1,221 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: infrastructures.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Infrastructure
+ listKind: InfrastructureList
+ plural: infrastructures
+ singular: infrastructure
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: Infrastructure holds cluster-wide information about Infrastructure. The
+ canonical name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ cloudConfig:
+ description: cloudConfig is a reference to a ConfigMap containing the
+ cloud provider configuration file. This configuration file is used
+ to configure the Kubernetes cloud provider integration when using
+ the built-in cloud provider integration or the external cloud controller
+ manager. The namespace for this config map is openshift-config.
+ type: object
+ properties:
+ key:
+ description: Key allows pointing to a specific key/value inside
+ of the configmap. This is useful for logical file references.
+ type: string
+ name:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ apiServerInternalURI:
+          description: apiServerInternalURI is a valid URI with scheme(http/https),
+            address and port. apiServerInternalURI can be used by components
+ like kubelets, to contact the Kubernetes API server using the infrastructure
+ provider rather than Kubernetes networking.
+ type: string
+ apiServerURL:
+ description: apiServerURL is a valid URI with scheme(http/https), address
+ and port. apiServerURL can be used by components like the web console
+ to tell users where to find the Kubernetes API.
+ type: string
+ etcdDiscoveryDomain:
+ description: 'etcdDiscoveryDomain is the domain used to fetch the SRV
+ records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery'
+ type: string
+ infrastructureName:
+ description: infrastructureName uniquely identifies a cluster with a
+ human friendly name. Once set it should not be changed. Must be of
+ max length 27 and must have only alphanumeric or hyphen characters.
+ type: string
+ platform:
+ description: "platform is the underlying infrastructure provider for
+ the cluster. \n Deprecated: Use platformStatus.type instead."
+ type: string
+ platformStatus:
+ description: platformStatus holds status information specific to the
+ underlying infrastructure provider.
+ type: object
+ properties:
+ aws:
+ description: AWS contains settings specific to the Amazon Web Services
+ infrastructure provider.
+ type: object
+ properties:
+ region:
+ description: region holds the default AWS region for new AWS
+ resources created by the cluster.
+ type: string
+ azure:
+ description: Azure contains settings specific to the Azure infrastructure
+ provider.
+ type: object
+ properties:
+ networkResourceGroupName:
+ description: networkResourceGroupName is the Resource Group
+ for network resources like the Virtual Network and Subnets
+ used by the cluster. If empty, the value is same as ResourceGroupName.
+ type: string
+ resourceGroupName:
+ description: resourceGroupName is the Resource Group for new
+ Azure resources created for the cluster.
+ type: string
+ baremetal:
+ description: BareMetal contains settings specific to the BareMetal
+ platform.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for BareMetal deployments. In
+ order to minimize necessary changes to the datacenter DNS,
+ a DNS service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ gcp:
+ description: GCP contains settings specific to the Google Cloud
+ Platform infrastructure provider.
+ type: object
+ properties:
+ projectID:
+                description: projectID is the Project ID for new GCP resources
+                  created for the cluster.
+ type: string
+ region:
+ description: region holds the region for new GCP resources created
+ for the cluster.
+ type: string
+ openstack:
+ description: OpenStack contains settings specific to the OpenStack
+ infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ cloudName:
+ description: cloudName is the name of the desired OpenStack
+ cloud in the client configuration file (`clouds.yaml`).
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for OpenStack deployments. In
+ order to minimize necessary changes to the datacenter DNS,
+ a DNS service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ ovirt:
+ description: Ovirt contains settings specific to the oVirt infrastructure
+ provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components inside
+ the cluster, like kubelets using the infrastructure rather
+ than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer in
+ front of the API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target of
+ a wildcard DNS record used to resolve default route host names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal DNS
+ used by the nodes. Unlike the one managed by the DNS operator,
+ `NodeDNSIP` provides name resolution for the nodes themselves.
+ There is no DNS-as-a-service for oVirt deployments. In order
+ to minimize necessary changes to the datacenter DNS, a DNS
+ service is hosted as a static pod to serve those hostnames
+ to the nodes in the cluster.
+ type: string
+ type:
+ description: type is the underlying infrastructure provider for
+ the cluster. This value controls whether infrastructure automation
+ such as service load balancers, dynamic volume provisioning, machine
+ creation and deletion, and other integrations are enabled. If
+ None, no infrastructure automation is enabled. Allowed values
+ are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack",
+ "VSphere", "oVirt", and "None". Individual components may not
+ support all platforms, and must handle unrecognized platforms
+ as None if they do not support that platform.
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
new file mode 100644
index 000000000..ada440425
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
@@ -0,0 +1,55 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ingresses.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Ingress
+ listKind: IngressList
+ plural: ingresses
+ singular: ingress
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Ingress holds cluster-wide information about ingress, including
+ the default ingress domain used for routes. The canonical name is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ domain:
+ description: "domain is used to generate a default host name for a route
+ when the route's host name is empty. The generated host name will
+ follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
+ \n It is also used as the default wildcard domain suffix for ingress.
+ The default ingresscontroller domain will follow this pattern: \"*.<domain>\".
+ \n Once set, changing domain is not currently supported."
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
new file mode 100644
index 000000000..bc3b62a87
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
@@ -0,0 +1,141 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Network
+ listKind: NetworkList
+ plural: networks
+ singular: network
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: 'Network holds cluster-wide information about Network. The canonical
+ name is `cluster`. It is used to configure the desired network configuration,
+ such as: IP address pools for services/pod IPs, network plugin, etc. Please
+ view network.spec for an explanation on what applies when configuring this
+ resource.'
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration. As a general
+ rule, this SHOULD NOT be read directly. Instead, you should consume the
+ NetworkStatus, as it indicates the currently deployed configuration. Currently,
+ most spec fields are immutable after installation. Please view the individual
+ ones for further details on each.
+ type: object
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs. This field is immutable
+ after installation.
+ type: array
+ items:
+ description: ClusterNetworkEntry is a contiguous block of IP addresses
+ from which pod IPs are allocated.
+ type: object
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate to each node.
+ type: integer
+ format: int32
+ minimum: 0
+ externalIP:
+ description: externalIP defines configuration for controllers that affect
+ Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.
+ type: object
+ properties:
+ autoAssignCIDRs:
+ description: autoAssignCIDRs is a list of CIDRs from which to automatically
+ assign Service.ExternalIP. These are assigned when the service
+ is of type LoadBalancer. In general, this is only useful for bare-metal
+              clusters. In OpenShift 3.x, this was misleadingly called "IngressIPs".
+ Automatically assigned External IPs are not affected by any ExternalIPPolicy
+ rules. Currently, only one entry may be provided.
+ type: array
+ items:
+ type: string
+ policy:
+ description: policy is a set of restrictions applied to the ExternalIP
+ field. If nil or empty, then ExternalIP is not allowed to be set.
+ type: object
+ properties:
+ allowedCIDRs:
+ description: allowedCIDRs is the list of allowed CIDRs.
+ type: array
+ items:
+ type: string
+ rejectedCIDRs:
+ description: rejectedCIDRs is the list of disallowed CIDRs.
+ These take precedence over allowedCIDRs.
+ type: array
+ items:
+ type: string
+ networkType:
+ description: 'NetworkType is the plugin that is to be deployed (e.g.
+ OpenShiftSDN). This should match a value that the cluster-network-operator
+ understands, or else no networking will be installed. Currently supported
+ values are: - OpenShiftSDN This field is immutable after installation.'
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only support
+ a single entry here. This field is immutable after installation.
+ type: array
+ items:
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ clusterNetwork:
+ description: IP address pool to use for pod IPs.
+ type: array
+ items:
+ description: ClusterNetworkEntry is a contiguous block of IP addresses
+ from which pod IPs are allocated.
+ type: object
+ properties:
+ cidr:
+ description: The complete block for pod IPs.
+ type: string
+ hostPrefix:
+ description: The size (prefix) of block to allocate to each node.
+ type: integer
+ format: int32
+ minimum: 0
+ clusterNetworkMTU:
+ description: ClusterNetworkMTU is the MTU for inter-pod networking.
+ type: integer
+ networkType:
+ description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
+ type: string
+ serviceNetwork:
+ description: IP address pool for services. Currently, we only support
+ a single entry here.
+ type: array
+ items:
+ type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
new file mode 100644
index 000000000..fd763d047
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
@@ -0,0 +1,661 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: oauths.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: OAuth
+ listKind: OAuthList
+ plural: oauths
+ singular: oauth
+ scope: Cluster
+ preserveUnknownFields: false
+ subresources:
+ status: {}
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ "validation":
+ "openAPIV3Schema":
+ description: OAuth holds cluster-wide information about OAuth. The canonical
+ name is `cluster`. It is used to configure the integrated OAuth server. This
+ configuration is only honored when the top level Authentication config has
+ type set to IntegratedOAuth.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OAuthSpec contains desired cluster auth configuration
+ type: object
+ properties:
+ identityProviders:
+ description: identityProviders is an ordered list of ways for a user
+ to identify themselves. When this list is empty, no identities are
+ provisioned for users.
+ type: array
+ items:
+ description: IdentityProvider provides identities for users authenticating
+ using credentials
+ type: object
+ properties:
+ basicAuth:
+ description: basicAuth contains configuration options for the
+ BasicAuth IdP
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference to a secret
+ by name that contains the PEM-encoded TLS client certificate
+ to present when connecting to the server. The key "tls.crt"
+ is used to locate the data. If specified and the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified certificate data is not valid,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsClientKey:
+ description: tlsClientKey is an optional reference to a secret
+ by name that contains the PEM-encoded TLS private key for
+ the client certificate referenced in tlsClientCert. The
+ key "tls.key" is used to locate the data. If specified and
+ the secret or expected key is not found, the identity provider
+ is not honored. If the specified certificate data is not
+ valid, the identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ github:
+ description: github enables user authentication using GitHub credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ This can only be configured when hostname is set to a non-empty
+ value. The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostname:
+ description: hostname is the optional domain (e.g. "mycompany.com")
+ for use with a hosted instance of GitHub Enterprise. It
+ must match the GitHub Enterprise settings value configured
+ at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts which organizations
+ are allowed to log in
+ type: array
+ items:
+ type: string
+ teams:
+ description: teams optionally restricts which teams are allowed
+ to log in. Format is <org>/<team>.
+ type: array
+ items:
+ type: string
+ gitlab:
+ description: gitlab enables user authentication using GitLab credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the oauth server base URL
+ type: string
+ google:
+ description: google enables user authentication using Google credentials
+ type: object
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostedDomain:
+ description: hostedDomain is the optional Google App domain
+ (e.g. "mycompany.com") to restrict logins to
+ type: string
+ htpasswd:
+ description: htpasswd enables user authentication using an HTPasswd
+ file to validate credentials
+ type: object
+ properties:
+ fileData:
+ description: fileData is a required reference to a secret
+ by name containing the data to use as the htpasswd file.
+ The key "htpasswd" is used to locate the data. If the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified htpasswd data is not valid, the
+ identity provider is not honored. The namespace for this
+ secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ keystone:
+ description: keystone enables user authentication using keystone
+ password credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ domainName:
+ description: domainName is required for keystone v3
+ type: string
+ tlsClientCert:
+ description: tlsClientCert is an optional reference to a secret
+ by name that contains the PEM-encoded TLS client certificate
+ to present when connecting to the server. The key "tls.crt"
+ is used to locate the data. If specified and the secret
+ or expected key is not found, the identity provider is not
+ honored. If the specified certificate data is not valid,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ tlsClientKey:
+ description: tlsClientKey is an optional reference to a secret
+ by name that contains the PEM-encoded TLS private key for
+ the client certificate referenced in tlsClientCert. The
+ key "tls.key" is used to locate the data. If specified and
+ the secret or expected key is not found, the identity provider
+ is not honored. If the specified certificate data is not
+ valid, the identity provider is not honored. The namespace
+ for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the remote URL to connect to
+ type: string
+ ldap:
+ description: ldap enables user authentication using LDAP credentials
+ type: object
+ properties:
+ attributes:
+ description: attributes maps LDAP attributes to identities
+ type: object
+ properties:
+ email:
+ description: email is the list of attributes whose values
+ should be used as the email address. Optional. If unspecified,
+ no email is set for the identity
+ type: array
+ items:
+ type: string
+ id:
+ description: id is the list of attributes whose values
+ should be used as the user ID. Required. First non-empty
+ attribute is used. At least one attribute is required.
+ If none of the listed attribute have a value, authentication
+ fails. LDAP standard identity attribute is "dn"
+ type: array
+ items:
+ type: string
+ name:
+ description: name is the list of attributes whose values
+ should be used as the display name. Optional. If unspecified,
+ no display name is set for the identity LDAP standard
+ display name attribute is "cn"
+ type: array
+ items:
+ type: string
+ preferredUsername:
+ description: preferredUsername is the list of attributes
+ whose values should be used as the preferred username.
+ LDAP standard login attribute is "uid"
+ type: array
+ items:
+ type: string
+ bindDN:
+ description: bindDN is an optional DN to bind with during
+ the search phase.
+ type: string
+ bindPassword:
+ description: bindPassword is an optional reference to a secret
+ by name containing a password to bind with during the search
+ phase. The key "bindPassword" is used to locate the data.
+ If specified and the secret or expected key is not found,
+ the identity provider is not honored. The namespace for
+ this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ insecure:
+ description: 'insecure, if true, indicates the connection
+ should not use TLS WARNING: Should not be set to `true`
+ with the URL scheme "ldaps://" as "ldaps://" URLs always attempt
+ to connect using TLS, even when `insecure` is set to `true`
+ When `true`, "ldap://" URLS connect insecurely. When `false`,
+ "ldap://" URLs are upgraded to a TLS connection using StartTLS
+ as specified in https://tools.ietf.org/html/rfc2830.'
+ type: boolean
+ url:
+ description: 'url is an RFC 2255 URL which specifies the LDAP
+ search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter'
+ type: string
+ mappingMethod:
+ description: mappingMethod determines how identities from this
+ provider are mapped to users Defaults to "claim"
+ type: string
+ name:
+ description: 'name is used to qualify the identities returned
+ by this provider. - It MUST be unique and not shared by any
+ other identity provider used - It MUST be a valid path segment:
+ name cannot equal "." or ".." or contain "/" or "%" or ":" Ref:
+ https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
+ type: string
+ openID:
+ description: openID enables user authentication using OpenID credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected key
+ is not found, the identity provider is not honored. If the
+ specified ca data is not valid, the identity provider is
+ not honored. If empty, the default system roots are used.
+ The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ claims:
+ description: claims mappings
+ type: object
+ properties:
+ email:
+ description: email is the list of claims whose values
+ should be used as the email address. Optional. If unspecified,
+ no email is set for the identity
+ type: array
+ items:
+ type: string
+ name:
+ description: name is the list of claims whose values should
+ be used as the display name. Optional. If unspecified,
+ no display name is set for the identity
+ type: array
+ items:
+ type: string
+ preferredUsername:
+ description: preferredUsername is the list of claims whose
+ values should be used as the preferred username. If
+ unspecified, the preferred username is determined from
+ the value of the sub claim
+ type: array
+ items:
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the secret
+ by name containing the oauth client secret. The key "clientSecret"
+ is used to locate the data. If the secret or expected key
+ is not found, the identity provider is not honored. The
+ namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ extraAuthorizeParameters:
+ description: extraAuthorizeParameters are any custom parameters
+ to add to the authorize request.
+ type: object
+ additionalProperties:
+ type: string
+ extraScopes:
+ description: extraScopes are any scopes to request in addition
+ to the standard "openid" scope.
+ type: array
+ items:
+ type: string
+ issuer:
+ description: issuer is the URL that the OpenID Provider asserts
+ as its Issuer Identifier. It must use the https scheme with
+ no query or fragment component.
+ type: string
+ requestHeader:
+ description: requestHeader enables user authentication using request
+ header credentials
+ type: object
+ properties:
+ ca:
+ description: ca is a required reference to a config map by
+ name containing the PEM-encoded CA bundle. It is used as
+ a trust anchor to validate the TLS certificate presented
+ by the remote server. Specifically, it allows verification
+ of incoming requests to prevent header spoofing. The key
+ "ca.crt" is used to locate the data. If the config map or
+ expected key is not found, the identity provider is not
+ honored. If the specified ca data is not valid, the identity
+ provider is not honored. The namespace for this config map
+ is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ challengeURL:
+ description: challengeURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests from OAuth
+ clients which expect WWW-Authenticate challenges will be
+ redirected here. ${url} is replaced with the current URL,
+ escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when challenge is set to true.
+ type: string
+ clientCommonNames:
+ description: clientCommonNames is an optional list of common
+ names to require a match from. If empty, any client certificate
+ validated against the clientCA bundle is considered authoritative.
+ type: array
+ items:
+ type: string
+ emailHeaders:
+ description: emailHeaders is the set of headers to check for
+ the email address
+ type: array
+ items:
+ type: string
+ headers:
+ description: headers is the set of headers to check for identity
+ information
+ type: array
+ items:
+ type: string
+ loginURL:
+ description: loginURL is a URL to redirect unauthenticated
+ /authorize requests to Unauthenticated requests from OAuth
+ clients which expect interactive logins will be redirected
+ here ${url} is replaced with the current URL, escaped to
+ be safe in a query parameter https://www.example.com/sso-login?then=${url}
+ ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query}
+ Required when login is set to true.
+ type: string
+ nameHeaders:
+ description: nameHeaders is the set of headers to check for
+ the display name
+ type: array
+ items:
+ type: string
+ preferredUsernameHeaders:
+ description: preferredUsernameHeaders is the set of headers
+ to check for the preferred username
+ type: array
+ items:
+ type: string
+ type:
+ description: type identifies the identity provider type for this
+ entry.
+ type: string
+ templates:
+ description: templates allow you to customize pages like the login page.
+ type: object
+ properties:
+ error:
+ description: error is the name of a secret that specifies a go template
+ to use to render error pages during the authentication or grant
+ flow. The key "errors.html" is used to locate the template data.
+ If specified and the secret or expected key is not found, the
+ default error page is used. If the specified template is not valid,
+ the default error page is used. If unspecified, the default error
+ page is used. The namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ login:
+ description: login is the name of a secret that specifies a go template
+ to use to render the login page. The key "login.html" is used
+ to locate the template data. If specified and the secret or expected
+ key is not found, the default login page is used. If the specified
+ template is not valid, the default login page is used. If unspecified,
+ the default login page is used. The namespace for this secret
+ is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ providerSelection:
+ description: providerSelection is the name of a secret that specifies
+ a go template to use to render the provider selection page. The
+ key "providers.html" is used to locate the template data. If specified
+ and the secret or expected key is not found, the default provider
+ selection page is used. If the specified template is not valid,
+ the default provider selection page is used. If unspecified, the
+ default provider selection page is used. The namespace for this
+ secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced secret
+ type: string
+ tokenConfig:
+ description: tokenConfig contains options for authorization and access
+ tokens
+ type: object
+ properties:
+ accessTokenInactivityTimeoutSeconds:
+ description: 'accessTokenInactivityTimeoutSeconds defines the default
+ token inactivity timeout for tokens granted by any client. The
+ value represents the maximum amount of time that can occur between
+ consecutive uses of the token. Tokens become invalid if they are
+ not used within this temporal window. The user will need to acquire
+ a new token to regain access once a token times out. Valid values
+ are integer values: x < 0 Tokens time out is enabled but tokens
+ never timeout unless configured per client (e.g. `-1`) x = 0 Tokens
+ time out is disabled (default) x > 0 Tokens time out if there
+ is no activity for x seconds The current minimum allowed value
+ for X is 300 (5 minutes)'
+ type: integer
+ format: int32
+ accessTokenMaxAgeSeconds:
+ description: accessTokenMaxAgeSeconds defines the maximum age of
+ access tokens
+ type: integer
+ format: int32
+ status:
+ description: OAuthStatus shows current known state of OAuth server in the
+ cluster
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
new file mode 100644
index 000000000..a625aa617
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
@@ -0,0 +1,63 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: projects.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ names:
+ kind: Project
+ listKind: ProjectList
+ plural: projects
+ singular: project
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Project holds cluster-wide information about Project. The canonical
+ name is `cluster`
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ projectRequestMessage:
+ description: projectRequestMessage is the string presented to a user
+ if they are unable to request a project via the projectrequest api
+ endpoint
+ type: string
+ projectRequestTemplate:
+ description: projectRequestTemplate is the template to use for creating
+ projects in response to projectrequest. This must point to a template
+ in 'openshift-config' namespace. It is optional. If it is not specified,
+ a default template is used.
+ type: object
+ properties:
+ name:
+ description: name is the metadata.name of the referenced project
+ request template
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
new file mode 100644
index 000000000..6f5336c8f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
@@ -0,0 +1,88 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: schedulers.config.openshift.io
+spec:
+ group: config.openshift.io
+ scope: Cluster
+ preserveUnknownFields: false
+ names:
+ kind: Scheduler
+ singular: scheduler
+ plural: schedulers
+ listKind: SchedulerList
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ "validation":
+ "openAPIV3Schema":
+ description: Scheduler holds cluster-wide config information to run the Kubernetes
+ Scheduler and influence its placement decisions. The canonical name for this
+ config is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ defaultNodeSelector:
+ description: 'defaultNodeSelector helps set the cluster-wide default
+ node selector to restrict pod placement to specific nodes. This is
+ applied to the pods created in all namespaces without a specified
+ nodeSelector value. For example, defaultNodeSelector: "type=user-node,region=east"
+ would set nodeSelector field in pod spec to "type=user-node,region=east"
+ to all pods created in all namespaces. Namespaces having project-wide
+ node selectors won''t be impacted even if this field is set. This
+ adds an annotation section to the namespace. For example, if a new
+ namespace is created with node-selector=''type=user-node,region=east'',
+ the annotation openshift.io/node-selector: type=user-node,region=east
+ gets added to the project. When the openshift.io/node-selector annotation
+ is set on the project the value is used in preference to the value
+ we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector:
+ "type=user-node,region=west" means that the default of "type=user-node,region=east"
+ set in defaultNodeSelector would not be applied.'
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows masters nodes to be schedulable.
+ When this flag is turned on, all the master nodes in the cluster will
+ be made schedulable, so that workload pods can run on them. The default
+ value for this field is false, meaning none of the master nodes are
+ schedulable. Important Note: Once the workload pods start running
+ on the master nodes, extreme care must be taken to ensure that cluster-critical
+ control plane components are not impacted. Please turn on this field
+ after doing due diligence.'
+ type: boolean
+ policy:
+ description: policy is a reference to a ConfigMap containing scheduler
+ policy which has user specified predicates and priorities. If this
+ ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider.
+ The namespace for this configmap is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
index ca36f6777..142748423 100644
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -167,6 +167,7 @@ type AdmissionPluginConfig struct {
// Configuration is an embedded configuration object to be used as the plugin's
// configuration. If present, it will be used instead of the path to the configuration file.
// +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
Configuration runtime.RawExtension `json:"configuration"`
}
@@ -210,6 +211,7 @@ type AuditConfig struct {
// as the audit policy configuration. If present, it will be used instead of
// the path to the policy file.
// +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
// Format of saved audits (legacy or json).
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
index ea76aec02..b347bd80e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -39,6 +39,16 @@ type APIServerSpec struct {
// The values are regular expressions that correspond to the Golang regular expression language.
// +optional
AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+ // encryption allows the configuration of encryption of resources at the datastore layer.
+ // +optional
+ Encryption APIServerEncryption `json:"encryption"`
+ // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
+ //
+ // If unset, a default (which may change between releases) is chosen. Note that only Old and
+ // Intermediate profiles are currently supported, and the maximum available MinTLSVersions
+ // is VersionTLS12.
+ // +optional
+ TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
}
type APIServerServingCerts struct {
@@ -63,6 +73,39 @@ type APIServerNamedServingCert struct {
ServingCertificate SecretNameReference `json:"servingCertificate"`
}
+type APIServerEncryption struct {
+ // type defines what encryption type should be used to encrypt resources at the datastore layer.
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied.
+ // The behavior of unset can and will change over time. Even if encryption is enabled by default,
+ // the meaning of unset may change to a different encryption type based on changes in best practices.
+ //
+ // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
+ // This list of sensitive resources can and will change over time. The current authoritative list is:
+ //
+ // 1. secrets
+ // 2. configmaps
+ // 3. routes.route.openshift.io
+ // 4. oauthaccesstokens.oauth.openshift.io
+ // 5. oauthauthorizetokens.oauth.openshift.io
+ //
+ // +unionDiscriminator
+ // +optional
+ Type EncryptionType `json:"type,omitempty"`
+}
+
+// +kubebuilder:validation:Enum="";identity;aescbc
+type EncryptionType string
+
+const (
+ // identity refers to a type where no encryption is performed at the datastore layer.
+ // Resources are written as-is without encryption.
+ EncryptionTypeIdentity EncryptionType = "identity"
+
+ // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESCBC EncryptionType = "aescbc"
+)
+
type APIServerStatus struct {
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
index 0722ddbfc..eecfe75e7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_authentication.go
+++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go
@@ -9,8 +9,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Authentication specifies cluster-wide settings for authentication (like OAuth and
// webhook token authenticators). The canonical name of an instance is `cluster`.
type Authentication struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -77,7 +76,6 @@ type AuthenticationStatus struct {
type AuthenticationList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
Items []Authentication `json:"items"`
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
index c7ed7e958..ef4512aa1 100644
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -9,10 +9,14 @@ import (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`
+// Build configures the behavior of OpenShift builds for the entire cluster.
+// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
+//
+// The canonical name is "cluster"
type Build struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
+
// Spec holds user-settable values for the build controller configuration
// +kubebuilder:validation:Required
// +required
@@ -23,6 +27,10 @@ type BuildSpec struct {
// AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
// should be trusted for image pushes and pulls during builds.
// The namespace for this config map is openshift-config.
+ //
+ // DEPRECATED: Additional CAs for image pull and push should be set on
+ // image.config.openshift.io/cluster instead.
+ //
// +optional
AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
// BuildDefaults controls the default information for Builds
@@ -95,7 +103,7 @@ type BuildOverrides struct {
type BuildList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Build `json:"items"`
+
+ Items []Build `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index af2ce846c..3681d0ff0 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -16,13 +16,13 @@ type ClusterOperator struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
- // spec hold the intent of how this operator should behave.
+ // spec holds configuration that could apply to any operator.
// +kubebuilder:validation:Required
// +required
Spec ClusterOperatorSpec `json:"spec"`
// status holds the information about the state of an operator. It is consistent with status information across
- // the kube ecosystem.
+ // the Kubernetes ecosystem.
// +optional
Status ClusterOperatorStatus `json:"status"`
}
@@ -34,15 +34,15 @@ type ClusterOperatorSpec struct {
// ClusterOperatorStatus provides information about the status of the operator.
// +k8s:deepcopy-gen=true
type ClusterOperatorStatus struct {
- // conditions describes the state of the operator's reconciliation functionality.
+ // conditions describes the state of the operator's managed and monitored components.
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
- // versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple
- // entries in the array. If an operator is Available, it must have at least one entry. You must report the version of
- // the operator itself with the name "operator".
+ // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
+ // operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
+ // An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
// +optional
Versions []OperandVersion `json:"versions,omitempty"`
@@ -57,29 +57,40 @@ type ClusterOperatorStatus struct {
// operator which owns this status object.
// +nullable
// +optional
+ // +kubebuilder:pruning:PreserveUnknownFields
Extension runtime.RawExtension `json:"extension"`
}
type OperandVersion struct {
// name is the name of the particular operand this version is for. It usually matches container images, not operators.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
- // version indicates which version of a particular operand is currently being manage. It must always match the Available
- // condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
+ // version indicates which version of a particular operand is currently being managed. It must always match the Available
+ // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
// 1.1.0
+ // +kubebuilder:validation:Required
+ // +required
Version string `json:"version"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
// group of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Group string `json:"group"`
// resource of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Resource string `json:"resource"`
// namespace of the referent.
// +optional
Namespace string `json:"namespace,omitempty"`
// name of the referent.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
}
@@ -96,41 +107,64 @@ const (
)
// ClusterOperatorStatusCondition represents the state of the operator's
-// reconciliation functionality.
+// managed and monitored components.
// +k8s:deepcopy-gen=true
type ClusterOperatorStatusCondition struct {
- // type specifies the state of the operator's reconciliation functionality.
+ // type specifies the aspect reported by this condition.
+ // +kubebuilder:validation:Required
+ // +required
Type ClusterStatusConditionType `json:"type"`
// status of the condition, one of True, False, Unknown.
+ // +kubebuilder:validation:Required
+ // +required
Status ConditionStatus `json:"status"`
- // lastTransitionTime is the time of the last update to the current status object.
+ // lastTransitionTime is the time of the last update to the current status property.
+ // +kubebuilder:validation:Required
+ // +required
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
- // reason is the reason for the condition's last transition. Reasons are CamelCase
+ // reason is the CamelCase reason for the condition's current status.
+ // +optional
Reason string `json:"reason,omitempty"`
// message provides additional information about the current condition.
// This is only to be consumed by humans.
+ // +optional
Message string `json:"message,omitempty"`
}
-// ClusterStatusConditionType is the state of the operator's reconciliation functionality.
+// ClusterStatusConditionType is an aspect of operator state.
type ClusterStatusConditionType string
const (
- // Available indicates that the binary maintained by the operator (eg: openshift-apiserver for the
+ // Available indicates that the operand (eg: openshift-apiserver for the
// openshift-apiserver-operator), is functional and available in the cluster.
OperatorAvailable ClusterStatusConditionType = "Available"
- // Progressing indicates that the operator is actively making changes to the binary maintained by the
- // operator (eg: openshift-apiserver for the openshift-apiserver-operator).
+ // Progressing indicates that the operator is actively rolling out new code,
+ // propagating config changes, or otherwise moving from one steady state to
+ // another. Operators should not report progressing when they are reconciling
+ // a previously known state.
OperatorProgressing ClusterStatusConditionType = "Progressing"
- // Degraded indicates that the operand is not functioning completely. An example of a degraded state
- // would be if there should be 5 copies of the operand running but only 4 are running. It may still be available,
- // but it is degraded
+ // Degraded indicates that the operator's current state does not match its
+ // desired state over a period of time resulting in a lower quality of service.
+ // The period of time may vary by component, but a Degraded state represents
+ // persistent observation of a condition. As a result, a component should not
+ // oscillate in and out of Degraded state. A service may be Available even
+ // if it's degraded. For example, your service may desire 3 running pods, but 1
+ // pod is crash-looping. The service is Available but Degraded because it
+ // may have a lower quality of service. A component may be Progressing but
+ // not Degraded because the transition from one state to another does not
+ // persist over a long enough period to report Degraded. A service should not
+ // report Degraded during the course of a normal upgrade. A service may report
+ // Degraded in response to a persistent infrastructure failure that requires
+ // administrator intervention. For example, if a control plane host is unhealthy
+ // and must be replaced. An operator should report Degraded if unexpected
+ // errors occur over a period, but the expectation is that all unexpected errors
+ // are handled as operators mature.
OperatorDegraded ClusterStatusConditionType = "Degraded"
// Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
index c6c2e7e43..771e962ad 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -33,6 +33,8 @@ type ClusterVersionSpec struct {
// clusterID uniquely identifies this cluster. This is expected to be
// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
// hexadecimal values). This is a required field.
+ // +kubebuilder:validation:Required
+ // +required
ClusterID ClusterID `json:"clusterID"`
// desiredUpdate is an optional field that indicates the desired value of
@@ -80,6 +82,8 @@ type ClusterVersionStatus struct {
// desired is the version that the cluster is reconciling towards.
// If the cluster is not yet fully initialized desired will be set
// with the information available, which may be an image or a tag.
+ // +kubebuilder:validation:Required
+ // +required
Desired Update `json:"desired"`
// history contains a list of the most recent versions applied to the cluster.
@@ -94,12 +98,16 @@ type ClusterVersionStatus struct {
// observedGeneration reports which version of the spec is being synced.
// If this value is not equal to metadata.generation, then the desired
- // and conditions fields may represent from a previous version.
+ // and conditions fields may represent a previous version.
+ // +kubebuilder:validation:Required
+ // +required
ObservedGeneration int64 `json:"observedGeneration"`
// versionHash is a fingerprint of the content that the cluster will be
// updated with. It is used by the operator to avoid unnecessary work
// and is for internal use only.
+ // +kubebuilder:validation:Required
+ // +required
VersionHash string `json:"versionHash"`
// conditions provides information about the cluster version. The condition
@@ -117,6 +125,8 @@ type ClusterVersionStatus struct {
// if the update service is unavailable, or if an invalid channel has
// been specified.
// +nullable
+ // +kubebuilder:validation:Required
+ // +required
AvailableUpdates []Update `json:"availableUpdates"`
}
@@ -139,14 +149,20 @@ type UpdateHistory struct {
// indicates the update is not fully applied, while the Completed state
// indicates the update was successfully rolled out at least once (all
// parts of the update successfully applied).
+ // +kubebuilder:validation:Required
+ // +required
State UpdateState `json:"state"`
// startedTime is the time at which the update was started.
+ // +kubebuilder:validation:Required
+ // +required
StartedTime metav1.Time `json:"startedTime"`
// completionTime, if set, is when the update was fully applied. The update
// that is currently being applied will have a null completion time.
// Completion time will always be set for entries that are not the current
// update (usually to the started time of the next update).
+ // +kubebuilder:validation:Required
+ // +required
// +nullable
CompletionTime *metav1.Time `json:"completionTime"`
@@ -158,9 +174,13 @@ type UpdateHistory struct {
Version string `json:"version"`
// image is a container image location that contains the update. This value
// is always populated.
+ // +kubebuilder:validation:Required
+ // +required
Image string `json:"image"`
// verified indicates whether the provided update was properly verified
// before it was installed. If this is false the cluster may not be trusted.
+ // +kubebuilder:validation:Required
+ // +required
Verified bool `json:"verified"`
}
@@ -172,19 +192,29 @@ type ClusterID string
// +k8s:deepcopy-gen=true
type ComponentOverride struct {
// kind indentifies which object to override.
+ // +kubebuilder:validation:Required
+ // +required
Kind string `json:"kind"`
// group identifies the API group that the kind is in.
+ // +kubebuilder:validation:Required
+ // +required
Group string `json:"group"`
// namespace is the component's namespace. If the resource is cluster
// scoped, the namespace should be empty.
+ // +kubebuilder:validation:Required
+ // +required
Namespace string `json:"namespace"`
// name is the component's name.
+ // +kubebuilder:validation:Required
+ // +required
Name string `json:"name"`
// unmanaged controls if cluster version operator should stop managing the
// resources in this cluster.
// Default: false
+ // +kubebuilder:validation:Required
+ // +required
Unmanaged bool `json:"unmanaged"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
index 9cda3f83b..22b0b5160 100644
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -10,8 +10,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// logout URL, and reports the public URL of the console. The canonical name is
// `cluster`.
type Console struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -40,9 +39,9 @@ type ConsoleStatus struct {
type ConsoleList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Console `json:"items"`
+
+ Items []Console `json:"items"`
}
// ConsoleAuthentication defines a list of optional configuration for console authentication.
@@ -58,6 +57,6 @@ type ConsoleAuthentication struct {
// provides the user the option to perform single logout (SLO) through the identity
// provider to destroy their single sign-on session.
// +optional
- // +kubebuilder:validation:Pattern=^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
+ // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$`
LogoutRedirect string `json:"logoutRedirect,omitempty"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
index ef04f7a67..989ef99c3 100644
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
type DNS struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -82,7 +81,7 @@ type DNSStatus struct {
type DNSList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []DNS `json:"items"`
+
+ Items []DNS `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
index 536bad191..ce9012627 100644
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Feature holds cluster-wide information about feature gates. The canonical name is `cluster`
type FeatureGate struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -35,6 +34,9 @@ var (
// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
// your cluster may fail in an unrecoverable way.
CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+
+ // LatencySensitive enables TopologyManager support. Upgrades are enabled with this feature.
+ LatencySensitive FeatureSet = "LatencySensitive"
)
type FeatureGateSpec struct {
@@ -73,9 +75,9 @@ type FeatureGateStatus struct {
type FeatureGateList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []FeatureGate `json:"items"`
+
+ Items []FeatureGate `json:"items"`
}
type FeatureGateEnabledDisabled struct {
@@ -95,24 +97,98 @@ type FeatureGateEnabledDisabled struct {
//
// If you put an item in either of these lists, put your area and name on it so we can find owners.
var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
- Default: {
- Enabled: []string{
- "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- },
- Disabled: []string{
- "LocalStorageCapacityIsolation", // sig-pod, sjenning
- },
+ Default: defaultFeatures,
+ CustomNoUpgrade: {
+ Enabled: []string{},
+ Disabled: []string{},
+ },
+ TechPreviewNoUpgrade: newDefaultFeatures().toFeatures(),
+ LatencySensitive: newDefaultFeatures().
+ with(
+ "TopologyManager", // sig-pod, sjenning
+ ).
+ toFeatures(),
+}
+
+var defaultFeatures = &FeatureGateEnabledDisabled{
+ Enabled: []string{
+ "RotateKubeletServerCertificate", // sig-pod, sjenning
+ "SupportPodPidsLimit", // sig-pod, sjenning
+ "NodeDisruptionExclusion", // sig-scheduling, ccoleman
+ "ServiceNodeExclusion", // sig-scheduling, ccoleman
+ "SCTPSupport", // sig-network, ccallend
},
- TechPreviewNoUpgrade: {
- Enabled: []string{
- "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- },
- Disabled: []string{
- "LocalStorageCapacityIsolation", // sig-pod, sjenning
- },
+ Disabled: []string{
+ "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman
},
}
+
+type featureSetBuilder struct {
+ forceOn []string
+ forceOff []string
+}
+
+func newDefaultFeatures() *featureSetBuilder {
+ return &featureSetBuilder{}
+}
+
+func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder {
+ f.forceOn = append(f.forceOn, forceOn...)
+ return f
+}
+
+func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder {
+ f.forceOff = append(f.forceOff, forceOff...)
+ return f
+}
+
+func (f *featureSetBuilder) isForcedOff(needle string) bool {
+ for _, forcedOff := range f.forceOff {
+ if needle == forcedOff {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *featureSetBuilder) isForcedOn(needle string) bool {
+ for _, forceOn := range f.forceOn {
+ if needle == forceOn {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled {
+ finalOn := []string{}
+ finalOff := []string{}
+
+ // only add the default enabled features if they haven't been explicitly set off
+ for _, defaultOn := range defaultFeatures.Enabled {
+ if !f.isForcedOff(defaultOn) {
+ finalOn = append(finalOn, defaultOn)
+ }
+ }
+ for _, currOn := range f.forceOn {
+ if f.isForcedOff(currOn) {
+ panic("coding error, you can't have features both on and off")
+ }
+ finalOn = append(finalOn, currOn)
+ }
+
+ // only add the default disabled features if they haven't been explicitly set on
+ for _, defaultOff := range defaultFeatures.Disabled {
+ if !f.isForcedOn(defaultOff) {
+ finalOff = append(finalOff, defaultOff)
+ }
+ }
+ for _, currOff := range f.forceOff {
+ finalOff = append(finalOff, currOff)
+ }
+
+ return &FeatureGateEnabledDisabled{
+ Enabled: finalOn,
+ Disabled: finalOff,
+ }
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
index f0cf220d3..bf594c1b7 100644
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -6,10 +6,14 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Image holds cluster-wide information about how to handle images. The canonical name is `cluster`
+// Image governs policies related to imagestream imports and runtime configuration
+// for external registries. It allows cluster admins to configure which registries
+// OpenShift is allowed to import images from, extra CA trust bundles for external
+// registries, and policies to blacklist/whitelist registry hostnames.
+// When exposing OpenShift's image registry to the public, this also lets cluster
+// admins specify the external hostname.
type Image struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -22,7 +26,7 @@ type Image struct {
}
type ImageSpec struct {
- // AllowedRegistriesForImport limits the container image registries that normal users may import
+ // allowedRegistriesForImport limits the container image registries that normal users may import
// images from. Set this list to the registries that you trust to contain valid Docker
// images and that you want applications to be able to import from. Users with
// permission to create Images or ImageStreamMappings via the API are not affected by
@@ -38,14 +42,14 @@ type ImageSpec struct {
// +optional
ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
- // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted during imagestream import, pod image pull, and imageregistry
- // pullthrough.
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted during imagestream import, pod image pull, build image pull, and
+ // imageregistry pullthrough.
// The namespace for this config map is openshift-config.
// +optional
AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
- // RegistrySources contains configuration that determines how the container runtime
+ // registrySources contains configuration that determines how the container runtime
// should treat individual registries when accessing images for builds+pods. (e.g.
// whether or not to allow insecure access). It does not contain configuration for the
// internal cluster registry.
@@ -55,10 +59,10 @@ type ImageSpec struct {
type ImageStatus struct {
- // this value is set by the image registry operator which controls the internal registry hostname
- // InternalRegistryHostname sets the hostname for the default internal image
+ // internalRegistryHostname sets the hostname for the default internal image
// registry. The value must be in "hostname[:port]" format.
- // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
+ // This value is set by the image registry operator which controls the internal registry
+ // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
// environment variable but this setting overrides the environment variable.
// +optional
InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
@@ -75,19 +79,19 @@ type ImageStatus struct {
type ImageList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Image `json:"items"`
+
+ Items []Image `json:"items"`
}
// RegistryLocation contains a location of the registry specified by the registry domain
// name. The domain name might include wildcards, like '*' or '??'.
type RegistryLocation struct {
- // DomainName specifies a domain name for the registry
+ // domainName specifies a domain name for the registry
// In case the registry use non-standard (80 or 443) port, the port should be included
// in the domain name as well.
DomainName string `json:"domainName"`
- // Insecure indicates whether the registry is secure (https) or insecure (http)
+ // insecure indicates whether the registry is secure (https) or insecure (http)
// By default (if not specified) the registry is assumed as secure.
// +optional
Insecure bool `json:"insecure,omitempty"`
@@ -95,15 +99,15 @@ type RegistryLocation struct {
// RegistrySources holds cluster-wide information about how to handle the registries config.
type RegistrySources struct {
- // InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.
+ // insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.
// +optional
InsecureRegistries []string `json:"insecureRegistries,omitempty"`
- // BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.
+ // blockedRegistries are blacklisted from image pull/push. All other registries are allowed.
//
// Only one of BlockedRegistries or AllowedRegistries may be set.
// +optional
BlockedRegistries []string `json:"blockedRegistries,omitempty"`
- // AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.
+ // allowedRegistries are whitelisted for image pull/push. All other registries are blocked.
//
// Only one of BlockedRegistries or AllowedRegistries may be set.
// +optional
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 4632e6ada..ac1e5048e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
type Infrastructure struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -130,6 +129,10 @@ type PlatformStatus struct {
// OpenStack contains settings specific to the OpenStack infrastructure provider.
// +optional
OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
}
// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
@@ -142,6 +145,11 @@ type AWSPlatformStatus struct {
type AzurePlatformStatus struct {
// resourceGroupName is the Resource Group for new Azure resources created for the cluster.
ResourceGroupName string `json:"resourceGroupName"`
+
+ // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+ // If empty, the value is the same as ResourceGroupName.
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
}
// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
@@ -154,6 +162,8 @@ type GCPPlatformStatus struct {
}
// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
+// For more information about the network architecture used with the BareMetal platform type, see:
+// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
type BareMetalPlatformStatus struct {
// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
// by components inside the cluster, like kubelets using the infrastructure rather
@@ -199,12 +209,33 @@ type OpenStackPlatformStatus struct {
NodeDNSIP string `json:"nodeDNSIP,omitempty"`
}
+// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.
+type OvirtPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // oVirt deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InfrastructureList is
type InfrastructureList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Infrastructure `json:"items"`
+
+ Items []Infrastructure `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
index 484a1af0b..0216919ad 100644
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -6,11 +6,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`
-// TODO this object is an example of a possible grouping and is subject to change or removal
+// Ingress holds cluster-wide information about ingress, including the default ingress domain
+// used for routes. The canonical name is `cluster`.
type Ingress struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -24,8 +23,13 @@ type Ingress struct {
type IngressSpec struct {
// domain is used to generate a default host name for a route when the
- // route's host name is empty. The generated host name will follow this
+ // route's host name is empty. The generated host name will follow this
// pattern: "<route-name>.<route-namespace>.<domain>".
+ //
+ // It is also used as the default wildcard domain suffix for ingress. The
+ // default ingresscontroller domain will follow this pattern: "*.<domain>".
+ //
+ // Once set, changing domain is not currently supported.
Domain string `json:"domain"`
}
@@ -36,7 +40,7 @@ type IngressStatus struct {
type IngressList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Ingress `json:"items"`
+
+ Items []Ingress `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
index a60c5f7dc..a09c5fe8e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -6,14 +6,16 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// Network holds cluster-wide information about Network. The canonical name is `cluster`
-// TODO this object is an example of a possible grouping and is subject to change or removal
+// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
+// Please view network.spec for an explanation on what applies when configuring this resource.
type Network struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration.
+ // As a general rule, this SHOULD NOT be read directly. Instead, you should
+ // consume the NetworkStatus, as it indicates the currently deployed configuration.
+ // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
// +kubebuilder:validation:Required
// +required
Spec NetworkSpec `json:"spec"`
@@ -25,14 +27,15 @@ type Network struct {
// NetworkSpec is the desired network configuration.
// As a general rule, this SHOULD NOT be read directly. Instead, you should
// consume the NetworkStatus, as it indicates the currently deployed configuration.
-// Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after
-// installation is not supported.
+// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
type NetworkSpec struct {
// IP address pool to use for pod IPs.
+ // This field is immutable after installation.
ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
// IP address pool for services.
// Currently, we only support a single entry here.
+ // This field is immutable after installation.
ServiceNetwork []string `json:"serviceNetwork"`
// NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
@@ -40,10 +43,12 @@ type NetworkSpec struct {
// or else no networking will be installed.
// Currently supported values are:
// - OpenShiftSDN
+ // This field is immutable after installation.
NetworkType string `json:"networkType"`
// externalIP defines configuration for controllers that
- // affect Service.ExternalIP
+ // affect Service.ExternalIP. If nil, then ExternalIP is
+ // not allowed to be set.
// +optional
ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
}
@@ -71,6 +76,7 @@ type ClusterNetworkEntry struct {
CIDR string `json:"cidr"`
// The size (prefix) of block to allocate to each node.
+ // +kubebuilder:validation:Minimum=0
HostPrefix uint32 `json:"hostPrefix"`
}
@@ -78,8 +84,7 @@ type ClusterNetworkEntry struct {
// of a Service resource.
type ExternalIPConfig struct {
// policy is a set of restrictions applied to the ExternalIP field.
- // If nil, any value is allowed for an ExternalIP. If the empty/zero
- // policy is supplied, then ExternalIP is not allowed to be set.
+ // If nil or empty, then ExternalIP is not allowed to be set.
// +optional
Policy *ExternalIPPolicy `json:"policy,omitempty"`
@@ -111,7 +116,7 @@ type ExternalIPPolicy struct {
type NetworkList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Network `json:"items"`
+
+ Items []Network `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
index cf821f9e3..1d998bf37 100644
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -6,10 +6,19 @@ import (
// OperatorHubSpec defines the desired state of OperatorHub
type OperatorHubSpec struct {
+ // disableAllDefaultSources allows you to disable all the default hub
+ // sources. If this is true, a specific entry in sources can be used to
+ // enable a default source. If this is false, a specific entry in
+ // sources can be used to disable or enable a default source.
+ // +optional
+ DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
// sources is the list of default hub sources and their configuration.
- // If the list is empty, it indicates that the default hub sources are
- // enabled on the cluster. The list of default hub sources and their
- // current state will always be reflected in the status block.
+ // If the list is empty, it implies that the default hub sources are
+ // enabled on the cluster unless disableAllDefaultSources is true.
+ // If disableAllDefaultSources is true and sources is not empty,
+ // the configuration present in sources will take precedence. The list of
+ // default hub sources and their current state will always be reflected in
+ // the status block.
// +optional
Sources []HubSource `json:"sources,omitempty"`
}
@@ -61,9 +70,9 @@ type HubSource struct {
// HubSourceStatus is used to reflect the current state of applying the
// configuration to a default source
type HubSourceStatus struct {
- HubSource
+ HubSource `json:",omitempty"`
// status indicates success or failure in applying the configuration
- Status string `json:"status"`
+ Status string `json:"status,omitempty"`
// message provides more information regarding failures
Message string `json:"message,omitempty"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
index 61152a6f7..244ce3ef8 100644
--- a/vendor/github.com/openshift/api/config/v1/types_project.go
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -8,8 +8,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Project holds cluster-wide information about Project. The canonical name is `cluster`
type Project struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -49,7 +48,7 @@ type ProjectStatus struct {
type ProjectList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Project `json:"items"`
+
+ Items []Project `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
index 1413a48ca..93f4c487e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -12,6 +12,7 @@ import (
type Proxy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
+
// Spec holds user-settable values for the proxy configuration
// +kubebuilder:validation:Required
// +required
@@ -83,7 +84,7 @@ type ProxyStatus struct {
type ProxyList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Proxy `json:"items"`
+
+ Items []Proxy `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
index 9b8fa3a52..d5bf0c362 100644
--- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -9,8 +9,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
// and influence its placement decisions. The canonical name for this config is `cluster`.
type Scheduler struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
+ metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// spec holds user settable values for configuration
@@ -69,7 +68,7 @@ type SchedulerStatus struct {
type SchedulerList struct {
metav1.TypeMeta `json:",inline"`
- // Standard object's metadata.
metav1.ListMeta `json:"metadata"`
- Items []Scheduler `json:"items"`
+
+ Items []Scheduler `json:"items"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
new file mode 100644
index 000000000..ea788dc16
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
@@ -0,0 +1,260 @@
+package v1
+
+// TLSSecurityProfile defines the schema for a TLS security profile. This object
+// is used by operators to apply TLS security settings to operands.
+// +union
+type TLSSecurityProfile struct {
+ // type is one of Old, Intermediate, Modern or Custom. Custom provides
+ // the ability to specify individual TLS security profile parameters.
+ // Old, Intermediate and Modern are TLS security profiles based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ //
+ // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
+ // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
+ // reduced.
+ //
+ // Note that the Modern profile is currently not supported because it is not
+ // yet well adopted by common software libraries.
+ //
+ // +unionDiscriminator
+ // +optional
+ Type TLSProfileType `json:"type"`
+ // old is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - DHE-RSA-AES128-GCM-SHA256
+ // - DHE-RSA-AES256-GCM-SHA384
+ // - DHE-RSA-CHACHA20-POLY1305
+ // - ECDHE-ECDSA-AES128-SHA256
+ // - ECDHE-RSA-AES128-SHA256
+ // - ECDHE-ECDSA-AES128-SHA
+ // - ECDHE-RSA-AES128-SHA
+ // - ECDHE-ECDSA-AES256-SHA384
+ // - ECDHE-RSA-AES256-SHA384
+ // - ECDHE-ECDSA-AES256-SHA
+ // - ECDHE-RSA-AES256-SHA
+ // - DHE-RSA-AES128-SHA256
+ // - DHE-RSA-AES256-SHA256
+ // - AES128-GCM-SHA256
+ // - AES256-GCM-SHA384
+ // - AES128-SHA256
+ // - AES256-SHA256
+ // - AES128-SHA
+ // - AES256-SHA
+ // - DES-CBC3-SHA
+ // minTLSVersion: TLSv1.0
+ //
+ // +optional
+ // +nullable
+ Old *OldTLSProfile `json:"old,omitempty"`
+ // intermediate is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - DHE-RSA-AES128-GCM-SHA256
+ // - DHE-RSA-AES256-GCM-SHA384
+ // minTLSVersion: TLSv1.2
+ //
+ // +optional
+ // +nullable
+ Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
+ // modern is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ // - TLS_AES_128_GCM_SHA256
+ // - TLS_AES_256_GCM_SHA384
+ // - TLS_CHACHA20_POLY1305_SHA256
+ // minTLSVersion: TLSv1.3
+ //
+ // NOTE: Currently unsupported.
+ //
+ // +optional
+ // +nullable
+ Modern *ModernTLSProfile `json:"modern,omitempty"`
+ // custom is a user-defined TLS security profile. Be extremely careful using a custom
+ // profile as invalid configurations can be catastrophic. An example custom profile
+ // looks like this:
+ //
+ // ciphers:
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ // minTLSVersion: TLSv1.1
+ //
+ // +optional
+ // +nullable
+ Custom *CustomTLSProfile `json:"custom,omitempty"`
+}
+
+// OldTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+type OldTLSProfile struct{}
+
+// IntermediateTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+type IntermediateTLSProfile struct{}
+
+// ModernTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+type ModernTLSProfile struct{}
+
+// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
+// using a custom TLS profile as invalid configurations can be catastrophic.
+type CustomTLSProfile struct {
+ TLSProfileSpec `json:",inline"`
+}
+
+// TLSProfileType defines a TLS security profile type.
+type TLSProfileType string
+
+const (
+ // TLSProfileOldType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ TLSProfileOldType TLSProfileType = "Old"
+ // TLSProfileIntermediateType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ TLSProfileIntermediateType TLSProfileType = "Intermediate"
+ // TLSProfileModernType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ TLSProfileModernType TLSProfileType = "Modern"
+ // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters.
+ TLSProfileCustomType TLSProfileType = "Custom"
+)
+
+// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
+type TLSProfileSpec struct {
+ // ciphers is used to specify the cipher algorithms that are negotiated
+ // during the TLS handshake. Operators may remove entries their operands
+ // do not support. For example, to use DES-CBC3-SHA (yaml):
+ //
+ // ciphers:
+ // - DES-CBC3-SHA
+ //
+ Ciphers []string `json:"ciphers"`
+ // minTLSVersion is used to specify the minimal version of the TLS protocol
+ // that is negotiated during the TLS handshake. For example, to use TLS
+ // versions 1.1, 1.2 and 1.3 (yaml):
+ //
+ // minTLSVersion: TLSv1.1
+ //
+ // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
+ //
+ MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
+}
+
+// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
+// Protocol versions are based on the following most common TLS configurations:
+//
+// https://ssl-config.mozilla.org/
+//
+// Note that SSLv3.0 is not a supported protocol version due to well known
+// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
+type TLSProtocolVersion string
+
+const (
+ // VersionTLS10 is version 1.0 of the TLS security protocol.
+ VersionTLS10 TLSProtocolVersion = "VersionTLS10"
+ // VersionTLS11 is version 1.1 of the TLS security protocol.
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11"
+ // VersionTLS12 is version 1.2 of the TLS security protocol.
+ VersionTLS12 TLSProtocolVersion = "VersionTLS12"
+ // VersionTLS13 is version 1.3 of the TLS security protocol.
+ VersionTLS13 TLSProtocolVersion = "VersionTLS13"
+)
+
+// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
+//
+// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
+// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
+// just be sure to whitelist only and everything will be ok.
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
+ TLSProfileOldType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ "DHE-RSA-CHACHA20-POLY1305",
+ "ECDHE-ECDSA-AES128-SHA256",
+ "ECDHE-RSA-AES128-SHA256",
+ "ECDHE-ECDSA-AES128-SHA",
+ "ECDHE-RSA-AES128-SHA",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDHE-ECDSA-AES256-SHA",
+ "ECDHE-RSA-AES256-SHA",
+ "DHE-RSA-AES128-SHA256",
+ "DHE-RSA-AES256-SHA256",
+ "AES128-GCM-SHA256",
+ "AES256-GCM-SHA384",
+ "AES128-SHA256",
+ "AES256-SHA256",
+ "AES128-SHA",
+ "AES256-SHA",
+ "DES-CBC3-SHA",
+ },
+ MinTLSVersion: VersionTLS10,
+ },
+ TLSProfileIntermediateType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ },
+ MinTLSVersion: VersionTLS12,
+ },
+ TLSProfileModernType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ },
+ MinTLSVersion: VersionTLS13,
+ },
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 3d44627f9..37888a939 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -38,10 +38,26 @@ func (in *APIServer) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption.
+func (in *APIServerEncryption) DeepCopy() *APIServerEncryption {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerEncryption)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIServerList) DeepCopyInto(out *APIServerList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]APIServer, len(*in))
@@ -125,6 +141,12 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ out.Encryption = in.Encryption
+ if in.TLSSecurityProfile != nil {
+ in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
+ *out = new(TLSSecurityProfile)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -269,7 +291,7 @@ func (in *Authentication) DeepCopyObject() runtime.Object {
func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Authentication, len(*in))
@@ -456,7 +478,7 @@ func (in *BuildDefaults) DeepCopy() *BuildDefaults {
func (in *BuildList) DeepCopyInto(out *BuildList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Build, len(*in))
@@ -619,7 +641,7 @@ func (in *ClusterOperator) DeepCopyObject() runtime.Object {
func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterOperator, len(*in))
@@ -747,7 +769,7 @@ func (in *ClusterVersion) DeepCopyObject() runtime.Object {
func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterVersion, len(*in))
@@ -934,7 +956,7 @@ func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Console, len(*in))
@@ -1023,6 +1045,23 @@ func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
+ *out = *in
+ in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
+func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNS) DeepCopyInto(out *DNS) {
*out = *in
out.TypeMeta = in.TypeMeta
@@ -1054,7 +1093,7 @@ func (in *DNS) DeepCopyObject() runtime.Object {
func (in *DNSList) DeepCopyInto(out *DNSList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DNS, len(*in))
@@ -1329,7 +1368,7 @@ func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FeatureGate, len(*in))
@@ -1730,7 +1769,7 @@ func (in *ImageLabel) DeepCopy() *ImageLabel {
func (in *ImageList) DeepCopyInto(out *ImageList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Image, len(*in))
@@ -1840,7 +1879,7 @@ func (in *Infrastructure) DeepCopyObject() runtime.Object {
func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Infrastructure, len(*in))
@@ -1939,7 +1978,7 @@ func (in *Ingress) DeepCopyObject() runtime.Object {
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
@@ -2001,6 +2040,22 @@ func (in *IngressStatus) DeepCopy() *IngressStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile.
+func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(IntermediateTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
*out = *in
out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
@@ -2109,6 +2164,22 @@ func (in *LeaderElection) DeepCopy() *LeaderElection {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
+func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(ModernTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
*out = *in
if in.Names != nil {
@@ -2162,7 +2233,7 @@ func (in *Network) DeepCopyObject() runtime.Object {
func (in *NetworkList) DeepCopyInto(out *NetworkList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Network, len(*in))
@@ -2280,7 +2351,7 @@ func (in *OAuth) DeepCopyObject() runtime.Object {
func (in *OAuthList) DeepCopyInto(out *OAuthList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OAuth, len(*in))
@@ -2405,6 +2476,22 @@ func (in *ObjectReference) DeepCopy() *ObjectReference {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
+func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(OldTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
*out = *in
if in.PreferredUsername != nil {
@@ -2530,7 +2617,7 @@ func (in *OperatorHub) DeepCopyObject() runtime.Object {
func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OperatorHub, len(*in))
@@ -2602,6 +2689,22 @@ func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
+func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OvirtPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
*out = *in
if in.AWS != nil {
@@ -2629,6 +2732,11 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
*out = new(OpenStackPlatformStatus)
**out = **in
}
+ if in.Ovirt != nil {
+ in, out := &in.Ovirt, &out.Ovirt
+ *out = new(OvirtPlatformStatus)
+ **out = **in
+ }
return
}
@@ -2674,7 +2782,7 @@ func (in *Project) DeepCopyObject() runtime.Object {
func (in *ProjectList) DeepCopyInto(out *ProjectList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Project, len(*in))
@@ -2768,7 +2876,7 @@ func (in *Proxy) DeepCopyObject() runtime.Object {
func (in *ProxyList) DeepCopyInto(out *ProxyList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Proxy, len(*in))
@@ -2973,7 +3081,7 @@ func (in *Scheduler) DeepCopyObject() runtime.Object {
func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Scheduler, len(*in))
@@ -3114,6 +3222,63 @@ func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
+ *out = *in
+ if in.Ciphers != nil {
+ in, out := &in.Ciphers, &out.Ciphers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
+func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
+ *out = *in
+ if in.Old != nil {
+ in, out := &in.Old, &out.Old
+ *out = new(OldTLSProfile)
+ **out = **in
+ }
+ if in.Intermediate != nil {
+ in, out := &in.Intermediate, &out.Intermediate
+ *out = new(IntermediateTLSProfile)
+ **out = **in
+ }
+ if in.Modern != nil {
+ in, out := &in.Modern, &out.Modern
+ *out = new(ModernTLSProfile)
+ **out = **in
+ }
+ if in.Custom != nil {
+ in, out := &in.Custom, &out.Custom
+ *out = new(CustomTLSProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
+func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSSecurityProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
*out = *in
return
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 6bd877a4f..2d6b19d2d 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -251,6 +251,14 @@ func (APIServer) SwaggerDoc() map[string]string {
return map_APIServer
}
+var map_APIServerEncryption = map[string]string{
+ "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
+}
+
+func (APIServerEncryption) SwaggerDoc() map[string]string {
+ return map_APIServerEncryption
+}
+
var map_APIServerNamedServingCert = map[string]string{
"": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
"names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.",
@@ -273,6 +281,8 @@ var map_APIServerSpec = map[string]string{
"servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
"clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
"additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+ "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
+ "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.",
}
func (APIServerSpec) SwaggerDoc() map[string]string {
@@ -280,24 +290,15 @@ func (APIServerSpec) SwaggerDoc() map[string]string {
}
var map_Authentication = map[string]string{
- "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Authentication) SwaggerDoc() map[string]string {
return map_Authentication
}
-var map_AuthenticationList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (AuthenticationList) SwaggerDoc() map[string]string {
- return map_AuthenticationList
-}
-
var map_AuthenticationSpec = map[string]string{
"type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
"oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
@@ -326,7 +327,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
}
var map_Build = map[string]string{
- "": "Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`",
+ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"",
"spec": "Spec holds user-settable values for the build controller configuration",
}
@@ -346,14 +347,6 @@ func (BuildDefaults) SwaggerDoc() map[string]string {
return map_BuildDefaults
}
-var map_BuildList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (BuildList) SwaggerDoc() map[string]string {
- return map_BuildList
-}
-
var map_BuildOverrides = map[string]string{
"imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
"nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
@@ -365,7 +358,7 @@ func (BuildOverrides) SwaggerDoc() map[string]string {
}
var map_BuildSpec = map[string]string{
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.",
+ "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
"buildDefaults": "BuildDefaults controls the default information for Builds",
"buildOverrides": "BuildOverrides controls override settings for builds",
}
@@ -385,8 +378,8 @@ func (ImageLabel) SwaggerDoc() map[string]string {
var map_ClusterOperator = map[string]string{
"": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.",
- "spec": "spec hold the intent of how this operator should behave.",
- "status": "status holds the information about the state of an operator. It is consistent with status information across the kube ecosystem.",
+ "spec": "spec holds configuration that could apply to any operator.",
+ "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
}
func (ClusterOperator) SwaggerDoc() map[string]string {
@@ -411,8 +404,8 @@ func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
var map_ClusterOperatorStatus = map[string]string{
"": "ClusterOperatorStatus provides information about the status of the operator.",
- "conditions": "conditions describes the state of the operator's reconciliation functionality.",
- "versions": "versions is a slice of operand version tuples. Operators which manage multiple operands will have multiple entries in the array. If an operator is Available, it must have at least one entry. You must report the version of the operator itself with the name \"operator\".",
+ "conditions": "conditions describes the state of the operator's managed and monitored components.",
+ "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
"relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
"extension": "extension contains any additional status information specific to the operator which owns this status object.",
}
@@ -422,11 +415,11 @@ func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
}
var map_ClusterOperatorStatusCondition = map[string]string{
- "": "ClusterOperatorStatusCondition represents the state of the operator's reconciliation functionality.",
- "type": "type specifies the state of the operator's reconciliation functionality.",
+ "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
+ "type": "type specifies the aspect reported by this condition.",
"status": "status of the condition, one of True, False, Unknown.",
- "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status object.",
- "reason": "reason is the reason for the condition's last transition. Reasons are CamelCase",
+ "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
+ "reason": "reason is the CamelCase reason for the condition's current status.",
"message": "message provides additional information about the current condition. This is only to be consumed by humans.",
}
@@ -448,7 +441,7 @@ func (ObjectReference) SwaggerDoc() map[string]string {
var map_OperandVersion = map[string]string{
"name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
- "version": "version indicates which version of a particular operand is currently being manage. It must always match the Available condition. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
+ "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0",
}
func (OperandVersion) SwaggerDoc() map[string]string {
@@ -490,7 +483,7 @@ var map_ClusterVersionStatus = map[string]string{
"": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
"desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
"history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
- "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent from a previous version.",
+ "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
"versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
"conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
"availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
@@ -539,10 +532,9 @@ func (UpdateHistory) SwaggerDoc() map[string]string {
}
var map_Console = map[string]string{
- "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Console) SwaggerDoc() map[string]string {
@@ -558,14 +550,6 @@ func (ConsoleAuthentication) SwaggerDoc() map[string]string {
return map_ConsoleAuthentication
}
-var map_ConsoleList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ConsoleList) SwaggerDoc() map[string]string {
- return map_ConsoleList
-}
-
var map_ConsoleSpec = map[string]string{
"": "ConsoleSpec is the specification of the desired behavior of the Console.",
}
@@ -584,24 +568,15 @@ func (ConsoleStatus) SwaggerDoc() map[string]string {
}
var map_DNS = map[string]string{
- "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (DNS) SwaggerDoc() map[string]string {
return map_DNS
}
-var map_DNSList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (DNSList) SwaggerDoc() map[string]string {
- return map_DNSList
-}
-
var map_DNSSpec = map[string]string{
"baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
"publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
@@ -632,24 +607,15 @@ func (CustomFeatureGates) SwaggerDoc() map[string]string {
}
var map_FeatureGate = map[string]string{
- "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (FeatureGate) SwaggerDoc() map[string]string {
return map_FeatureGate
}
-var map_FeatureGateList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (FeatureGateList) SwaggerDoc() map[string]string {
- return map_FeatureGateList
-}
-
var map_FeatureGateSelection = map[string]string{
"featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
"customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.",
@@ -660,29 +626,20 @@ func (FeatureGateSelection) SwaggerDoc() map[string]string {
}
var map_Image = map[string]string{
- "": "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to blacklist/whitelist registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Image) SwaggerDoc() map[string]string {
return map_Image
}
-var map_ImageList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ImageList) SwaggerDoc() map[string]string {
- return map_ImageList
-}
-
var map_ImageSpec = map[string]string{
- "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
- "registrySources": "RegistrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
+ "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
+ "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
}
func (ImageSpec) SwaggerDoc() map[string]string {
@@ -690,7 +647,7 @@ func (ImageSpec) SwaggerDoc() map[string]string {
}
var map_ImageStatus = map[string]string{
- "internalRegistryHostname": "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
+ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
}
@@ -700,8 +657,8 @@ func (ImageStatus) SwaggerDoc() map[string]string {
var map_RegistryLocation = map[string]string{
"": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
- "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
- "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
+ "domainName": "domainName specifies a domain name for the registry. In case the registry uses a non-standard (80 or 443) port, the port should be included in the domain name as well.",
+ "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed as secure.",
}
func (RegistryLocation) SwaggerDoc() map[string]string {
@@ -710,9 +667,9 @@ func (RegistryLocation) SwaggerDoc() map[string]string {
var map_RegistrySources = map[string]string{
"": "RegistrySources holds cluster-wide information about how to handle the registries config.",
- "insecureRegistries": "InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.",
- "blockedRegistries": "BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
- "allowedRegistries": "AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.",
+ "blockedRegistries": "blockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "allowedRegistries": "allowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
}
func (RegistrySources) SwaggerDoc() map[string]string {
@@ -729,8 +686,9 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string {
}
var map_AzurePlatformStatus = map[string]string{
- "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
- "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+ "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
+ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+ "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.",
}
func (AzurePlatformStatus) SwaggerDoc() map[string]string {
@@ -738,7 +696,7 @@ func (AzurePlatformStatus) SwaggerDoc() map[string]string {
}
var map_BareMetalPlatformStatus = map[string]string{
- "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.",
+ "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md",
"apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
"ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
"nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
@@ -759,10 +717,9 @@ func (GCPPlatformStatus) SwaggerDoc() map[string]string {
}
var map_Infrastructure = map[string]string{
- "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Infrastructure) SwaggerDoc() map[string]string {
@@ -770,8 +727,7 @@ func (Infrastructure) SwaggerDoc() map[string]string {
}
var map_InfrastructureList = map[string]string{
- "": "InfrastructureList is",
- "metadata": "Standard object's metadata.",
+ "": "InfrastructureList is a list of Infrastructure resources.",
}
func (InfrastructureList) SwaggerDoc() map[string]string {
@@ -813,6 +769,17 @@ func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
return map_OpenStackPlatformStatus
}
+var map_OvirtPlatformStatus = map[string]string{
+ "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for oVirt deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+}
+
+func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
+ return map_OvirtPlatformStatus
+}
+
var map_PlatformStatus = map[string]string{
"": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
@@ -821,6 +788,7 @@ var map_PlatformStatus = map[string]string{
"gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
"baremetal": "BareMetal contains settings specific to the BareMetal platform.",
"openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
}
func (PlatformStatus) SwaggerDoc() map[string]string {
@@ -828,26 +796,17 @@ func (PlatformStatus) SwaggerDoc() map[string]string {
}
var map_Ingress = map[string]string{
- "": "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Ingress) SwaggerDoc() map[string]string {
return map_Ingress
}
-var map_IngressList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (IngressList) SwaggerDoc() map[string]string {
- return map_IngressList
-}
-
var map_IngressSpec = map[string]string{
- "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".",
+ "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
}
func (IngressSpec) SwaggerDoc() map[string]string {
@@ -866,7 +825,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
var map_ExternalIPConfig = map[string]string{
"": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
- "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil, any value is allowed for an ExternalIP. If the empty/zero policy is supplied, then ExternalIP is not allowed to be set.",
+ "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
"autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
}
@@ -885,30 +844,21 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string {
}
var map_Network = map[string]string{
- "": "Network holds cluster-wide information about Network. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration.",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.",
+ "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Network) SwaggerDoc() map[string]string {
return map_Network
}
-var map_NetworkList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (NetworkList) SwaggerDoc() map[string]string {
- return map_NetworkList
-}
-
var map_NetworkSpec = map[string]string{
- "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after installation is not supported.",
- "clusterNetwork": "IP address pool to use for pod IPs.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
- "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN",
- "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP",
+ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
+ "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN. This field is immutable after installation.",
+ "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
}
func (NetworkSpec) SwaggerDoc() map[string]string {
@@ -1183,8 +1133,9 @@ func (OperatorHubList) SwaggerDoc() map[string]string {
}
var map_OperatorHubSpec = map[string]string{
- "": "OperatorHubSpec defines the desired state of OperatorHub",
- "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it indicates that the default hub sources are enabled on the cluster. The list of default hub sources and their current state will always be reflected in the status block.",
+ "": "OperatorHubSpec defines the desired state of OperatorHub",
+ "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.",
+ "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.",
}
func (OperatorHubSpec) SwaggerDoc() map[string]string {
@@ -1201,24 +1152,15 @@ func (OperatorHubStatus) SwaggerDoc() map[string]string {
}
var map_Project = map[string]string{
- "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Project) SwaggerDoc() map[string]string {
return map_Project
}
-var map_ProjectList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ProjectList) SwaggerDoc() map[string]string {
- return map_ProjectList
-}
-
var map_ProjectSpec = map[string]string{
"": "ProjectSpec holds the project creation configuration.",
"projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
@@ -1248,14 +1190,6 @@ func (Proxy) SwaggerDoc() map[string]string {
return map_Proxy
}
-var map_ProxyList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (ProxyList) SwaggerDoc() map[string]string {
- return map_ProxyList
-}
-
var map_ProxySpec = map[string]string{
"": "ProxySpec contains cluster proxy creation configuration.",
"httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
@@ -1281,24 +1215,15 @@ func (ProxyStatus) SwaggerDoc() map[string]string {
}
var map_Scheduler = map[string]string{
- "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
- "metadata": "Standard object's metadata.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
+ "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
}
func (Scheduler) SwaggerDoc() map[string]string {
return map_Scheduler
}
-var map_SchedulerList = map[string]string{
- "metadata": "Standard object's metadata.",
-}
-
-func (SchedulerList) SwaggerDoc() map[string]string {
- return map_SchedulerList
-}
-
var map_SchedulerSpec = map[string]string{
"policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
"defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
@@ -1309,4 +1234,59 @@ func (SchedulerSpec) SwaggerDoc() map[string]string {
return map_SchedulerSpec
}
+var map_CustomTLSProfile = map[string]string{
+ "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.",
+}
+
+func (CustomTLSProfile) SwaggerDoc() map[string]string {
+ return map_CustomTLSProfile
+}
+
+var map_IntermediateTLSProfile = map[string]string{
+ "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29",
+}
+
+func (IntermediateTLSProfile) SwaggerDoc() map[string]string {
+ return map_IntermediateTLSProfile
+}
+
+var map_ModernTLSProfile = map[string]string{
+ "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility",
+}
+
+func (ModernTLSProfile) SwaggerDoc() map[string]string {
+ return map_ModernTLSProfile
+}
+
+var map_OldTLSProfile = map[string]string{
+ "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility",
+}
+
+func (OldTLSProfile) SwaggerDoc() map[string]string {
+ return map_OldTLSProfile
+}
+
+var map_TLSProfileSpec = map[string]string{
+ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
+ "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA",
+ "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12",
+}
+
+func (TLSProfileSpec) SwaggerDoc() map[string]string {
+ return map_TLSProfileSpec
+}
+
+var map_TLSSecurityProfile = map[string]string{
+ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.",
+ "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.",
+ "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0",
+ "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2",
+ "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.",
+ "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1",
+}
+
+func (TLSSecurityProfile) SwaggerDoc() map[string]string {
+ return map_TLSSecurityProfile
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/pkg/errors/cause.go b/vendor/github.com/pkg/errors/cause.go
deleted file mode 100644
index 566f88bb0..000000000
--- a/vendor/github.com/pkg/errors/cause.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !go1.13
-
-package errors
-
-// Cause recursively unwraps an error chain and returns the underlying cause of
-// the error, if possible. An error value has a cause if it implements the
-// following interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- cause, ok := err.(causer)
- if !ok {
- break
- }
- err = cause.Cause()
- }
- return err
-}
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
index a9840ecee..161aea258 100644
--- a/vendor/github.com/pkg/errors/errors.go
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -260,3 +260,29 @@ func (w *withMessage) Format(s fmt.State, verb rune) {
io.WriteString(s, w.Error())
}
}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
index ed0dc7a6d..be0d10d0c 100644
--- a/vendor/github.com/pkg/errors/go113.go
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -36,36 +36,3 @@ func As(err error, target interface{}) bool { return stderrors.As(err, target) }
func Unwrap(err error) error {
return stderrors.Unwrap(err)
}
-
-// Cause recursively unwraps an error chain and returns the underlying cause of
-// the error, if possible. There are two ways that an error value may provide a
-// cause. First, the error may implement the following interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// Second, the error may return a non-nil value when passed as an argument to
-// the Unwrap function. This makes Cause forwards-compatible with Go 1.13 error
-// chains.
-//
-// If an error value satisfies both methods of unwrapping, Cause will use the
-// causer interface.
-//
-// If the error is nil, nil will be returned without further investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- if cause, ok := err.(causer); ok {
- err = cause.Cause()
- } else if unwrapped := Unwrap(err); unwrapped != nil {
- err = unwrapped
- } else {
- break
- }
- }
- return err
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 5e7e9d5e5..818568b28 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,6 +1,34 @@
Changes by Version
==================
+2.22.1 (2020-01-16)
+-------------------
+
+- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
+
+
+2.22.0 (2020-01-15)
+-------------------
+
+- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
+- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
+
+
+2.21.1 (2019-12-20)
+-------------------
+
+- Update version correctly.
+
+
+2.21.0 (2019-12-20)
+-------------------
+
+- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
+- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
+- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
+- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
+
+
2.20.1 (2019-11-08)
-------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index 0cfe6a5f6..d5e962ccf 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -83,8 +83,12 @@ cover-html: cover
test-examples:
make -C examples
+.PHONY: thrift
+thrift: idl-submodule thrift-compile
+
# TODO at the moment we're not generating tchan_*.go files
-thrift: idl-submodule thrift-image
+.PHONY: thrift-compile
+thrift-compile: thrift-image
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
@@ -99,10 +103,12 @@ thrift: idl-submodule thrift-image
rm -rf crossdock/thrift/*/*-remote
rm -rf thrift-gen/jaeger/collector.go
+.PHONY: idl-submodule
idl-submodule:
git submodule init
git submodule update
+.PHONY: thrift-image
thrift-image:
$(THRIFT) -version
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index a3366114d..0e4d9fc0b 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -45,7 +45,7 @@ and [config/example_test.go](./config/example_test.go).
### Environment variables
The tracer can be initialized with values coming from environment variables. None of the env vars are required
-and all of them can be overriden via direct setting of the property on the configuration object.
+and all of them can be overridden via direct setting of the property on the configuration object.
Property| Description
--- | ---
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index a0c32d804..44e93533c 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -134,6 +134,10 @@ type ReporterConfig struct {
// Password instructs reporter to include a password for basic http authentication when sending spans to
// jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD
Password string `yaml:"password"`
+
+ // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
+ // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
+ HTTPHeaders map[string]string `yaml:"http_headers"`
}
// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
@@ -397,11 +401,12 @@ func (rc *ReporterConfig) NewReporter(
func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
switch {
- case rc.CollectorEndpoint != "" && rc.User != "" && rc.Password != "":
- return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1),
- transport.HTTPBasicAuth(rc.User, rc.Password)), nil
case rc.CollectorEndpoint != "":
- return transport.NewHTTPTransport(rc.CollectorEndpoint, transport.HTTPBatchSize(1)), nil
+ httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)}
+ if rc.User != "" && rc.Password != "" {
+ httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
+ }
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
default:
return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
}
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index 5d27b628d..1702c7de4 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@ import (
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.20.1"
+ JaegerClientVersion = "Go-2.22.1"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
new file mode 100644
index 000000000..fe0bef268
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporterstats
+
+// ReporterStats exposes some metrics from the RemoteReporter.
+type ReporterStats interface {
+ SpansDroppedFromQueue() int64
+}
+
+// Receiver can be implemented by a Transport to be given ReporterStats.
+type Receiver interface {
+ SetReporterStats(ReporterStats)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
index f0f1afe2f..3ac2f8f94 100644
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -24,6 +24,7 @@ import (
)
// BuildJaegerThrift builds jaeger span based on internal span.
+// TODO: (breaking change) move to internal package.
func BuildJaegerThrift(span *Span) *j.Span {
span.Lock()
defer span.Unlock()
@@ -46,6 +47,7 @@ func BuildJaegerThrift(span *Span) *j.Span {
}
// BuildJaegerProcessThrift creates a thrift Process type.
+// TODO: (breaking change) move to internal package.
func BuildJaegerProcessThrift(span *Span) *j.Process {
span.Lock()
defer span.Unlock()
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
index 0b78cec20..830b5a4bb 100644
--- a/vendor/github.com/uber/jaeger-client-go/reporter.go
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -22,6 +22,7 @@ import (
"github.com/opentracing/opentracing-go"
+ "github.com/uber/jaeger-client-go/internal/reporterstats"
"github.com/uber/jaeger-client-go/log"
)
@@ -176,16 +177,31 @@ type reporterQueueItem struct {
close *sync.WaitGroup
}
+// reporterStats implements reporterstats.ReporterStats.
+type reporterStats struct {
+ droppedCount int64 // provided to Transports to report data loss to the backend
+}
+
+// SpansDroppedFromQueue implements reporterstats.ReporterStats.
+func (r *reporterStats) SpansDroppedFromQueue() int64 {
+ return atomic.LoadInt64(&r.droppedCount)
+}
+
+func (r *reporterStats) incDroppedCount() {
+ atomic.AddInt64(&r.droppedCount, 1)
+}
+
type remoteReporter struct {
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- queueLength int64
+ queueLength int64 // used to update metrics.Gauge
closed int64 // 0 - not closed, 1 - closed
reporterOptions
- sender Transport
- queue chan reporterQueueItem
+ sender Transport
+ queue chan reporterQueueItem
+ reporterStats *reporterStats
}
// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
@@ -213,6 +229,10 @@ func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
reporterOptions: options,
sender: sender,
queue: make(chan reporterQueueItem, options.queueSize),
+ reporterStats: new(reporterStats),
+ }
+ if receiver, ok := sender.(reporterstats.Receiver); ok {
+ receiver.SetReporterStats(reporter.reporterStats)
}
go reporter.processQueue()
return reporter
@@ -231,6 +251,7 @@ func (r *remoteReporter) Report(span *Span) {
atomic.AddInt64(&r.queueLength, 1)
default:
r.metrics.ReporterDropped.Inc(1)
+ r.reporterStats.incDroppedCount()
}
}
@@ -241,7 +262,7 @@ func (r *remoteReporter) Close() {
return
}
r.sendCloseEvent()
- r.sender.Close()
+ _ = r.sender.Close()
}
func (r *remoteReporter) sendCloseEvent() {
@@ -263,7 +284,7 @@ func (r *remoteReporter) processQueue() {
flush := func() {
if flushed, err := r.sender.Flush(); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error()))
+ r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
}
@@ -281,7 +302,7 @@ func (r *remoteReporter) processQueue() {
span := item.span
if flushed, err := r.sender.Append(span); err != nil {
r.metrics.ReporterFailure.Inc(int64(flushed))
- r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error()))
+ r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
} else if flushed > 0 {
r.metrics.ReporterSuccess.Inc(int64(flushed))
// to reduce the number of gauge stats, we only emit queue length on flush
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
index bbf6fb068..42c9112c0 100644
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -59,6 +59,9 @@ type Span struct {
// The span's "micro-log"
logs []opentracing.LogRecord
+ // The number of logs dropped because of MaxLogsPerSpan.
+ numDroppedLogs int
+
// references for this span
references []Reference
@@ -152,7 +155,12 @@ func (s *Span) Logs() []opentracing.LogRecord {
s.Lock()
defer s.Unlock()
- return append([]opentracing.LogRecord(nil), s.logs...)
+ logs := append([]opentracing.LogRecord(nil), s.logs...)
+ if s.numDroppedLogs != 0 {
+ fixLogs(logs, s.numDroppedLogs)
+ }
+
+ return logs
}
// References returns references for this span
@@ -234,8 +242,65 @@ func (s *Span) Log(ld opentracing.LogData) {
// this function should only be called while holding a Write lock
func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
- // TODO add logic to limit number of logs per span (issue #46)
- s.logs = append(s.logs, lr)
+ maxLogs := s.tracer.options.maxLogsPerSpan
+ if maxLogs == 0 || len(s.logs) < maxLogs {
+ s.logs = append(s.logs, lr)
+ return
+ }
+
+ // We have too many logs. We don't touch the first numOld logs; we treat the
+ // rest as a circular buffer and overwrite the oldest log among those.
+ numOld := (maxLogs - 1) / 2
+ numNew := maxLogs - numOld
+ s.logs[numOld+s.numDroppedLogs%numNew] = lr
+ s.numDroppedLogs++
+}
+
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at
+// the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+ // This algorithm is described in:
+ // http://www.cplusplus.com/reference/algorithm/rotate
+ for first, middle, next := 0, pos, pos; first != middle; {
+ buf[first], buf[next] = buf[next], buf[first]
+ first++
+ next++
+ if next == len(buf) {
+ next = middle
+ } else if first == middle {
+ middle = next
+ }
+ }
+}
+
+func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
+ // We dropped some log events, which means that we used part of Logs as a
+ // circular buffer (see appendLog). De-circularize it.
+ numOld := (len(logs) - 1) / 2
+ numNew := len(logs) - numOld
+ rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
+
+ // Replace the log in the middle (the oldest "new" log) with information
+ // about the dropped logs. This means that we are effectively dropping one
+ // more "new" log.
+ numDropped := numDroppedLogs + 1
+ logs[numOld] = opentracing.LogRecord{
+ // Keep the timestamp of the last dropped event.
+ Timestamp: logs[numOld].Timestamp,
+ Fields: []log.Field{
+ log.String("event", "dropped Span logs"),
+ log.Int("dropped_log_count", numDropped),
+ log.String("component", "jaeger-client"),
+ },
+ }
+}
+
+func (s *Span) fixLogsIfDropped() {
+ if s.numDroppedLogs == 0 {
+ return
+ }
+ fixLogs(s.logs, s.numDroppedLogs)
+ s.numDroppedLogs = 0
}
// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext
@@ -274,8 +339,9 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
s.applySamplingDecision(decision, true)
}
if s.context.IsSampled() {
+ s.Lock()
+ s.fixLogsIfDropped()
if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
- s.Lock()
// Note: bulk logs are not subject to maxLogsPerSpan limit
if options.LogRecords != nil {
s.logs = append(s.logs, options.LogRecords...)
@@ -283,8 +349,8 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
for _, ld := range options.BulkLogData {
s.logs = append(s.logs, ld.ToLogRecord())
}
- s.Unlock()
}
+ s.Unlock()
}
// call reportSpan even for non-sampled traces, to return span to the pool
// and update metrics counter
@@ -344,6 +410,7 @@ func (s *Span) reset() {
// Note: To reuse memory we can save the pointers on the heap
s.tags = s.tags[:0]
s.logs = s.logs[:0]
+ s.numDroppedLogs = 0
s.references = s.references[:0]
}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
index b7230abfe..1b44f3f8c 100644
--- a/vendor/github.com/uber/jaeger-client-go/span_context.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -213,9 +213,9 @@ func (c SpanContext) SetFirehose() {
func (c SpanContext) String() string {
if c.traceID.High == 0 {
- return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+ return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
- return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
+ return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
// ContextFromString reconstructs the Context encoded in a string
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
index d23ed2fc2..e69c6d603 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go
@@ -1577,11 +1577,192 @@ func (p *Process) String() string {
}
// Attributes:
+// - FullQueueDroppedSpans
+// - TooLargeDroppedSpans
+// - FailedToEmitSpans
+type ClientStats struct {
+ FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" json:"fullQueueDroppedSpans"`
+ TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" json:"tooLargeDroppedSpans"`
+ FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+ return &ClientStats{}
+}
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+ return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+ return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+ return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetFullQueueDroppedSpans bool = false
+ var issetTooLargeDroppedSpans bool = false
+ var issetFailedToEmitSpans bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.readField1(iprot); err != nil {
+ return err
+ }
+ issetFullQueueDroppedSpans = true
+ case 2:
+ if err := p.readField2(iprot); err != nil {
+ return err
+ }
+ issetTooLargeDroppedSpans = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ issetFailedToEmitSpans = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetFullQueueDroppedSpans {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
+ }
+ if !issetTooLargeDroppedSpans {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
+ }
+ if !issetFailedToEmitSpans {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
+ }
+ return nil
+}
+
+func (p *ClientStats) readField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.FullQueueDroppedSpans = v
+ }
+ return nil
+}
+
+func (p *ClientStats) readField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.TooLargeDroppedSpans = v
+ }
+ return nil
+}
+
+func (p *ClientStats) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.FailedToEmitSpans = v
+ }
+ return nil
+}
+
+func (p *ClientStats) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("ClientStats"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *ClientStats) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.FullQueueDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
+ }
+ return err
+}
+
+func (p *ClientStats) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.TooLargeDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
+ }
+ return err
+}
+
+func (p *ClientStats) writeField3(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("failedToEmitSpans", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(p.FailedToEmitSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
+ }
+ return err
+}
+
+func (p *ClientStats) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
+// Attributes:
// - Process
// - Spans
+// - SeqNo
+// - Stats
type Batch struct {
- Process *Process `thrift:"process,1,required" json:"process"`
- Spans []*Span `thrift:"spans,2,required" json:"spans"`
+ Process *Process `thrift:"process,1,required" json:"process"`
+ Spans []*Span `thrift:"spans,2,required" json:"spans"`
+ SeqNo *int64 `thrift:"seqNo,3" json:"seqNo,omitempty"`
+ Stats *ClientStats `thrift:"stats,4" json:"stats,omitempty"`
}
func NewBatch() *Batch {
@@ -1600,10 +1781,36 @@ func (p *Batch) GetProcess() *Process {
func (p *Batch) GetSpans() []*Span {
return p.Spans
}
+
+var Batch_SeqNo_DEFAULT int64
+
+func (p *Batch) GetSeqNo() int64 {
+ if !p.IsSetSeqNo() {
+ return Batch_SeqNo_DEFAULT
+ }
+ return *p.SeqNo
+}
+
+var Batch_Stats_DEFAULT *ClientStats
+
+func (p *Batch) GetStats() *ClientStats {
+ if !p.IsSetStats() {
+ return Batch_Stats_DEFAULT
+ }
+ return p.Stats
+}
func (p *Batch) IsSetProcess() bool {
return p.Process != nil
}
+func (p *Batch) IsSetSeqNo() bool {
+ return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+ return p.Stats != nil
+}
+
func (p *Batch) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -1631,6 +1838,14 @@ func (p *Batch) Read(iprot thrift.TProtocol) error {
return err
}
issetSpans = true
+ case 3:
+ if err := p.readField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.readField4(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -1680,6 +1895,23 @@ func (p *Batch) readField2(iprot thrift.TProtocol) error {
return nil
}
+func (p *Batch) readField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.SeqNo = &v
+ }
+ return nil
+}
+
+func (p *Batch) readField4(iprot thrift.TProtocol) error {
+ p.Stats = &ClientStats{}
+ if err := p.Stats.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+ }
+ return nil
+}
+
func (p *Batch) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Batch"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -1690,6 +1922,12 @@ func (p *Batch) Write(oprot thrift.TProtocol) error {
if err := p.writeField2(oprot); err != nil {
return err
}
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
}
@@ -1733,6 +1971,36 @@ func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *Batch) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSeqNo() {
+ if err := oprot.WriteFieldBegin("seqNo", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.SeqNo)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *Batch) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStats() {
+ if err := oprot.WriteFieldBegin("stats", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
+ }
+ if err := p.Stats.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
+ }
+ }
+ return err
+}
+
func (p *Batch) String() string {
if p == nil {
return "<nil>"
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
index 2d49e1d5f..15583e56b 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go
@@ -729,7 +729,7 @@ func (p *BinaryAnnotation) String() string {
// precise value possible. For example, gettimeofday or syncing nanoTime
// against a tick of currentTimeMillis.
//
-// For compatibilty with instrumentation that precede this field, collectors
+// For compatibility with instrumentation that precede this field, collectors
// or span stores can derive this via Annotation.timestamp.
// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
//
@@ -741,7 +741,7 @@ func (p *BinaryAnnotation) String() string {
// precise measurement decoupled from problems of clocks, such as skew or NTP
// updates causing time to move backwards.
//
-// For compatibilty with instrumentation that precede this field, collectors
+// For compatibility with instrumentation that precede this field, collectors
// or span stores can derive this by subtracting Annotation.timestamp.
// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
//
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
index f03372dc7..da43ec6db 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -52,6 +52,7 @@ type Tracer struct {
highTraceIDGenerator func() uint64 // custom high trace ID generator
maxTagValueLength int
noDebugFlagOnForcedSampling bool
+ maxLogsPerSpan int
// more options to come
}
// allocator of Span objects
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
index 469685bb4..f016484b9 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -144,6 +144,18 @@ func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
}
}
+// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+// value). If a span has more logs than this value, logs are dropped as
+// necessary (and replaced with a log describing how many were dropped).
+//
+// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+// half are the newest logs.
+func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.maxLogsPerSpan = maxLogsPerSpan
+ }
+}
+
func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
return func(tracer *Tracer) {
tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
index bc1b3e6b0..bb7eb00c9 100644
--- a/vendor/github.com/uber/jaeger-client-go/transport/http.go
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -39,6 +39,7 @@ type HTTPTransport struct {
spans []*j.Span
process *j.Process
httpCredentials *HTTPBasicAuthCredentials
+ headers map[string]string
}
// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
@@ -76,6 +77,13 @@ func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
}
}
+// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request
+func HTTPHeaders(headers map[string]string) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.headers = headers
+ }
+}
+
// NewHTTPTransport returns a new HTTP-backend transport. url should be an http
// url of the collector to handle POST request, typically something like:
// http://hostname:14268/api/traces?format=jaeger.thrift
@@ -136,6 +144,9 @@ func (c *HTTPTransport) send(spans []*j.Span) error {
return err
}
req.Header.Set("Content-Type", "application/x-thrift")
+ for k, v := range c.headers {
+ req.Header.Set(k, v)
+ }
if c.httpCredentials != nil {
req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
index 7b9ccf937..7370d8007 100644
--- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -18,8 +18,8 @@ import (
"errors"
"fmt"
+ "github.com/uber/jaeger-client-go/internal/reporterstats"
"github.com/uber/jaeger-client-go/thrift"
-
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/utils"
)
@@ -27,12 +27,14 @@ import (
// Empirically obtained constant for how many bytes in the message are used for envelope.
// The total datagram size is:
// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
-// There is a unit test `TestEmitBatchOverhead` that validates this number.
+//
// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
-const emitBatchOverhead = 30
+//
+// There is a unit test `TestEmitBatchOverhead` that validates this number, it fails at <68.
+const emitBatchOverhead = 70
-var errSpanTooLarge = errors.New("Span is too large")
+var errSpanTooLarge = errors.New("span is too large")
type udpSender struct {
client *utils.AgentClientUDP
@@ -44,9 +46,19 @@ type udpSender struct {
thriftProtocol thrift.TProtocol
process *j.Process
processByteSize int
+
+ // reporterStats provides access to stats that are only known to Reporter
+ reporterStats reporterstats.ReporterStats
+
+ // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
+ // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
+ batchSeqNo int64
+ tooLargeDroppedSpans int64
+ failedToEmitSpans int64
}
-// NewUDPTransport creates a reporter that submits spans to jaeger-agent
+// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
+// TODO: (breaking change) move to transport/ package.
func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
if len(hostPort) == 0 {
hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
@@ -66,17 +78,22 @@ func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
return nil, err
}
- sender := &udpSender{
+ return &udpSender{
client: client,
maxSpanBytes: maxPacketSize - emitBatchOverhead,
thriftBuffer: thriftBuffer,
- thriftProtocol: thriftProtocol}
- return sender, nil
+ thriftProtocol: thriftProtocol,
+ }, nil
+}
+
+// SetReporterStats implements reporterstats.Receiver.
+func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
+ s.reporterStats = rs
}
func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
s.thriftBuffer.Reset()
- thriftStruct.Write(s.thriftProtocol)
+ _ = thriftStruct.Write(s.thriftProtocol)
return s.thriftBuffer.Len()
}
@@ -89,6 +106,7 @@ func (s *udpSender) Append(span *Span) (int, error) {
jSpan := BuildJaegerThrift(span)
spanSize := s.calcSizeOfSerializedThrift(jSpan)
if spanSize > s.maxSpanBytes {
+ s.tooLargeDroppedSpans++
return 1, errSpanTooLarge
}
@@ -112,9 +130,18 @@ func (s *udpSender) Flush() (int, error) {
if n == 0 {
return 0, nil
}
- err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer})
+ s.batchSeqNo++
+ batchSeqNo := int64(s.batchSeqNo)
+ err := s.client.EmitBatch(&j.Batch{
+ Process: s.process,
+ Spans: s.spanBuffer,
+ SeqNo: &batchSeqNo,
+ Stats: s.makeStats(),
+ })
s.resetBuffers()
-
+ if err != nil {
+ s.failedToEmitSpans += int64(n)
+ }
return n, err
}
@@ -129,3 +156,15 @@ func (s *udpSender) resetBuffers() {
s.spanBuffer = s.spanBuffer[:0]
s.byteBufferSize = s.processByteSize
}
+
+func (s *udpSender) makeStats() *j.ClientStats {
+ var dropped int64
+ if s.reporterStats != nil {
+ dropped = s.reporterStats.SpansDroppedFromQueue()
+ }
+ return &j.ClientStats{
+ FullQueueDroppedSpans: dropped,
+ TooLargeDroppedSpans: s.tooLargeDroppedSpans,
+ FailedToEmitSpans: s.failedToEmitSpans,
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
index 6f042073d..fadd73e49 100644
--- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -85,7 +85,7 @@ func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error {
return err
}
if a.thriftBuffer.Len() > a.maxPacketSize {
- return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d",
+ return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
}
_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
index eb31c4369..73aeb000f 100644
--- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -40,6 +40,7 @@ var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
}
// BuildZipkinThrift builds thrift span based on internal span.
+// TODO: (breaking change) move to transport/zipkin and make private.
func BuildZipkinThrift(s *Span) *z.Span {
span := &zipkinSpan{Span: s}
span.handleSpecialTags()
diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore
new file mode 100644
index 000000000..9f11b755a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/.gitignore
@@ -0,0 +1 @@
+.idea/
diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml
index f5c0b3eb5..7d14af4d6 100644
--- a/vendor/github.com/vishvananda/netlink/.travis.yml
+++ b/vendor/github.com/vishvananda/netlink/.travis.yml
@@ -1,4 +1,8 @@
language: go
+go:
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
before_script:
# make sure we keep path in tact when we sudo
- sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers
@@ -9,5 +13,7 @@ before_script:
- sudo modprobe nf_conntrack_netlink
- sudo modprobe nf_conntrack_ipv4
- sudo modprobe nf_conntrack_ipv6
+ - sudo modprobe sch_hfsc
install:
- go get github.com/vishvananda/netns
+go_import_path: github.com/vishvananda/netlink
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index d59c3281e..28746d5af 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -15,39 +15,62 @@ import (
const IFA_FLAGS = 0x8
// AddrAdd will add an IP address to a link device.
+//
// Equivalent to: `ip addr add $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func AddrAdd(link Link, addr *Addr) error {
return pkgHandle.AddrAdd(link, addr)
}
// AddrAdd will add an IP address to a link device.
+//
// Equivalent to: `ip addr add $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func (h *Handle) AddrAdd(link Link, addr *Addr) error {
req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)
return h.addrHandle(link, addr, req)
}
// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+//
// Equivalent to: `ip addr replace $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func AddrReplace(link Link, addr *Addr) error {
return pkgHandle.AddrReplace(link, addr)
}
// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+//
// Equivalent to: `ip addr replace $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func (h *Handle) AddrReplace(link Link, addr *Addr) error {
req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK)
return h.addrHandle(link, addr, req)
}
// AddrDel will delete an IP address from a link device.
+//
// Equivalent to: `ip addr del $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func AddrDel(link Link, addr *Addr) error {
return pkgHandle.AddrDel(link, addr)
}
// AddrDel will delete an IP address from a link device.
// Equivalent to: `ip addr del $addr dev $link`
+//
+// If `addr` is an IPv4 address and the broadcast address is not given, it
+// will be automatically computed based on the IP mask if /30 or larger.
func (h *Handle) AddrDel(link Link, addr *Addr) error {
req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK)
return h.addrHandle(link, addr, req)
@@ -65,7 +88,11 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error
msg := nl.NewIfAddrmsg(family)
msg.Index = uint32(base.Index)
msg.Scope = uint8(addr.Scope)
- prefixlen, masklen := addr.Mask.Size()
+ mask := addr.Mask
+ if addr.Peer != nil {
+ mask = addr.Peer.Mask
+ }
+ prefixlen, masklen := mask.Size()
msg.Prefixlen = uint8(prefixlen)
req.AddData(msg)
@@ -104,14 +131,20 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error
}
if family == FAMILY_V4 {
- if addr.Broadcast == nil {
+ // Automatically set the broadcast address if it is unset and the
+ // subnet is large enough to sensibly have one (/30 or larger).
+ // See: RFC 3021
+ if addr.Broadcast == nil && prefixlen < 31 {
calcBroadcast := make(net.IP, masklen/8)
for i := range localAddrData {
- calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i]
+ calcBroadcast[i] = localAddrData[i] | ^mask[i]
}
addr.Broadcast = calcBroadcast
}
- req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast))
+
+ if addr.Broadcast != nil {
+ req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast))
+ }
if addr.Label != "" {
labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label))
@@ -206,13 +239,17 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) {
IP: attr.Value,
Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
}
- addr.Peer = dst
case unix.IFA_LOCAL:
+ // iproute2 manual:
+ // If a peer address is specified, the local address
+ // cannot have a prefix length. The network prefix is
+ // associated with the peer rather than with the local
+ // address.
+ n := 8 * len(attr.Value)
local = &net.IPNet{
IP: attr.Value,
- Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ Mask: net.CIDRMask(n, n),
}
- addr.IPNet = local
case unix.IFA_BROADCAST:
addr.Broadcast = attr.Value
case unix.IFA_LABEL:
@@ -226,12 +263,24 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) {
}
}
- // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
+ // libnl addr.c comment:
+ // IPv6 sends the local address as IFA_ADDRESS with no
+ // IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS
+ // with IFA_ADDRESS being the peer address if they differ
+ //
+ // But obviously, as there are IPv6 PtP addresses, too,
+ // IFA_LOCAL should also be handled for IPv6.
if local != nil {
- addr.IPNet = local
+ if family == FAMILY_V4 && local.IP.Equal(dst.IP) {
+ addr.IPNet = dst
+ } else {
+ addr.IPNet = local
+ addr.Peer = dst
+ }
} else {
addr.IPNet = dst
}
+
addr.Scope = int(msg.Scope)
return
@@ -250,21 +299,22 @@ type AddrUpdate struct {
// AddrSubscribe takes a chan down which notifications will be sent
// when addresses change. Close the 'done' chan to stop subscription.
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+ return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0)
}
// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribeAt(ns, netns.None(), ch, done, nil, false)
+ return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0)
}
// AddrSubscribeOptions contains a set of options to use with
// AddrSubscribeWithOptions.
type AddrSubscribeOptions struct {
- Namespace *netns.NsHandle
- ErrorCallback func(error)
- ListExisting bool
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+ ListExisting bool
+ ReceiveBufferSize int
}
// AddrSubscribeWithOptions work like AddrSubscribe but enable to
@@ -275,10 +325,10 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option
none := netns.None()
options.Namespace = &none
}
- return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting)
+ return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize)
}
-func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error {
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error {
s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)
if err != nil {
return err
@@ -289,6 +339,12 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c
s.Close()
}()
}
+ if rcvbuf != 0 {
+ err = pkgHandle.SetSocketReceiveBufferSize(rcvbuf, false)
+ if err != nil {
+ return err
+ }
+ }
if listExisting {
req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR,
unix.NLM_F_DUMP)
@@ -301,13 +357,19 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c
go func() {
defer close(ch)
for {
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
if cberr != nil {
cberr(err)
}
return
}
+ if from.Pid != nl.PidKernel {
+ if cberr != nil {
+ cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel))
+ }
+ continue
+ }
for _, m := range msgs {
if m.Header.Type == unix.NLMSG_DONE {
continue
@@ -319,16 +381,17 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c
continue
}
if cberr != nil {
- cberr(syscall.Errno(-error))
+ cberr(fmt.Errorf("error message: %v",
+ syscall.Errno(-error)))
}
- return
+ continue
}
msgType := m.Header.Type
if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR {
if cberr != nil {
cberr(fmt.Errorf("bad message type: %d", msgType))
}
- return
+ continue
}
addr, _, ifindex, err := parseAddr(m.Data)
@@ -336,7 +399,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c
if cberr != nil {
cberr(fmt.Errorf("could not parse address: %v", err))
}
- return
+ continue
}
ch <- AddrUpdate{LinkAddress: *addr.IPNet,
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
index 350ab0db4..6e1224c47 100644
--- a/vendor/github.com/vishvananda/netlink/bridge_linux.go
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -96,7 +96,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged
flags |= nl.BRIDGE_FLAGS_MASTER
}
if flags > 0 {
- nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
+ br.AddRtAttr(nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
}
vlanInfo := &nl.BridgeVlanInfo{Vid: vid}
if pvid {
@@ -105,11 +105,8 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged
if untagged {
vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
}
- nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+ br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
req.AddData(br)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
- if err != nil {
- return err
- }
- return nil
+ return err
}
diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go
index 8ee13af48..dcc22d9e9 100644
--- a/vendor/github.com/vishvananda/netlink/class.go
+++ b/vendor/github.com/vishvananda/netlink/class.go
@@ -4,25 +4,76 @@ import (
"fmt"
)
+// Class interfaces for all classes
type Class interface {
Attrs() *ClassAttrs
Type() string
}
+// Generic networking statistics for netlink users.
+// This file contains "gnet_" prefixed structs and relevant functions.
+// See Documentation/networking/getn_stats.txt in Linux source code for more details.
+
+// GnetStatsBasic Ref: struct gnet_stats_basic { ... }
+type GnetStatsBasic struct {
+ Bytes uint64 // number of seen bytes
+ Packets uint32 // number of seen packets
+}
+
+// GnetStatsRateEst Ref: struct gnet_stats_rate_est { ... }
+type GnetStatsRateEst struct {
+ Bps uint32 // current byte rate
+ Pps uint32 // current packet rate
+}
+
+// GnetStatsRateEst64 Ref: struct gnet_stats_rate_est64 { ... }
+type GnetStatsRateEst64 struct {
+ Bps uint64 // current byte rate
+ Pps uint64 // current packet rate
+}
+
+// GnetStatsQueue Ref: struct gnet_stats_queue { ... }
+type GnetStatsQueue struct {
+ Qlen uint32 // queue length
+ Backlog uint32 // backlog size of queue
+ Drops uint32 // number of dropped packets
+ Requeues uint32 // number of requues
+ Overlimits uint32 // number of enqueues over the limit
+}
+
+// ClassStatistics representation based on generic networking statistics for netlink.
+// See Documentation/networking/gen_stats.txt in Linux source code for more details.
+type ClassStatistics struct {
+ Basic *GnetStatsBasic
+ Queue *GnetStatsQueue
+ RateEst *GnetStatsRateEst
+}
+
+// NewClassStatistics Construct a ClassStatistics struct which fields are all initialized by 0.
+func NewClassStatistics() *ClassStatistics {
+ return &ClassStatistics{
+ Basic: &GnetStatsBasic{},
+ Queue: &GnetStatsQueue{},
+ RateEst: &GnetStatsRateEst{},
+ }
+}
+
// ClassAttrs represents a netlink class. A filter is associated with a link,
// has a handle and a parent. The root filter of a device should have a
// parent == HANDLE_ROOT.
type ClassAttrs struct {
- LinkIndex int
- Handle uint32
- Parent uint32
- Leaf uint32
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Leaf uint32
+ Statistics *ClassStatistics
}
func (q ClassAttrs) String() string {
return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf)
}
+// HtbClassAttrs stores the attributes of HTB class
type HtbClassAttrs struct {
// TODO handle all attributes
Rate uint64
@@ -54,10 +105,12 @@ func (q HtbClass) String() string {
return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
}
+// Attrs returns the class attributes
func (q *HtbClass) Attrs() *ClassAttrs {
return &q.ClassAttrs
}
+// Type return the class type
func (q *HtbClass) Type() string {
return "htb"
}
@@ -69,10 +122,90 @@ type GenericClass struct {
ClassType string
}
+// Attrs return the class attributes
func (class *GenericClass) Attrs() *ClassAttrs {
return &class.ClassAttrs
}
+// Type return the class type
func (class *GenericClass) Type() string {
return class.ClassType
}
+
+// ServiceCurve is the way the HFSC curve are represented
+type ServiceCurve struct {
+ m1 uint32
+ d uint32
+ m2 uint32
+}
+
+// Attrs return the parameters of the service curve
+func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) {
+ return c.m1, c.d, c.m2
+}
+
+// HfscClass is a representation of the HFSC class
+type HfscClass struct {
+ ClassAttrs
+ Rsc ServiceCurve
+ Fsc ServiceCurve
+ Usc ServiceCurve
+}
+
+// SetUsc sets the Usc curve
+func (hfsc *HfscClass) SetUsc(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// SetFsc sets the Fsc curve
+func (hfsc *HfscClass) SetFsc(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// SetRsc sets the Rsc curve
+func (hfsc *HfscClass) SetRsc(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// SetSC implements the SC from the tc CLI
+func (hfsc *HfscClass) SetSC(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Rsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+ hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// SetUL implements the UL from the tc CLI
+func (hfsc *HfscClass) SetUL(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Usc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// SetLS implements the LS from the tc CLI
+func (hfsc *HfscClass) SetLS(m1 uint32, d uint32, m2 uint32) {
+ hfsc.Fsc = ServiceCurve{m1: m1 / 8, d: d, m2: m2 / 8}
+}
+
+// NewHfscClass returns a new HFSC struct with the set parameters
+func NewHfscClass(attrs ClassAttrs) *HfscClass {
+ return &HfscClass{
+ ClassAttrs: attrs,
+ Rsc: ServiceCurve{},
+ Fsc: ServiceCurve{},
+ Usc: ServiceCurve{},
+ }
+}
+
+func (hfsc *HfscClass) String() string {
+ return fmt.Sprintf(
+ "{%s -- {RSC: {m1=%d d=%d m2=%d}} {FSC: {m1=%d d=%d m2=%d}} {USC: {m1=%d d=%d m2=%d}}}",
+ hfsc.Attrs(), hfsc.Rsc.m1*8, hfsc.Rsc.d, hfsc.Rsc.m2*8, hfsc.Fsc.m1*8, hfsc.Fsc.d, hfsc.Fsc.m2*8, hfsc.Usc.m1*8, hfsc.Usc.d, hfsc.Usc.m2*8,
+ )
+}
+
+// Attrs return the Hfsc parameters
+func (hfsc *HfscClass) Attrs() *ClassAttrs {
+ return &hfsc.ClassAttrs
+}
+
+// Type return the type of the class
+func (hfsc *HfscClass) Type() string {
+ return "hfsc"
+}
diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go
index a4997740e..31091e501 100644
--- a/vendor/github.com/vishvananda/netlink/class_linux.go
+++ b/vendor/github.com/vishvananda/netlink/class_linux.go
@@ -1,14 +1,34 @@
package netlink
import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
"errors"
+ "fmt"
"syscall"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
-// NOTE: function is in here because it uses other linux functions
+// Internal tc_stats representation in Go struct.
+// This is for internal uses only to deserialize the payload of rtattr.
+// After the deserialization, this should be converted into the canonical stats
+// struct, ClassStatistics, in case of statistics of a class.
+// Ref: struct tc_stats { ... }
+type tcStats struct {
+ Bytes uint64 // Number of enqueued bytes
+ Packets uint32 // Number of enqueued packets
+ Drops uint32 // Packets dropped because of lack of resources
+ Overlimits uint32 // Number of throttle events when this flow goes out of allocated bandwidth
+ Bps uint32 // Current flow byte rate
+ Pps uint32 // Current flow packet rate
+ Qlen uint32
+ Backlog uint32
+}
+
+// NewHtbClass NOTE: function is in here because it uses other linux functions
func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass {
mtu := 1600
rate := cattrs.Rate / 8
@@ -126,7 +146,9 @@ func classPayload(req *nl.NetlinkRequest, class Class) error {
req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type())))
options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
- if htb, ok := class.(*HtbClass); ok {
+ switch class.Type() {
+ case "htb":
+ htb := class.(*HtbClass)
opt := nl.TcHtbCopt{}
opt.Buffer = htb.Buffer
opt.Cbuffer = htb.Cbuffer
@@ -151,9 +173,18 @@ func classPayload(req *nl.NetlinkRequest, class Class) error {
return errors.New("HTB: failed to calculate ceil rate table")
}
opt.Ceil = tcceil
- nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize())
- nl.NewRtAttrChild(options, nl.TCA_HTB_RTAB, SerializeRtab(rtab))
- nl.NewRtAttrChild(options, nl.TCA_HTB_CTAB, SerializeRtab(ctab))
+ options.AddRtAttr(nl.TCA_HTB_PARMS, opt.Serialize())
+ options.AddRtAttr(nl.TCA_HTB_RTAB, SerializeRtab(rtab))
+ options.AddRtAttr(nl.TCA_HTB_CTAB, SerializeRtab(ctab))
+ case "hfsc":
+ hfsc := class.(*HfscClass)
+ opt := nl.HfscCopt{}
+ opt.Rsc.Set(hfsc.Rsc.Attrs())
+ opt.Fsc.Set(hfsc.Fsc.Attrs())
+ opt.Usc.Set(hfsc.Usc.Attrs())
+ options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc))
+ options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc))
+ options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc))
}
req.AddData(options)
return nil
@@ -197,9 +228,10 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
}
base := ClassAttrs{
- LinkIndex: int(msg.Ifindex),
- Handle: msg.Handle,
- Parent: msg.Parent,
+ LinkIndex: int(msg.Ifindex),
+ Handle: msg.Handle,
+ Parent: msg.Parent,
+ Statistics: nil,
}
var class Class
@@ -211,6 +243,8 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
switch classType {
case "htb":
class = &HtbClass{}
+ case "hfsc":
+ class = &HfscClass{}
default:
class = &GenericClass{ClassType: classType}
}
@@ -225,6 +259,26 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) {
if err != nil {
return nil, err
}
+ case "hfsc":
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ _, err = parseHfscClassData(class, data)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // For backward compatibility.
+ case nl.TCA_STATS:
+ base.Statistics, err = parseTcStats(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ case nl.TCA_STATS2:
+ base.Statistics, err = parseTcStats2(attr.Value)
+ if err != nil {
+ return nil, err
}
}
}
@@ -253,3 +307,78 @@ func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, erro
}
return detailed, nil
}
+
+func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) {
+ hfsc := class.(*HfscClass)
+ detailed := false
+ for _, datum := range data {
+ m1, d, m2 := nl.DeserializeHfscCurve(datum.Value).Attrs()
+ switch datum.Attr.Type {
+ case nl.TCA_HFSC_RSC:
+ hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2}
+ case nl.TCA_HFSC_FSC:
+ hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2}
+ case nl.TCA_HFSC_USC:
+ hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2}
+ }
+ }
+ return detailed, nil
+}
+
+func parseTcStats(data []byte) (*ClassStatistics, error) {
+ buf := &bytes.Buffer{}
+ buf.Write(data)
+ native := nl.NativeEndian()
+ tcStats := &tcStats{}
+ if err := binary.Read(buf, native, tcStats); err != nil {
+ return nil, err
+ }
+
+ stats := NewClassStatistics()
+ stats.Basic.Bytes = tcStats.Bytes
+ stats.Basic.Packets = tcStats.Packets
+ stats.Queue.Qlen = tcStats.Qlen
+ stats.Queue.Backlog = tcStats.Backlog
+ stats.Queue.Drops = tcStats.Drops
+ stats.Queue.Overlimits = tcStats.Overlimits
+ stats.RateEst.Bps = tcStats.Bps
+ stats.RateEst.Pps = tcStats.Pps
+
+ return stats, nil
+}
+
+func parseGnetStats(data []byte, gnetStats interface{}) error {
+ buf := &bytes.Buffer{}
+ buf.Write(data)
+ native := nl.NativeEndian()
+ return binary.Read(buf, native, gnetStats)
+}
+
+func parseTcStats2(data []byte) (*ClassStatistics, error) {
+ rtAttrs, err := nl.ParseRouteAttr(data)
+ if err != nil {
+ return nil, err
+ }
+ stats := NewClassStatistics()
+ for _, datum := range rtAttrs {
+ switch datum.Attr.Type {
+ case nl.TCA_STATS_BASIC:
+ if err := parseGnetStats(datum.Value, stats.Basic); err != nil {
+ return nil, fmt.Errorf("Failed to parse ClassStatistics.Basic with: %v\n%s",
+ err, hex.Dump(datum.Value))
+ }
+ case nl.TCA_STATS_QUEUE:
+ if err := parseGnetStats(datum.Value, stats.Queue); err != nil {
+ return nil, fmt.Errorf("Failed to parse ClassStatistics.Queue with: %v\n%s",
+ err, hex.Dump(datum.Value))
+ }
+ case nl.TCA_STATS_RATE_EST:
+ if err := parseGnetStats(datum.Value, stats.RateEst); err != nil {
+ return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s",
+ err, hex.Dump(datum.Value))
+ }
+ }
+ }
+
+ return stats, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
index a0fc74a37..4bff0dcba 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -22,11 +22,7 @@ const (
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2
ConntrackExpectTable = 2
)
-const (
- // For Parsing Mark
- TCP_PROTO = 6
- UDP_PROTO = 17
-)
+
const (
// backward compatibility with golang 1.6 which does not have io.SeekCurrent
seekCurrent = 1
@@ -135,11 +131,13 @@ func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily)
// http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h
// For the time being, the structure below allows to parse and extract the base information of a flow
type ipTuple struct {
- SrcIP net.IP
+ Bytes uint64
DstIP net.IP
+ DstPort uint16
+ Packets uint64
Protocol uint8
+ SrcIP net.IP
SrcPort uint16
- DstPort uint16
}
type ConntrackFlow struct {
@@ -151,11 +149,12 @@ type ConntrackFlow struct {
func (s *ConntrackFlow) String() string {
// conntrack cmd output:
- // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0
- return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d",
+ // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0
+ return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=%d",
nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
- s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
- s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark)
+ s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes,
+ s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes,
+ s.Mark)
}
// This method parse the ip tuple structure
@@ -220,9 +219,35 @@ func parseBERaw16(r *bytes.Reader, v *uint16) {
binary.Read(r, binary.BigEndian, v)
}
+func parseBERaw32(r *bytes.Reader, v *uint32) {
+ binary.Read(r, binary.BigEndian, v)
+}
+
+func parseBERaw64(r *bytes.Reader, v *uint64) {
+ binary.Read(r, binary.BigEndian, v)
+}
+
+func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) {
+ for i := 0; i < 2; i++ {
+ switch _, t, _ := parseNfAttrTL(r); t {
+ case nl.CTA_COUNTERS_BYTES:
+ parseBERaw64(r, &bytes)
+ case nl.CTA_COUNTERS_PACKETS:
+ parseBERaw64(r, &packets)
+ default:
+ return
+ }
+ }
+ return
+}
+
+func parseConnectionMark(r *bytes.Reader) (mark uint32) {
+ parseBERaw32(r, &mark)
+ return
+}
+
func parseRawData(data []byte) *ConntrackFlow {
s := &ConntrackFlow{}
- var proto uint8
// First there is the Nfgenmsg header
// consume only the family field
reader := bytes.NewReader(data)
@@ -238,36 +263,31 @@ func parseRawData(data []byte) *ConntrackFlow {
// <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
// flow information of the reverse flow
for reader.Len() > 0 {
- nested, t, l := parseNfAttrTL(reader)
- if nested && t == nl.CTA_TUPLE_ORIG {
- if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
- proto = parseIpTuple(reader, &s.Forward)
+ if nested, t, l := parseNfAttrTL(reader); nested {
+ switch t {
+ case nl.CTA_TUPLE_ORIG:
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ parseIpTuple(reader, &s.Forward)
+ }
+ case nl.CTA_TUPLE_REPLY:
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ parseIpTuple(reader, &s.Reverse)
+ } else {
+ // Header not recognized skip it
+ reader.Seek(int64(l), seekCurrent)
+ }
+ case nl.CTA_COUNTERS_ORIG:
+ s.Forward.Bytes, s.Forward.Packets = parseByteAndPacketCounters(reader)
+ case nl.CTA_COUNTERS_REPLY:
+ s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader)
}
- } else if nested && t == nl.CTA_TUPLE_REPLY {
- if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
- parseIpTuple(reader, &s.Reverse)
-
- // Got all the useful information stop parsing
- break
- } else {
- // Header not recognized skip it
- reader.Seek(int64(l), seekCurrent)
+ } else {
+ switch t {
+ case nl.CTA_MARK:
+ s.Mark = parseConnectionMark(reader)
}
}
}
- if proto == TCP_PROTO {
- reader.Seek(64, seekCurrent)
- _, t, _, v := parseNfAttrTLV(reader)
- if t == nl.CTA_MARK {
- s.Mark = uint32(v[3])
- }
- } else if proto == UDP_PROTO {
- reader.Seek(16, seekCurrent)
- _, t, _, v := parseNfAttrTLV(reader)
- if t == nl.CTA_MARK {
- s.Mark = uint32(v[3])
- }
- }
return s
}
@@ -285,7 +305,7 @@ func parseRawData(data []byte) *ConntrackFlow {
// Common parameters and options:
// -s, --src, --orig-src ip Source address from original direction
// -d, --dst, --orig-dst ip Destination address from original direction
-// -r, --reply-src ip Source addres from reply direction
+// -r, --reply-src ip Source address from reply direction
// -q, --reply-dst ip Destination address from reply direction
// -p, --protonum proto Layer 4 Protocol, eg. 'tcp'
// -f, --family proto Layer 3 Protocol, eg. 'ipv6'
@@ -302,11 +322,14 @@ func parseRawData(data []byte) *ConntrackFlow {
type ConntrackFilterType uint8
const (
- ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
- ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
- ConntrackNatSrcIP // -src-nat ip Source NAT ip
- ConntrackNatDstIP // -dst-nat ip Destination NAT ip
- ConntrackNatAnyIP // -any-nat ip Source or destination NAT ip
+ ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
+ ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
+ ConntrackReplySrcIP // --reply-src ip Reply Source IP
+ ConntrackReplyDstIP // --reply-dst ip Reply Destination IP
+ ConntrackReplyAnyIP // Match source or destination reply IP
+ ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP
+ ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP
+ ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instaed ConntrackReplyAnyIP
)
type CustomConntrackFilter interface {
@@ -351,17 +374,17 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
}
// -src-nat ip Source NAT ip
- if elem, found := f.ipFilter[ConntrackNatSrcIP]; match && found {
+ if elem, found := f.ipFilter[ConntrackReplySrcIP]; match && found {
match = match && elem.Equal(flow.Reverse.SrcIP)
}
// -dst-nat ip Destination NAT ip
- if elem, found := f.ipFilter[ConntrackNatDstIP]; match && found {
+ if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found {
match = match && elem.Equal(flow.Reverse.DstIP)
}
- // -any-nat ip Source or destination NAT ip
- if elem, found := f.ipFilter[ConntrackNatAnyIP]; match && found {
+ // Match source or destination reply IP
+ if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found {
match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP))
}
diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go
new file mode 100644
index 000000000..29b3f8ec1
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go
@@ -0,0 +1,272 @@
+package netlink
+
+import (
+ "syscall"
+
+ "fmt"
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+// DevlinkDevEswitchAttr represents device's eswitch attributes
+type DevlinkDevEswitchAttr struct {
+ Mode string
+ InlineMode string
+ EncapMode string
+}
+
+// DevlinkDevAttrs represents device attributes
+type DevlinkDevAttrs struct {
+ Eswitch DevlinkDevEswitchAttr
+}
+
+// DevlinkDevice represents device and its attributes
+type DevlinkDevice struct {
+ BusName string
+ DeviceName string
+ Attrs DevlinkDevAttrs
+}
+
+func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) {
+ devices := make([]*DevlinkDevice, 0, len(msgs))
+ for _, m := range msgs {
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ dev := &DevlinkDevice{}
+ if err = dev.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+ devices = append(devices, dev)
+ }
+ return devices, nil
+}
+
+func eswitchStringToMode(modeName string) (uint16, error) {
+ if modeName == "legacy" {
+ return nl.DEVLINK_ESWITCH_MODE_LEGACY, nil
+ } else if modeName == "switchdev" {
+ return nl.DEVLINK_ESWITCH_MODE_SWITCHDEV, nil
+ } else {
+ return 0xffff, fmt.Errorf("invalid switchdev mode")
+ }
+}
+
+func parseEswitchMode(mode uint16) string {
+ var eswitchMode = map[uint16]string{
+ nl.DEVLINK_ESWITCH_MODE_LEGACY: "legacy",
+ nl.DEVLINK_ESWITCH_MODE_SWITCHDEV: "switchdev",
+ }
+ if eswitchMode[mode] == "" {
+ return "unknown"
+ } else {
+ return eswitchMode[mode]
+ }
+}
+
+func parseEswitchInlineMode(inlinemode uint8) string {
+ var eswitchInlineMode = map[uint8]string{
+ nl.DEVLINK_ESWITCH_INLINE_MODE_NONE: "none",
+ nl.DEVLINK_ESWITCH_INLINE_MODE_LINK: "link",
+ nl.DEVLINK_ESWITCH_INLINE_MODE_NETWORK: "network",
+ nl.DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: "transport",
+ }
+ if eswitchInlineMode[inlinemode] == "" {
+ return "unknown"
+ } else {
+ return eswitchInlineMode[inlinemode]
+ }
+}
+
+func parseEswitchEncapMode(encapmode uint8) string {
+ var eswitchEncapMode = map[uint8]string{
+ nl.DEVLINK_ESWITCH_ENCAP_MODE_NONE: "disable",
+ nl.DEVLINK_ESWITCH_ENCAP_MODE_BASIC: "enable",
+ }
+ if eswitchEncapMode[encapmode] == "" {
+ return "unknown"
+ } else {
+ return eswitchEncapMode[encapmode]
+ }
+}
+
+func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error {
+ for _, a := range attrs {
+ switch a.Attr.Type {
+ case nl.DEVLINK_ATTR_BUS_NAME:
+ d.BusName = string(a.Value)
+ case nl.DEVLINK_ATTR_DEV_NAME:
+ d.DeviceName = string(a.Value)
+ case nl.DEVLINK_ATTR_ESWITCH_MODE:
+ d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value))
+ case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE:
+ d.Attrs.Eswitch.InlineMode = parseEswitchInlineMode(uint8(a.Value[0]))
+ case nl.DEVLINK_ATTR_ESWITCH_ENCAP_MODE:
+ d.Attrs.Eswitch.EncapMode = parseEswitchEncapMode(uint8(a.Value[0]))
+ }
+ }
+ return nil
+}
+
+func (dev *DevlinkDevice) parseEswitchAttrs(msgs [][]byte) {
+ m := msgs[0]
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return
+ }
+ dev.parseAttributes(attrs)
+}
+
+func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) {
+ msg := &nl.Genlmsg{
+ Command: nl.DEVLINK_CMD_ESWITCH_GET,
+ Version: nl.GENL_DEVLINK_VERSION,
+ }
+ req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK)
+ req.AddData(msg)
+
+ b := make([]byte, len(dev.BusName))
+ copy(b, dev.BusName)
+ data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b)
+ req.AddData(data)
+
+ b = make([]byte, len(dev.DeviceName))
+ copy(b, dev.DeviceName)
+ data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b)
+ req.AddData(data)
+
+ msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return
+ }
+ dev.parseEswitchAttrs(msgs)
+}
+
+// DevLinkGetDeviceList provides a pointer to devlink devices and nil error,
+// otherwise returns an error code.
+func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME)
+ if err != nil {
+ return nil, err
+ }
+ msg := &nl.Genlmsg{
+ Command: nl.DEVLINK_CMD_GET,
+ Version: nl.GENL_DEVLINK_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID),
+ unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP)
+ req.AddData(msg)
+ msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ devices, err := parseDevLinkDeviceList(msgs)
+ if err != nil {
+ return nil, err
+ }
+ for _, d := range devices {
+ h.getEswitchAttrs(f, d)
+ }
+ return devices, nil
+}
+
+// DevLinkGetDeviceList provides a pointer to devlink devices and nil error,
+// otherwise returns an error code.
+func DevLinkGetDeviceList() ([]*DevlinkDevice, error) {
+ return pkgHandle.DevLinkGetDeviceList()
+}
+
+func parseDevlinkDevice(msgs [][]byte) (*DevlinkDevice, error) {
+ m := msgs[0]
+ attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+ if err != nil {
+ return nil, err
+ }
+ dev := &DevlinkDevice{}
+ if err = dev.parseAttributes(attrs); err != nil {
+ return nil, err
+ }
+ return dev, nil
+}
+
+func (h *Handle) createCmdReq(cmd uint8, bus string, device string) (*GenlFamily, *nl.NetlinkRequest, error) {
+ f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ msg := &nl.Genlmsg{
+ Command: cmd,
+ Version: nl.GENL_DEVLINK_VERSION,
+ }
+ req := h.newNetlinkRequest(int(f.ID),
+ unix.NLM_F_REQUEST|unix.NLM_F_ACK)
+ req.AddData(msg)
+
+ b := make([]byte, len(bus)+1)
+ copy(b, bus)
+ data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b)
+ req.AddData(data)
+
+ b = make([]byte, len(device)+1)
+ copy(b, device)
+ data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b)
+ req.AddData(data)
+
+ return f, req, nil
+}
+
+// DevlinkGetDeviceByName provides a pointer to devlink device and nil error,
+// otherwise returns an error code.
+func (h *Handle) DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) {
+ f, req, err := h.createCmdReq(nl.DEVLINK_CMD_GET, Bus, Device)
+ if err != nil {
+ return nil, err
+ }
+
+ respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+ if err != nil {
+ return nil, err
+ }
+ dev, err := parseDevlinkDevice(respmsg)
+ if err == nil {
+ h.getEswitchAttrs(f, dev)
+ }
+ return dev, err
+}
+
+// DevlinkGetDeviceByName provides a pointer to devlink device and nil error,
+// otherwise returns an error code.
+func DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) {
+ return pkgHandle.DevLinkGetDeviceByName(Bus, Device)
+}
+
+// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or
+// returns an error code.
+// Equivalent to: `devlink dev eswitch set $dev mode switchdev`
+// Equivalent to: `devlink dev eswitch set $dev mode legacy`
+func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error {
+ mode, err := eswitchStringToMode(NewMode)
+ if err != nil {
+ return err
+ }
+
+ _, req, err := h.createCmdReq(nl.DEVLINK_CMD_ESWITCH_SET, Dev.BusName, Dev.DeviceName)
+ if err != nil {
+ return err
+ }
+
+ req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_ESWITCH_MODE, nl.Uint16Attr(mode)))
+
+ _, err = req.Execute(unix.NETLINK_GENERIC, 0)
+ return err
+}
+
+// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or
+// returns an error code.
+// Equivalent to: `devlink dev eswitch set $dev mode switchdev`
+// Equivalent to: `devlink dev eswitch set $dev mode legacy`
+func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error {
+ return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode)
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index c2cf8e4dc..88792eab0 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -2,6 +2,7 @@ package netlink
import (
"fmt"
+ "net"
)
type Filter interface {
@@ -135,6 +136,27 @@ func (action *BpfAction) Attrs() *ActionAttrs {
return &action.ActionAttrs
}
+type ConnmarkAction struct {
+ ActionAttrs
+ Zone uint16
+}
+
+func (action *ConnmarkAction) Type() string {
+ return "connmark"
+}
+
+func (action *ConnmarkAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewConnmarkAction() *ConnmarkAction {
+ return &ConnmarkAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_PIPE,
+ },
+ }
+}
+
type MirredAct uint8
func (a MirredAct) String() string {
@@ -182,47 +204,59 @@ func NewMirredAction(redirIndex int) *MirredAction {
}
}
-// Sel of the U32 filters that contains multiple TcU32Key. This is the copy
-// and the frontend representation of nl.TcU32Sel. It is serialized into canonical
-// nl.TcU32Sel with the appropriate endianness.
-type TcU32Sel struct {
- Flags uint8
- Offshift uint8
- Nkeys uint8
- Pad uint8
- Offmask uint16
- Off uint16
- Offoff int16
- Hoff int16
- Hmask uint32
- Keys []TcU32Key
-}
-
-// TcU32Key contained of Sel in the U32 filters. This is the copy and the frontend
-// representation of nl.TcU32Key. It is serialized into chanonical nl.TcU32Sel
-// with the appropriate endianness.
-type TcU32Key struct {
- Mask uint32
- Val uint32
- Off int32
- OffMask int32
-}
-
-// U32 filters on many packet related properties
-type U32 struct {
- FilterAttrs
- ClassId uint32
- RedirIndex int
- Sel *TcU32Sel
- Actions []Action
+type TunnelKeyAct int8
+
+const (
+ TCA_TUNNEL_KEY_SET TunnelKeyAct = 1 // set tunnel key
+ TCA_TUNNEL_KEY_UNSET TunnelKeyAct = 2 // unset tunnel key
+)
+
+type TunnelKeyAction struct {
+ ActionAttrs
+ Action TunnelKeyAct
+ SrcAddr net.IP
+ DstAddr net.IP
+ KeyID uint32
}
-func (filter *U32) Attrs() *FilterAttrs {
- return &filter.FilterAttrs
+func (action *TunnelKeyAction) Type() string {
+ return "tunnel_key"
}
-func (filter *U32) Type() string {
- return "u32"
+func (action *TunnelKeyAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewTunnelKeyAction() *TunnelKeyAction {
+ return &TunnelKeyAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_PIPE,
+ },
+ }
+}
+
+type SkbEditAction struct {
+ ActionAttrs
+ QueueMapping *uint16
+ PType *uint16
+ Priority *uint32
+ Mark *uint32
+}
+
+func (action *SkbEditAction) Type() string {
+ return "skbedit"
+}
+
+func (action *SkbEditAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewSkbEditAction() *SkbEditAction {
+ return &SkbEditAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_PIPE,
+ },
+ }
}
// MatchAll filters match all packets
@@ -262,6 +296,8 @@ type BpfFilter struct {
Fd int
Name string
DirectAction bool
+ Id int
+ Tag string
}
func (filter *BpfFilter) Type() string {
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index f0eac6b78..c56f314cd 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -3,10 +3,11 @@ package netlink
import (
"bytes"
"encoding/binary"
+ "encoding/hex"
"errors"
"fmt"
+ "net"
"syscall"
- "unsafe"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
@@ -20,6 +21,35 @@ const (
TC_U32_EAT = nl.TC_U32_EAT
)
+// Sel of the U32 filters that contains multiple TcU32Key. This is the type
+// alias and the frontend representation of nl.TcU32Sel. It is serialized into
+// canonical nl.TcU32Sel with the appropriate endianness.
+type TcU32Sel = nl.TcU32Sel
+
+// TcU32Key contained of Sel in the U32 filters. This is the type alias and the
+// frontend representation of nl.TcU32Key. It is serialized into chanonical
+// nl.TcU32Sel with the appropriate endianness.
+type TcU32Key = nl.TcU32Key
+
+// U32 filters on many packet related properties
+type U32 struct {
+ FilterAttrs
+ ClassId uint32
+ Divisor uint32 // Divisor MUST be power of 2.
+ Hash uint32
+ RedirIndex int
+ Sel *TcU32Sel
+ Actions []Action
+}
+
+func (filter *U32) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *U32) Type() string {
+ return "u32"
+}
+
// Fw filter filters on firewall marks
// NOTE: this is in filter_linux because it refers to nl.TcPolice which
// is defined in nl/tc_linux.go
@@ -123,8 +153,24 @@ func FilterAdd(filter Filter) error {
// FilterAdd will add a filter to the system.
// Equivalent to: `tc filter add $filter`
func (h *Handle) FilterAdd(filter Filter) error {
+ return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL)
+}
+
+// FilterReplace will replace a filter.
+// Equivalent to: `tc filter replace $filter`
+func FilterReplace(filter Filter) error {
+ return pkgHandle.FilterReplace(filter)
+}
+
+// FilterReplace will replace a filter.
+// Equivalent to: `tc filter replace $filter`
+func (h *Handle) FilterReplace(filter Filter) error {
+ return h.filterModify(filter, unix.NLM_F_CREATE)
+}
+
+func (h *Handle) filterModify(filter Filter, flags int) error {
native = nl.NativeEndian()
- req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)
+ req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK)
base := filter.Attrs()
msg := &nl.TcMsg{
Family: nl.FAMILY_ALL,
@@ -140,8 +186,7 @@ func (h *Handle) FilterAdd(filter Filter) error {
switch filter := filter.(type) {
case *U32:
- // Convert TcU32Sel into nl.TcU32Sel as it is without copy.
- sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel))
+ sel := filter.Sel
if sel == nil {
// match all
sel = &nl.TcU32Sel{
@@ -168,11 +213,20 @@ func (h *Handle) FilterAdd(filter Filter) error {
}
}
sel.Nkeys = uint8(len(sel.Keys))
- nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
+ options.AddRtAttr(nl.TCA_U32_SEL, sel.Serialize())
if filter.ClassId != 0 {
- nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId))
+ options.AddRtAttr(nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId))
+ }
+ if filter.Divisor != 0 {
+ if (filter.Divisor-1)&filter.Divisor != 0 {
+ return fmt.Errorf("illegal divisor %d. Must be a power of 2", filter.Divisor)
+ }
+ options.AddRtAttr(nl.TCA_U32_DIVISOR, nl.Uint32Attr(filter.Divisor))
}
- actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
+ if filter.Hash != 0 {
+ options.AddRtAttr(nl.TCA_U32_HASH, nl.Uint32Attr(filter.Hash))
+ }
+ actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil)
// backwards compatibility
if filter.RedirIndex != 0 {
filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...)
@@ -184,51 +238,51 @@ func (h *Handle) FilterAdd(filter Filter) error {
if filter.Mask != 0 {
b := make([]byte, 4)
native.PutUint32(b, filter.Mask)
- nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b)
+ options.AddRtAttr(nl.TCA_FW_MASK, b)
}
if filter.InDev != "" {
- nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
+ options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
}
if (filter.Police != nl.TcPolice{}) {
- police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil)
- nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize())
+ police := options.AddRtAttr(nl.TCA_FW_POLICE, nil)
+ police.AddRtAttr(nl.TCA_POLICE_TBF, filter.Police.Serialize())
if (filter.Police.Rate != nl.TcRateSpec{}) {
payload := SerializeRtab(filter.Rtab)
- nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload)
+ police.AddRtAttr(nl.TCA_POLICE_RATE, payload)
}
if (filter.Police.PeakRate != nl.TcRateSpec{}) {
payload := SerializeRtab(filter.Ptab)
- nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload)
+ police.AddRtAttr(nl.TCA_POLICE_PEAKRATE, payload)
}
}
if filter.ClassId != 0 {
b := make([]byte, 4)
native.PutUint32(b, filter.ClassId)
- nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b)
+ options.AddRtAttr(nl.TCA_FW_CLASSID, b)
}
case *BpfFilter:
var bpfFlags uint32
if filter.ClassId != 0 {
- nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId))
+ options.AddRtAttr(nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId))
}
if filter.Fd >= 0 {
- nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd))))
+ options.AddRtAttr(nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd))))
}
if filter.Name != "" {
- nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name))
+ options.AddRtAttr(nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name))
}
if filter.DirectAction {
bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
}
- nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
+ options.AddRtAttr(nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
case *MatchAll:
- actionsAttr := nl.NewRtAttrChild(options, nl.TCA_MATCHALL_ACT, nil)
+ actionsAttr := options.AddRtAttr(nl.TCA_MATCHALL_ACT, nil)
if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
return err
}
if filter.ClassId != 0 {
- nl.NewRtAttrChild(options, nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId))
+ options.AddRtAttr(nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId))
}
}
@@ -366,34 +420,91 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
default:
return fmt.Errorf("unknown action type %s", action.Type())
case *MirredAction:
- table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ table := attr.AddRtAttr(tabIndex, nil)
tabIndex++
- nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred"))
- aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
mirred := nl.TcMirred{
Eaction: int32(action.MirredAction),
Ifindex: uint32(action.Ifindex),
}
toTcGen(action.Attrs(), &mirred.TcGen)
- nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mirred.Serialize())
+ aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize())
+ case *TunnelKeyAction:
+ table := attr.AddRtAttr(tabIndex, nil)
+ tabIndex++
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("tunnel_key"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+ tun := nl.TcTunnelKey{
+ Action: int32(action.Action),
+ }
+ toTcGen(action.Attrs(), &tun.TcGen)
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_PARMS, tun.Serialize())
+ if action.Action == TCA_TUNNEL_KEY_SET {
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_KEY_ID, htonl(action.KeyID))
+ if v4 := action.SrcAddr.To4(); v4 != nil {
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC, v4[:])
+ } else if v6 := action.SrcAddr.To16(); v6 != nil {
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, v6[:])
+ } else {
+ return fmt.Errorf("invalid src addr %s for tunnel_key action", action.SrcAddr)
+ }
+ if v4 := action.DstAddr.To4(); v4 != nil {
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_DST, v4[:])
+ } else if v6 := action.DstAddr.To16(); v6 != nil {
+ aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, v6[:])
+ } else {
+ return fmt.Errorf("invalid dst addr %s for tunnel_key action", action.DstAddr)
+ }
+ }
+ case *SkbEditAction:
+ table := attr.AddRtAttr(tabIndex, nil)
+ tabIndex++
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("skbedit"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+ skbedit := nl.TcSkbEdit{}
+ toTcGen(action.Attrs(), &skbedit.TcGen)
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_PARMS, skbedit.Serialize())
+ if action.QueueMapping != nil {
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_QUEUE_MAPPING, nl.Uint16Attr(*action.QueueMapping))
+ }
+ if action.Priority != nil {
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_PRIORITY, nl.Uint32Attr(*action.Priority))
+ }
+ if action.PType != nil {
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_PTYPE, nl.Uint16Attr(*action.PType))
+ }
+ if action.Mark != nil {
+ aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark))
+ }
+ case *ConnmarkAction:
+ table := attr.AddRtAttr(tabIndex, nil)
+ tabIndex++
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("connmark"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+ connmark := nl.TcConnmark{
+ Zone: action.Zone,
+ }
+ toTcGen(action.Attrs(), &connmark.TcGen)
+ aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize())
case *BpfAction:
- table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ table := attr.AddRtAttr(tabIndex, nil)
tabIndex++
- nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf"))
- aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
gen := nl.TcGen{}
toTcGen(action.Attrs(), &gen)
- nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_PARMS, gen.Serialize())
- nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd)))
- nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name))
+ aopts.AddRtAttr(nl.TCA_ACT_BPF_PARMS, gen.Serialize())
+ aopts.AddRtAttr(nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd)))
+ aopts.AddRtAttr(nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name))
case *GenericAction:
- table := nl.NewRtAttrChild(attr, tabIndex, nil)
+ table := attr.AddRtAttr(tabIndex, nil)
tabIndex++
- nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("gact"))
- aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil)
+ table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("gact"))
+ aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
gen := nl.TcGen{}
toTcGen(action.Attrs(), &gen)
- nl.NewRtAttrChild(aopts, nl.TCA_GACT_PARMS, gen.Serialize())
+ aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize())
}
}
return nil
@@ -419,8 +530,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
action = &MirredAction{}
case "bpf":
action = &BpfAction{}
+ case "connmark":
+ action = &ConnmarkAction{}
case "gact":
action = &GenericAction{}
+ case "tunnel_key":
+ action = &TunnelKeyAction{}
+ case "skbedit":
+ action = &SkbEditAction{}
default:
break nextattr
}
@@ -435,11 +552,46 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
switch adatum.Attr.Type {
case nl.TCA_MIRRED_PARMS:
mirred := *nl.DeserializeTcMirred(adatum.Value)
- toAttrs(&mirred.TcGen, action.Attrs())
action.(*MirredAction).ActionAttrs = ActionAttrs{}
+ toAttrs(&mirred.TcGen, action.Attrs())
action.(*MirredAction).Ifindex = int(mirred.Ifindex)
action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
}
+ case "tunnel_key":
+ switch adatum.Attr.Type {
+ case nl.TCA_TUNNEL_KEY_PARMS:
+ tun := *nl.DeserializeTunnelKey(adatum.Value)
+ action.(*TunnelKeyAction).ActionAttrs = ActionAttrs{}
+ toAttrs(&tun.TcGen, action.Attrs())
+ action.(*TunnelKeyAction).Action = TunnelKeyAct(tun.Action)
+ case nl.TCA_TUNNEL_KEY_ENC_KEY_ID:
+ action.(*TunnelKeyAction).KeyID = networkOrder.Uint32(adatum.Value[0:4])
+ case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC:
+ case nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC:
+ action.(*TunnelKeyAction).SrcAddr = net.IP(adatum.Value[:])
+ case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST:
+ case nl.TCA_TUNNEL_KEY_ENC_IPV4_DST:
+ action.(*TunnelKeyAction).DstAddr = net.IP(adatum.Value[:])
+ }
+ case "skbedit":
+ switch adatum.Attr.Type {
+ case nl.TCA_SKBEDIT_PARMS:
+ skbedit := *nl.DeserializeSkbEdit(adatum.Value)
+ action.(*SkbEditAction).ActionAttrs = ActionAttrs{}
+ toAttrs(&skbedit.TcGen, action.Attrs())
+ case nl.TCA_SKBEDIT_MARK:
+ mark := native.Uint32(adatum.Value[0:4])
+ action.(*SkbEditAction).Mark = &mark
+ case nl.TCA_SKBEDIT_PRIORITY:
+ priority := native.Uint32(adatum.Value[0:4])
+ action.(*SkbEditAction).Priority = &priority
+ case nl.TCA_SKBEDIT_PTYPE:
+ ptype := native.Uint16(adatum.Value[0:2])
+ action.(*SkbEditAction).PType = &ptype
+ case nl.TCA_SKBEDIT_QUEUE_MAPPING:
+ mapping := native.Uint16(adatum.Value[0:2])
+ action.(*SkbEditAction).QueueMapping = &mapping
+ }
case "bpf":
switch adatum.Attr.Type {
case nl.TCA_ACT_BPF_PARMS:
@@ -450,6 +602,14 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
case nl.TCA_ACT_BPF_NAME:
action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
}
+ case "connmark":
+ switch adatum.Attr.Type {
+ case nl.TCA_CONNMARK_PARMS:
+ connmark := *nl.DeserializeTcConnmark(adatum.Value)
+ action.(*ConnmarkAction).ActionAttrs = ActionAttrs{}
+ toAttrs(&connmark.TcGen, action.Attrs())
+ action.(*ConnmarkAction).Zone = connmark.Zone
+ }
case "gact":
switch adatum.Attr.Type {
case nl.TCA_GACT_PARMS:
@@ -474,7 +634,7 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
case nl.TCA_U32_SEL:
detailed = true
sel := nl.DeserializeTcU32Sel(datum.Value)
- u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel))
+ u32.Sel = sel
if native != networkOrder {
// Handle the endianness of attributes
u32.Sel.Offmask = native.Uint16(htons(sel.Offmask))
@@ -500,6 +660,10 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
}
case nl.TCA_U32_CLASSID:
u32.ClassId = native.Uint32(datum.Value)
+ case nl.TCA_U32_DIVISOR:
+ u32.Divisor = native.Uint32(datum.Value)
+ case nl.TCA_U32_HASH:
+ u32.Hash = native.Uint32(datum.Value)
}
}
return detailed, nil
@@ -551,6 +715,10 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 {
bpf.DirectAction = true
}
+ case nl.TCA_BPF_ID:
+ bpf.Id = int(native.Uint32(datum.Value[0:4]))
+ case nl.TCA_BPF_TAG:
+ bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1])
}
}
return detailed, nil
diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go
index 62d59bd2d..ed55b2b79 100644
--- a/vendor/github.com/vishvananda/netlink/fou_linux.go
+++ b/vendor/github.com/vishvananda/netlink/fou_linux.go
@@ -90,11 +90,7 @@ func (h *Handle) FouAdd(f Fou) error {
req.AddRawData(raw)
_, err = req.Execute(unix.NETLINK_GENERIC, 0)
- if err != nil {
- return err
- }
-
- return nil
+ return err
}
func FouDel(f Fou) error {
diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
index ce7969907..772e5834a 100644
--- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go
+++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go
@@ -157,6 +157,9 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) {
return nil, err
}
families, err := parseFamilies(msgs)
+ if err != nil {
+ return nil, err
+ }
if len(families) != 1 {
return nil, fmt.Errorf("invalid response for GENL_CTRL_CMD_GETFAMILY")
}
diff --git a/vendor/github.com/vishvananda/netlink/go.mod b/vendor/github.com/vishvananda/netlink/go.mod
new file mode 100644
index 000000000..09ee60e77
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/go.mod
@@ -0,0 +1,8 @@
+module github.com/vishvananda/netlink
+
+go 1.12
+
+require (
+ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df
+ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444
+)
diff --git a/vendor/github.com/vishvananda/netlink/go.sum b/vendor/github.com/vishvananda/netlink/go.sum
new file mode 100644
index 000000000..402d14ec5
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/go.sum
@@ -0,0 +1,4 @@
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444 h1:/d2cWp6PSamH4jDPFLyO150psQdqvtoNX8Zjg3AQ31g=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go
index 9f6d7fe0f..26887b759 100644
--- a/vendor/github.com/vishvananda/netlink/handle_linux.go
+++ b/vendor/github.com/vishvananda/netlink/handle_linux.go
@@ -91,7 +91,7 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) {
return results, nil
}
-// NewHandle returns a netlink handle on the network namespace
+// NewHandleAt returns a netlink handle on the network namespace
// specified by ns. If ns=netns.None(), current network namespace
// will be assumed
func NewHandleAt(ns netns.NsHandle, nlFamilies ...int) (*Handle, error) {
diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
index 915b765de..ef914dcb8 100644
--- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
@@ -73,10 +73,18 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error {
return ErrNotImplemented
}
+func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error {
return ErrNotImplemented
}
+func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) LinkSetMaster(link Link, master *Bridge) error {
return ErrNotImplemented
}
@@ -149,6 +157,10 @@ func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
return ErrNotImplemented
}
+func (h *Handle) LinkSetGroup(link Link, group int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
return ErrNotImplemented
}
diff --git a/vendor/github.com/vishvananda/netlink/ioctl_linux.go b/vendor/github.com/vishvananda/netlink/ioctl_linux.go
index a8503126d..4d33db5da 100644
--- a/vendor/github.com/vishvananda/netlink/ioctl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/ioctl_linux.go
@@ -56,18 +56,10 @@ type ethtoolSset struct {
data [1]uint32
}
-// ethtoolGstrings is string set for data tagging
-type ethtoolGstrings struct {
- cmd uint32
- stringSet uint32
- length uint32
- data [32]byte
-}
-
type ethtoolStats struct {
cmd uint32
nStats uint32
- data [1]uint64
+ // Followed by nStats * []uint64.
}
// newIocltSlaveReq returns filled IfreqSlave with proper interface names
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
index fe74ffab9..886d88d1b 100644
--- a/vendor/github.com/vishvananda/netlink/link.go
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net"
"os"
+ "strconv"
)
// Link represents a link device from netlink. Shared link attributes
@@ -41,6 +42,29 @@ type LinkAttrs struct {
NetNsID int
NumTxQueues int
NumRxQueues int
+ GSOMaxSize uint32
+ GSOMaxSegs uint32
+ Vfs []VfInfo // virtual functions available on link
+ Group uint32
+ Slave LinkSlave
+}
+
+// LinkSlave represents a slave device.
+type LinkSlave interface {
+ SlaveType() string
+}
+
+// VfInfo represents configuration of virtual function
+type VfInfo struct {
+ ID int
+ Mac net.HardwareAddr
+ Vlan int
+ Qos int
+ TxRate int // IFLA_VF_TX_RATE Max TxRate
+ Spoofchk bool
+ LinkState uint32
+ MaxTxRate uint32 // IFLA_VF_RATE Max TxRate
+ MinTxRate uint32 // IFLA_VF_RATE Min TxRate
}
// LinkOperState represents the values of the IFLA_OPERSTATE link
@@ -223,6 +247,7 @@ type Bridge struct {
LinkAttrs
MulticastSnooping *bool
HelloTime *uint32
+ VlanFiltering *bool
}
func (bridge *Bridge) Attrs() *LinkAttrs {
@@ -236,7 +261,8 @@ func (bridge *Bridge) Type() string {
// Vlan links have ParentIndex set in their Attrs()
type Vlan struct {
LinkAttrs
- VlanId int
+ VlanId int
+ VlanProtocol VlanProtocol
}
func (vlan *Vlan) Attrs() *LinkAttrs {
@@ -290,10 +316,13 @@ type TuntapFlag uint16
// Tuntap links created via /dev/tun/tap, but can be destroyed via netlink
type Tuntap struct {
LinkAttrs
- Mode TuntapMode
- Flags TuntapFlag
- Queues int
- Fds []*os.File
+ Mode TuntapMode
+ Flags TuntapFlag
+ NonPersist bool
+ Queues int
+ Fds []*os.File
+ Owner uint32
+ Group uint32
}
func (tuntap *Tuntap) Attrs() *LinkAttrs {
@@ -307,7 +336,8 @@ func (tuntap *Tuntap) Type() string {
// Veth devices must specify PeerName on create
type Veth struct {
LinkAttrs
- PeerName string // veth on create only
+ PeerName string // veth on create only
+ PeerHardwareAddr net.HardwareAddr
}
func (veth *Veth) Attrs() *LinkAttrs {
@@ -376,9 +406,18 @@ const (
IPVLAN_MODE_MAX
)
+type IPVlanFlag uint16
+
+const (
+ IPVLAN_FLAG_BRIDGE IPVlanFlag = iota
+ IPVLAN_FLAG_PRIVATE
+ IPVLAN_FLAG_VEPA
+)
+
type IPVlan struct {
LinkAttrs
Mode IPVlanMode
+ Flag IPVlanFlag
}
func (ipvlan *IPVlan) Attrs() *LinkAttrs {
@@ -389,6 +428,43 @@ func (ipvlan *IPVlan) Type() string {
return "ipvlan"
}
+// VlanProtocol type
+type VlanProtocol int
+
+func (p VlanProtocol) String() string {
+ s, ok := VlanProtocolToString[p]
+ if !ok {
+ return fmt.Sprintf("VlanProtocol(%d)", p)
+ }
+ return s
+}
+
+// StringToVlanProtocol returns vlan protocol, or unknown is the s is invalid.
+func StringToVlanProtocol(s string) VlanProtocol {
+ mode, ok := StringToVlanProtocolMap[s]
+ if !ok {
+ return VLAN_PROTOCOL_UNKNOWN
+ }
+ return mode
+}
+
+// VlanProtocol possible values
+const (
+ VLAN_PROTOCOL_UNKNOWN VlanProtocol = 0
+ VLAN_PROTOCOL_8021Q VlanProtocol = 0x8100
+ VLAN_PROTOCOL_8021AD VlanProtocol = 0x88A8
+)
+
+var VlanProtocolToString = map[VlanProtocol]string{
+ VLAN_PROTOCOL_8021Q: "802.1q",
+ VLAN_PROTOCOL_8021AD: "802.1ad",
+}
+
+var StringToVlanProtocolMap = map[string]VlanProtocol{
+ "802.1q": VLAN_PROTOCOL_8021Q,
+ "802.1ad": VLAN_PROTOCOL_8021AD,
+}
+
// BondMode type
type BondMode int
@@ -400,7 +476,7 @@ func (b BondMode) String() string {
return s
}
-// StringToBondMode returns bond mode, or uknonw is the s is invalid.
+// StringToBondMode returns bond mode, or unknown is the s is invalid.
func StringToBondMode(s string) BondMode {
mode, ok := StringToBondModeMap[s]
if !ok {
@@ -491,7 +567,7 @@ func (b BondXmitHashPolicy) String() string {
return s
}
-// StringToBondXmitHashPolicy returns bond lacp arte, or uknonw is the s is invalid.
+// StringToBondXmitHashPolicy returns bond lacp arte, or unknown is the s is invalid.
func StringToBondXmitHashPolicy(s string) BondXmitHashPolicy {
lacp, ok := StringToBondXmitHashPolicyMap[s]
if !ok {
@@ -536,7 +612,7 @@ func (b BondLacpRate) String() string {
return s
}
-// StringToBondLacpRate returns bond lacp arte, or uknonw is the s is invalid.
+// StringToBondLacpRate returns bond lacp arte, or unknown is the s is invalid.
func StringToBondLacpRate(s string) BondLacpRate {
lacp, ok := StringToBondLacpRateMap[s]
if !ok {
@@ -680,6 +756,67 @@ func (bond *Bond) Type() string {
return "bond"
}
+// BondSlaveState represents the values of the IFLA_BOND_SLAVE_STATE bond slave
+// attribute, which contains the state of the bond slave.
+type BondSlaveState uint8
+
+const (
+ BondStateActive = iota // Link is active.
+ BondStateBackup // Link is backup.
+)
+
+func (s BondSlaveState) String() string {
+ switch s {
+ case BondStateActive:
+ return "ACTIVE"
+ case BondStateBackup:
+ return "BACKUP"
+ default:
+ return strconv.Itoa(int(s))
+ }
+}
+
+// BondSlaveState represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave
+// attribute, which contains the status of MII link monitoring
+type BondSlaveMiiStatus uint8
+
+const (
+ BondLinkUp = iota // link is up and running.
+ BondLinkFail // link has just gone down.
+ BondLinkDown // link has been down for too long time.
+ BondLinkBack // link is going back.
+)
+
+func (s BondSlaveMiiStatus) String() string {
+ switch s {
+ case BondLinkUp:
+ return "UP"
+ case BondLinkFail:
+ return "GOING_DOWN"
+ case BondLinkDown:
+ return "DOWN"
+ case BondLinkBack:
+ return "GOING_BACK"
+ default:
+ return strconv.Itoa(int(s))
+ }
+}
+
+type BondSlave struct {
+ State BondSlaveState
+ MiiStatus BondSlaveMiiStatus
+ LinkFailureCount uint32
+ PermHardwareAddr net.HardwareAddr
+ QueueId uint16
+ AggregatorId uint16
+ AdActorOperPortState uint8
+ AdPartnerOperPortState uint16
+}
+
+func (b *BondSlave) SlaveType() string {
+ return "bond"
+}
+
// Gretap devices must specify LocalIP and RemoteIP on create
type Gretap struct {
LinkAttrs
@@ -734,6 +871,27 @@ func (iptun *Iptun) Type() string {
return "ipip"
}
+type Ip6tnl struct {
+ LinkAttrs
+ Link uint32
+ Local net.IP
+ Remote net.IP
+ Ttl uint8
+ Tos uint8
+ EncapLimit uint8
+ Flags uint32
+ Proto uint8
+ FlowInfo uint32
+}
+
+func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs {
+ return &ip6tnl.LinkAttrs
+}
+
+func (ip6tnl *Ip6tnl) Type() string {
+ return "ip6tnl"
+}
+
type Sittun struct {
LinkAttrs
Link uint32
@@ -769,7 +927,10 @@ func (vti *Vti) Attrs() *LinkAttrs {
return &vti.LinkAttrs
}
-func (iptun *Vti) Type() string {
+func (vti *Vti) Type() string {
+ if vti.Local.To4() == nil {
+ return "vti6"
+ }
return "vti"
}
@@ -831,11 +992,68 @@ func (gtp *GTP) Type() string {
return "gtp"
}
+// Virtual XFRM Interfaces
+// Named "xfrmi" to prevent confusion with XFRM objects
+type Xfrmi struct {
+ LinkAttrs
+ Ifid uint32
+}
+
+func (xfrm *Xfrmi) Attrs() *LinkAttrs {
+ return &xfrm.LinkAttrs
+}
+
+func (xfrm *Xfrmi) Type() string {
+ return "xfrm"
+}
+
+// IPoIB interface
+
+type IPoIBMode uint16
+
+func (m *IPoIBMode) String() string {
+ str, ok := iPoIBModeToString[*m]
+ if !ok {
+ return fmt.Sprintf("mode(%d)", *m)
+ }
+ return str
+}
+
+const (
+ IPOIB_MODE_DATAGRAM = iota
+ IPOIB_MODE_CONNECTED
+)
+
+var iPoIBModeToString = map[IPoIBMode]string{
+ IPOIB_MODE_DATAGRAM: "datagram",
+ IPOIB_MODE_CONNECTED: "connected",
+}
+
+var StringToIPoIBMode = map[string]IPoIBMode{
+ "datagram": IPOIB_MODE_DATAGRAM,
+ "connected": IPOIB_MODE_CONNECTED,
+}
+
+type IPoIB struct {
+ LinkAttrs
+ Pkey uint16
+ Mode IPoIBMode
+ Umcast uint16
+}
+
+func (ipoib *IPoIB) Attrs() *LinkAttrs {
+ return &ipoib.LinkAttrs
+}
+
+func (ipoib *IPoIB) Type() string {
+ return "ipoib"
+}
+
// iproute2 supported devices;
// vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
-// gre | gretap | ip6gre | ip6gretap | vti | nlmon |
-// bond_slave | ipvlan
+// gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon |
+// bond_slave | ipvlan | xfrm
// LinkNotFoundError wraps the various not found errors when
// getting/reading links. This is intended for better error
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
index 540191ed8..ec915a0b9 100644
--- a/vendor/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -4,8 +4,11 @@ import (
"bytes"
"encoding/binary"
"fmt"
+ "io/ioutil"
"net"
"os"
+ "strconv"
+ "strings"
"syscall"
"unsafe"
@@ -16,7 +19,7 @@ import (
const (
SizeofLinkStats32 = 0x5c
- SizeofLinkStats64 = 0xd8
+ SizeofLinkStats64 = 0xb8
)
const (
@@ -31,6 +34,12 @@ const (
TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI
)
+const (
+ VF_LINK_STATE_AUTO uint32 = 0
+ VF_LINK_STATE_ENABLE uint32 = 1
+ VF_LINK_STATE_DISABLE uint32 = 2
+)
+
var lookupByDump = false
var macvlanModes = [...]uint32{
@@ -113,6 +122,52 @@ func (h *Handle) SetPromiscOn(link Link) error {
return err
}
+// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device.
+// Equivalent to: `ip link set $link allmulticast on`
+func LinkSetAllmulticastOn(link Link) error {
+ return pkgHandle.LinkSetAllmulticastOn(link)
+}
+
+// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device.
+// Equivalent to: `ip link set $link allmulticast on`
+func (h *Handle) LinkSetAllmulticastOn(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Change = unix.IFF_ALLMULTI
+ msg.Flags = unix.IFF_ALLMULTI
+
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device.
+// Equivalent to: `ip link set $link allmulticast off`
+func LinkSetAllmulticastOff(link Link) error {
+ return pkgHandle.LinkSetAllmulticastOff(link)
+}
+
+// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device.
+// Equivalent to: `ip link set $link allmulticast off`
+func (h *Handle) LinkSetAllmulticastOff(link Link) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Change = unix.IFF_ALLMULTI
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error {
return pkgHandle.MacvlanMACAddrAdd(link, addr)
}
@@ -155,24 +210,24 @@ func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode
req.AddData(msg)
linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
- nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
- inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+ inner := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
// IFLA_MACVLAN_MACADDR_MODE = mode
b := make([]byte, 4)
native.PutUint32(b, mode)
- nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b)
+ inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_MODE, b)
// populate message with MAC addrs, if necessary
switch mode {
case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL:
if len(addrs) == 1 {
- nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0]))
+ inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0]))
}
case nl.MACVLAN_MACADDR_SET:
- mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil)
+ mad := inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_DATA, nil)
for _, addr := range addrs {
- nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr))
+ mad.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addr))
}
}
@@ -203,7 +258,6 @@ func (h *Handle) SetPromiscOff(link Link) error {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
msg.Change = unix.IFF_PROMISC
- msg.Flags = 0 & ^unix.IFF_PROMISC
msg.Index = int32(base.Index)
req.AddData(msg)
@@ -253,7 +307,6 @@ func (h *Handle) LinkSetDown(link Link) error {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
msg.Change = unix.IFF_UP
- msg.Flags = 0 & ^unix.IFF_UP
msg.Index = int32(base.Index)
req.AddData(msg)
@@ -378,12 +431,12 @@ func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd
req.AddData(msg)
data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
- info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
vfmsg := nl.VfMac{
Vf: uint32(vf),
}
copy(vfmsg.Mac[:], []byte(hwaddr))
- nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize())
+ info.AddRtAttr(nl.IFLA_VF_MAC, vfmsg.Serialize())
req.AddData(data)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
@@ -408,10 +461,41 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error {
req.AddData(msg)
data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfVlan{
+ Vf: uint32(vf),
+ Vlan: uint32(vlan),
+ }
+ info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos`
+func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
+ return pkgHandle.LinkSetVfVlanQos(link, vf, vlan, qos)
+}
+
+// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos`
+func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
vfmsg := nl.VfVlan{
Vf: uint32(vf),
Vlan: uint32(vlan),
+ Qos: uint32(qos),
}
nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize())
req.AddData(data)
@@ -438,12 +522,73 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error {
req.AddData(msg)
data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
- info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
vfmsg := nl.VfTxRate{
Vf: uint32(vf),
Rate: uint32(rate),
}
- nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize())
+ info.AddRtAttr(nl.IFLA_VF_TX_RATE, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfRate sets the min and max tx rate of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate`
+func LinkSetVfRate(link Link, vf, minRate, maxRate int) error {
+ return pkgHandle.LinkSetVfRate(link, vf, minRate, maxRate)
+}
+
+// LinkSetVfRate sets the min and max tx rate of a vf for the link.
+// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate`
+func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfRate{
+ Vf: uint32(vf),
+ MinTxRate: uint32(minRate),
+ MaxTxRate: uint32(maxRate),
+ }
+ info.AddRtAttr(nl.IFLA_VF_RATE, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfState enables/disables virtual link state on a vf.
+// Equivalent to: `ip link set $link vf $vf state $state`
+func LinkSetVfState(link Link, vf int, state uint32) error {
+ return pkgHandle.LinkSetVfState(link, vf, state)
+}
+
+// LinkSetVfState enables/disables virtual link state on a vf.
+// Equivalent to: `ip link set $link vf $vf state $state`
+func (h *Handle) LinkSetVfState(link Link, vf int, state uint32) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfLinkState{
+ Vf: uint32(vf),
+ LinkState: state,
+ }
+ info.AddRtAttr(nl.IFLA_VF_LINK_STATE, vfmsg.Serialize())
req.AddData(data)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
@@ -456,7 +601,7 @@ func LinkSetVfSpoofchk(link Link, vf int, check bool) error {
return pkgHandle.LinkSetVfSpoofchk(link, vf, check)
}
-// LinkSetVfSpookfchk enables/disables spoof check on a vf for the link.
+// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link.
// Equivalent to: `ip link set $link vf $vf spoofchk $check`
func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error {
var setting uint32
@@ -469,7 +614,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error {
req.AddData(msg)
data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
- info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
if check {
setting = 1
}
@@ -477,7 +622,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error {
Vf: uint32(vf),
Setting: setting,
}
- nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize())
+ info.AddRtAttr(nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize())
req.AddData(data)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
@@ -503,7 +648,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error {
req.AddData(msg)
data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
- info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
if state {
setting = 1
}
@@ -511,22 +656,66 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error {
Vf: uint32(vf),
Setting: setting,
}
- nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize())
+ info.AddRtAttr(nl.IFLA_VF_TRUST, vfmsg.Serialize())
req.AddData(data)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
return err
}
+// LinkSetVfNodeGUID sets the node GUID of a vf for the link.
+// Equivalent to: `ip link set dev $link vf $vf node_guid $nodeguid`
+func LinkSetVfNodeGUID(link Link, vf int, nodeguid net.HardwareAddr) error {
+ return pkgHandle.LinkSetVfGUID(link, vf, nodeguid, nl.IFLA_VF_IB_NODE_GUID)
+}
+
+// LinkSetVfPortGUID sets the port GUID of a vf for the link.
+// Equivalent to: `ip link set dev $link vf $vf port_guid $portguid`
+func LinkSetVfPortGUID(link Link, vf int, portguid net.HardwareAddr) error {
+ return pkgHandle.LinkSetVfGUID(link, vf, portguid, nl.IFLA_VF_IB_PORT_GUID)
+}
+
+// LinkSetVfGUID sets the node or port GUID of a vf for the link.
+func (h *Handle) LinkSetVfGUID(link Link, vf int, vfGuid net.HardwareAddr, guidType int) error {
+ var err error
+ var guid uint64
+
+ buf := bytes.NewBuffer(vfGuid)
+ err = binary.Read(buf, binary.BigEndian, &guid)
+ if err != nil {
+ return err
+ }
+
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil)
+ info := data.AddRtAttr(nl.IFLA_VF_INFO, nil)
+ vfmsg := nl.VfGUID{
+ Vf: uint32(vf),
+ GUID: guid,
+ }
+ info.AddRtAttr(guidType, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err = req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
// LinkSetMaster sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
-func LinkSetMaster(link Link, master *Bridge) error {
+func LinkSetMaster(link Link, master Link) error {
return pkgHandle.LinkSetMaster(link, master)
}
// LinkSetMaster sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
-func (h *Handle) LinkSetMaster(link Link, master *Bridge) error {
+func (h *Handle) LinkSetMaster(link Link, master Link) error {
index := 0
if master != nil {
masterBase := master.Attrs()
@@ -672,69 +861,69 @@ type vxlanPortRange struct {
}
func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if vxlan.FlowBased {
vxlan.VxlanId = 0
}
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+ data.AddRtAttr(nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
if vxlan.VtepDevIndex != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
+ data.AddRtAttr(nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
}
if vxlan.SrcAddr != nil {
ip := vxlan.SrcAddr.To4()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_VXLAN_LOCAL, []byte(ip))
} else {
ip = vxlan.SrcAddr.To16()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
+ data.AddRtAttr(nl.IFLA_VXLAN_LOCAL6, []byte(ip))
}
}
}
if vxlan.Group != nil {
group := vxlan.Group.To4()
if group != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
+ data.AddRtAttr(nl.IFLA_VXLAN_GROUP, []byte(group))
} else {
group = vxlan.Group.To16()
if group != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
+ data.AddRtAttr(nl.IFLA_VXLAN_GROUP6, []byte(group))
}
}
}
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx))
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx))
+ data.AddRtAttr(nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
+ data.AddRtAttr(nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
+ data.AddRtAttr(nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
+ data.AddRtAttr(nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
+ data.AddRtAttr(nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
+ data.AddRtAttr(nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
+ data.AddRtAttr(nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
+ data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx))
+ data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx))
if vxlan.UDPCSum {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum))
+ data.AddRtAttr(nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum))
}
if vxlan.GBP {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{})
+ data.AddRtAttr(nl.IFLA_VXLAN_GBP, []byte{})
}
if vxlan.FlowBased {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased))
+ data.AddRtAttr(nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased))
}
if vxlan.NoAge {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
+ data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
} else if vxlan.Age > 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
+ data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
}
if vxlan.Limit > 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
+ data.AddRtAttr(nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
}
if vxlan.Port > 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port)))
+ data.AddRtAttr(nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port)))
}
if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
@@ -742,100 +931,100 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
buf := new(bytes.Buffer)
binary.Write(buf, binary.BigEndian, &pr)
- nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
+ data.AddRtAttr(nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
}
}
func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if bond.Mode >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode)))
+ data.AddRtAttr(nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode)))
}
if bond.ActiveSlave >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave)))
+ data.AddRtAttr(nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave)))
}
if bond.Miimon >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon)))
+ data.AddRtAttr(nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon)))
}
if bond.UpDelay >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay)))
+ data.AddRtAttr(nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay)))
}
if bond.DownDelay >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay)))
+ data.AddRtAttr(nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay)))
}
if bond.UseCarrier >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier)))
+ data.AddRtAttr(nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier)))
}
if bond.ArpInterval >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval)))
+ data.AddRtAttr(nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval)))
}
if bond.ArpIpTargets != nil {
- msg := nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_IP_TARGET, nil)
+ msg := data.AddRtAttr(nl.IFLA_BOND_ARP_IP_TARGET, nil)
for i := range bond.ArpIpTargets {
ip := bond.ArpIpTargets[i].To4()
if ip != nil {
- nl.NewRtAttrChild(msg, i, []byte(ip))
+ msg.AddRtAttr(i, []byte(ip))
continue
}
ip = bond.ArpIpTargets[i].To16()
if ip != nil {
- nl.NewRtAttrChild(msg, i, []byte(ip))
+ msg.AddRtAttr(i, []byte(ip))
}
}
}
if bond.ArpValidate >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate)))
+ data.AddRtAttr(nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate)))
}
if bond.ArpAllTargets >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets)))
+ data.AddRtAttr(nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets)))
}
if bond.Primary >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary)))
+ data.AddRtAttr(nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary)))
}
if bond.PrimaryReselect >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect)))
+ data.AddRtAttr(nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect)))
}
if bond.FailOverMac >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac)))
+ data.AddRtAttr(nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac)))
}
if bond.XmitHashPolicy >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy)))
+ data.AddRtAttr(nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy)))
}
if bond.ResendIgmp >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp)))
+ data.AddRtAttr(nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp)))
}
if bond.NumPeerNotif >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif)))
+ data.AddRtAttr(nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif)))
}
if bond.AllSlavesActive >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive)))
+ data.AddRtAttr(nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive)))
}
if bond.MinLinks >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks)))
+ data.AddRtAttr(nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks)))
}
if bond.LpInterval >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval)))
+ data.AddRtAttr(nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval)))
}
if bond.PackersPerSlave >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave)))
+ data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave)))
}
if bond.LacpRate >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate)))
+ data.AddRtAttr(nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate)))
}
if bond.AdSelect >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect)))
+ data.AddRtAttr(nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect)))
}
if bond.AdActorSysPrio >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio)))
+ data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio)))
}
if bond.AdUserPortKey >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey)))
+ data.AddRtAttr(nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey)))
}
if bond.AdActorSystem != nil {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem))
+ data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem))
}
if bond.TlbDynamicLb >= 0 {
- nl.NewRtAttrChild(data, nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb)))
+ data.AddRtAttr(nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb)))
}
}
@@ -853,7 +1042,7 @@ func LinkAdd(link Link) error {
}
// LinkAdd adds a new link device. The type and features of the device
-// are taken fromt the parameters in the link object.
+// are taken from the parameters in the link object.
// Equivalent to: `ip link add $link`
func (h *Handle) LinkAdd(link Link) error {
return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK)
@@ -863,16 +1052,18 @@ func (h *Handle) linkModify(link Link, flags int) error {
// TODO: support extra data for macvlan
base := link.Attrs()
- if base.Name == "" {
- return fmt.Errorf("LinkAttrs.Name cannot be empty!")
+ // if tuntap, then the name can be empty; the OS will provide a name
+ tuntap, isTuntap := link.(*Tuntap)
+
+ if base.Name == "" && !isTuntap {
+ return fmt.Errorf("LinkAttrs.Name cannot be empty")
}
- if tuntap, ok := link.(*Tuntap); ok {
+ if isTuntap {
// TODO: support user
// TODO: support group
- // TODO: support non- persistent
if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP {
- return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode)
+ return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode)
}
queues := tuntap.Queues
@@ -913,12 +1104,25 @@ func (h *Handle) linkModify(link Link, flags int) error {
cleanupFds(fds)
return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno)
}
+ // 1) we only care for the name of the first tap in the multi queue set
+ // 2) if the original name was empty, the localReq has now the actual name
+ //
+ // In addition:
+ // This ensures that the link name is always identical to what the kernel returns.
+ // Not only in case of an empty name, but also when using name templates.
+ // e.g. when the provided name is "tap%d", the kernel replaces %d with the next available number.
+ if i == 0 {
+ link.Attrs().Name = strings.Trim(string(localReq.Name[:]), "\x00")
+ }
}
- _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1)
- if errno != 0 {
- cleanupFds(fds)
- return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno)
+ // only persist interface if NonPersist is NOT set
+ if !tuntap.NonPersist {
+ _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1)
+ if errno != 0 {
+ cleanupFds(fds)
+ return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno)
+ }
}
h.ensureIndex(base)
@@ -928,7 +1132,11 @@ func (h *Handle) linkModify(link Link, flags int) error {
// TODO: verify MasterIndex is actually a bridge?
err := h.LinkSetMasterByIndex(link, base.MasterIndex)
if err != nil {
- _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0)
+ // un-persist the tuntap (i.e. allow the interface to be removed);
+ // should not hurt if it was not set before, and the condition might not be needed
+ if !tuntap.NonPersist {
+ _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0)
+ }
cleanupFds(fds)
return err
}
@@ -978,8 +1186,8 @@ func (h *Handle) linkModify(link Link, flags int) error {
native.PutUint32(b, uint32(base.ParentIndex))
data := nl.NewRtAttr(unix.IFLA_LINK, b)
req.AddData(data)
- } else if link.Type() == "ipvlan" {
- return fmt.Errorf("Can't create ipvlan link without ParentIndex")
+ } else if link.Type() == "ipvlan" || link.Type() == "ipoib" {
+ return fmt.Errorf("Can't create %s link without ParentIndex", link.Type())
}
nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
@@ -1010,14 +1218,29 @@ func (h *Handle) linkModify(link Link, flags int) error {
req.AddData(rxqueues)
}
+ if base.GSOMaxSegs > 0 {
+ gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(base.GSOMaxSegs))
+ req.AddData(gsoAttr)
+ }
+
+ if base.GSOMaxSize > 0 {
+ gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(base.GSOMaxSize))
+ req.AddData(gsoAttr)
+ }
+
+ if base.Group > 0 {
+ groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group))
+ req.AddData(groupAttr)
+ }
+
if base.Namespace != nil {
var attr *nl.RtAttr
- switch base.Namespace.(type) {
+ switch ns := base.Namespace.(type) {
case NsPid:
- val := nl.Uint32Attr(uint32(base.Namespace.(NsPid)))
+ val := nl.Uint32Attr(uint32(ns))
attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val)
case NsFd:
- val := nl.Uint32Attr(uint32(base.Namespace.(NsFd)))
+ val := nl.Uint32Attr(uint32(ns))
attr = nl.NewRtAttr(unix.IFLA_NET_NS_FD, val)
}
@@ -1029,47 +1252,56 @@ func (h *Handle) linkModify(link Link, flags int) error {
}
linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
- nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+ linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
switch link := link.(type) {
case *Vlan:
b := make([]byte, 2)
native.PutUint16(b, uint16(link.VlanId))
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_VLAN_ID, b)
+
+ if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN {
+ data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol)))
+ }
case *Veth:
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil)
nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC)
- nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName))
+ peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName))
if base.TxQLen >= 0 {
- nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
+ peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
}
if base.MTU > 0 {
- nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+ }
+ if link.PeerHardwareAddr != nil {
+ peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(link.PeerHardwareAddr))
}
-
case *Vxlan:
addVxlanAttrs(link, linkInfo)
case *Bond:
addBondAttrs(link, linkInfo)
case *IPVlan:
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode)))
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode)))
+ data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag)))
case *Macvlan:
if link.Mode != MACVLAN_MODE_DEFAULT {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
}
case *Macvtap:
if link.Mode != MACVLAN_MODE_DEFAULT {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
}
case *Gretap:
addGretapAttrs(link, linkInfo)
case *Iptun:
addIptunAttrs(link, linkInfo)
+ case *Ip6tnl:
+ addIp6tnlAttrs(link, linkInfo)
case *Sittun:
addSittunAttrs(link, linkInfo)
case *Gretun:
@@ -1082,6 +1314,10 @@ func (h *Handle) linkModify(link Link, flags int) error {
addBridgeAttrs(link, linkInfo)
case *GTP:
addGTPAttrs(link, linkInfo)
+ case *Xfrmi:
+ addXfrmiAttrs(link, linkInfo)
+ case *IPoIB:
+ addIPoIBAttrs(link, linkInfo)
}
req.AddData(linkInfo)
@@ -1170,6 +1406,9 @@ func (h *Handle) LinkByName(name string) (Link, error) {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
req.AddData(msg)
+ attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF))
+ req.AddData(attr)
+
nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name))
req.AddData(nameData)
@@ -1202,6 +1441,9 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
req.AddData(msg)
+ attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF))
+ req.AddData(attr)
+
nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias))
req.AddData(nameData)
@@ -1228,6 +1470,8 @@ func (h *Handle) LinkByIndex(index int) (Link, error) {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
msg.Index = int32(index)
req.AddData(msg)
+ attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF))
+ req.AddData(attr)
return execGetLink(req)
}
@@ -1270,10 +1514,12 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
base.Promisc = 1
}
var (
- link Link
- stats32 []byte
- stats64 []byte
- linkType string
+ link Link
+ stats32 *LinkStatistics32
+ stats64 *LinkStatistics64
+ linkType string
+ linkSlave LinkSlave
+ slaveType string
)
for _, attr := range attrs {
switch attr.Attr.Type {
@@ -1313,18 +1559,26 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
link = &Gretap{}
case "ipip":
link = &Iptun{}
+ case "ip6tnl":
+ link = &Ip6tnl{}
case "sit":
link = &Sittun{}
case "gre":
link = &Gretun{}
case "ip6gre":
link = &Gretun{}
- case "vti":
+ case "vti", "vti6":
link = &Vti{}
case "vrf":
link = &Vrf{}
case "gtp":
link = &GTP{}
+ case "xfrm":
+ link = &Xfrmi{}
+ case "tun":
+ link = &Tuntap{}
+ case "ipoib":
+ link = &IPoIB{}
default:
link = &GenericLink{LinkType: linkType}
}
@@ -1352,13 +1606,15 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
parseGretapData(link, data)
case "ipip":
parseIptunData(link, data)
+ case "ip6tnl":
+ parseIp6tnlData(link, data)
case "sit":
parseSittunData(link, data)
case "gre":
parseGretunData(link, data)
case "ip6gre":
parseGretunData(link, data)
- case "vti":
+ case "vti", "vti6":
parseVtiData(link, data)
case "vrf":
parseVrfData(link, data)
@@ -1366,6 +1622,27 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
parseBridgeData(link, data)
case "gtp":
parseGTPData(link, data)
+ case "xfrm":
+ parseXfrmiData(link, data)
+ case "tun":
+ parseTuntapData(link, data)
+ case "ipoib":
+ parseIPoIBData(link, data)
+ }
+ case nl.IFLA_INFO_SLAVE_KIND:
+ slaveType = string(info.Value[:len(info.Value)-1])
+ switch slaveType {
+ case "bond":
+ linkSlave = &BondSlave{}
+ }
+ case nl.IFLA_INFO_SLAVE_DATA:
+ switch slaveType {
+ case "bond":
+ data, err := nl.ParseRouteAttr(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ parseBondSlaveData(linkSlave, data)
}
}
}
@@ -1392,9 +1669,15 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
case unix.IFLA_IFALIAS:
base.Alias = string(attr.Value[:len(attr.Value)-1])
case unix.IFLA_STATS:
- stats32 = attr.Value[:]
+ stats32 = new(LinkStatistics32)
+ if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats32); err != nil {
+ return nil, err
+ }
case unix.IFLA_STATS64:
- stats64 = attr.Value[:]
+ stats64 = new(LinkStatistics64)
+ if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats64); err != nil {
+ return nil, err
+ }
case unix.IFLA_XDP:
xdp, err := parseLinkXdp(attr.Value[:])
if err != nil {
@@ -1408,19 +1691,40 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
if err != nil {
return nil, err
}
- base.Protinfo = parseProtinfo(attrs)
+ protinfo := parseProtinfo(attrs)
+ base.Protinfo = &protinfo
}
case unix.IFLA_OPERSTATE:
base.OperState = LinkOperState(uint8(attr.Value[0]))
case unix.IFLA_LINK_NETNSID:
base.NetNsID = int(native.Uint32(attr.Value[0:4]))
+ case unix.IFLA_GSO_MAX_SIZE:
+ base.GSOMaxSize = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_GSO_MAX_SEGS:
+ base.GSOMaxSegs = native.Uint32(attr.Value[0:4])
+ case unix.IFLA_VFINFO_LIST:
+ data, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, err
+ }
+ vfs, err := parseVfInfoList(data)
+ if err != nil {
+ return nil, err
+ }
+ base.Vfs = vfs
+ case unix.IFLA_NUM_TX_QUEUES:
+ base.NumTxQueues = int(native.Uint32(attr.Value[0:4]))
+ case unix.IFLA_NUM_RX_QUEUES:
+ base.NumRxQueues = int(native.Uint32(attr.Value[0:4]))
+ case unix.IFLA_GROUP:
+ base.Group = native.Uint32(attr.Value[0:4])
}
}
if stats64 != nil {
- base.Statistics = parseLinkStats64(stats64)
+ base.Statistics = (*LinkStatistics)(stats64)
} else if stats32 != nil {
- base.Statistics = parseLinkStats32(stats32)
+ base.Statistics = (*LinkStatistics)(stats32.to64())
}
// Links that don't have IFLA_INFO_KIND are hardware devices
@@ -1428,10 +1732,59 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) {
link = &Device{}
}
*link.Attrs() = base
+ link.Attrs().Slave = linkSlave
+
+ // If the tuntap attributes are not updated by netlink due to
+ // an older driver, use sysfs
+ if link != nil && linkType == "tun" {
+ tuntap := link.(*Tuntap)
+
+ if tuntap.Mode == 0 {
+ ifname := tuntap.Attrs().Name
+ if flags, err := readSysPropAsInt64(ifname, "tun_flags"); err == nil {
+
+ if flags&unix.IFF_TUN != 0 {
+ tuntap.Mode = unix.IFF_TUN
+ } else if flags&unix.IFF_TAP != 0 {
+ tuntap.Mode = unix.IFF_TAP
+ }
+
+ tuntap.NonPersist = false
+ if flags&unix.IFF_PERSIST == 0 {
+ tuntap.NonPersist = true
+ }
+ }
+
+ // The sysfs interface for owner/group returns -1 for root user, instead of returning 0.
+ // So explicitly check for negative value, before assigning the owner uid/gid.
+ if owner, err := readSysPropAsInt64(ifname, "owner"); err == nil && owner > 0 {
+ tuntap.Owner = uint32(owner)
+ }
+
+ if group, err := readSysPropAsInt64(ifname, "group"); err == nil && group > 0 {
+ tuntap.Group = uint32(group)
+ }
+ }
+ }
return link, nil
}
+func readSysPropAsInt64(ifname, prop string) (int64, error) {
+ fname := fmt.Sprintf("/sys/class/net/%s/%s", ifname, prop)
+ contents, err := ioutil.ReadFile(fname)
+ if err != nil {
+ return 0, err
+ }
+
+ num, err := strconv.ParseInt(strings.TrimSpace(string(contents)), 0, 64)
+ if err == nil {
+ return num, nil
+ }
+
+ return 0, err
+}
+
// LinkList gets a list of link devices.
// Equivalent to: `ip link show`
func LinkList() ([]Link, error) {
@@ -1447,6 +1800,8 @@ func (h *Handle) LinkList() ([]Link, error) {
msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
req.AddData(msg)
+ attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF))
+ req.AddData(attr)
msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK)
if err != nil {
@@ -1526,13 +1881,19 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c
go func() {
defer close(ch)
for {
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
if cberr != nil {
cberr(err)
}
return
}
+ if from.Pid != nl.PidKernel {
+ if cberr != nil {
+ cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel))
+ }
+ continue
+ }
for _, m := range msgs {
if m.Header.Type == unix.NLMSG_DONE {
continue
@@ -1639,7 +2000,7 @@ func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
req.AddData(msg)
br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil)
- nl.NewRtAttrChild(br, attr, boolToByte(mode))
+ br.AddRtAttr(attr, boolToByte(mode))
req.AddData(br)
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
if err != nil {
@@ -1675,12 +2036,43 @@ func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
return err
}
+// LinkSetGroup sets the link group id which can be used to perform mass actions
+// with iproute2, as well as to use it as a reference in nft filters.
+// Equivalent to: `ip link set $link group $id`
+func LinkSetGroup(link Link, group int) error {
+ return pkgHandle.LinkSetGroup(link, group)
+}
+
+// LinkSetGroup sets the link group id which can be used to perform mass actions
+// with iproute2, as well as to use it as a reference in nft filters.
+// Equivalent to: `ip link set $link group $id`
+func (h *Handle) LinkSetGroup(link Link, group int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(group))
+
+ data := nl.NewRtAttr(unix.IFLA_GROUP, b)
+ req.AddData(data)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
vlan := link.(*Vlan)
for _, datum := range data {
switch datum.Attr.Type {
case nl.IFLA_VLAN_ID:
vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+ case nl.IFLA_VLAN_PROTOCOL:
+ vlan.VlanProtocol = VlanProtocol(int(ntohs(datum.Value[0:2])))
}
}
}
@@ -1762,7 +2154,7 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_BOND_ARP_INTERVAL:
bond.ArpInterval = int(native.Uint32(data[i].Value[0:4]))
case nl.IFLA_BOND_ARP_IP_TARGET:
- // TODO: implement
+ bond.ArpIpTargets = parseBondArpIpTargets(data[i].Value)
case nl.IFLA_BOND_ARP_VALIDATE:
bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4]))
case nl.IFLA_BOND_ARP_ALL_TARGETS:
@@ -1805,12 +2197,75 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
+func parseBondArpIpTargets(value []byte) []net.IP {
+ data, err := nl.ParseRouteAttr(value)
+ if err != nil {
+ return nil
+ }
+
+ targets := []net.IP{}
+ for i := range data {
+ target := net.IP(data[i].Value)
+ if ip := target.To4(); ip != nil {
+ targets = append(targets, ip)
+ continue
+ }
+ if ip := target.To16(); ip != nil {
+ targets = append(targets, ip)
+ }
+ }
+
+ return targets
+}
+
+func addBondSlaveAttrs(bondSlave *BondSlave, linkInfo *nl.RtAttr) {
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil)
+
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_STATE, nl.Uint8Attr(uint8(bondSlave.State)))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_MII_STATUS, nl.Uint8Attr(uint8(bondSlave.MiiStatus)))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, nl.Uint32Attr(bondSlave.LinkFailureCount))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(bondSlave.QueueId))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, nl.Uint16Attr(bondSlave.AggregatorId))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, nl.Uint8Attr(bondSlave.AdActorOperPortState))
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, nl.Uint16Attr(bondSlave.AdPartnerOperPortState))
+
+ if mac := bondSlave.PermHardwareAddr; mac != nil {
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_PERM_HWADDR, []byte(mac))
+ }
+}
+
+func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) {
+ bondSlave := slave.(*BondSlave)
+ for i := range data {
+ switch data[i].Attr.Type {
+ case nl.IFLA_BOND_SLAVE_STATE:
+ bondSlave.State = BondSlaveState(data[i].Value[0])
+ case nl.IFLA_BOND_SLAVE_MII_STATUS:
+ bondSlave.MiiStatus = BondSlaveMiiStatus(data[i].Value[0])
+ case nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT:
+ bondSlave.LinkFailureCount = native.Uint32(data[i].Value[0:4])
+ case nl.IFLA_BOND_SLAVE_PERM_HWADDR:
+ bondSlave.PermHardwareAddr = net.HardwareAddr(data[i].Value[0:6])
+ case nl.IFLA_BOND_SLAVE_QUEUE_ID:
+ bondSlave.QueueId = native.Uint16(data[i].Value[0:2])
+ case nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID:
+ bondSlave.AggregatorId = native.Uint16(data[i].Value[0:2])
+ case nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE:
+ bondSlave.AdActorOperPortState = uint8(data[i].Value[0])
+ case nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE:
+ bondSlave.AdPartnerOperPortState = native.Uint16(data[i].Value[0:2])
+ }
+ }
+}
+
func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
ipv := link.(*IPVlan)
for _, datum := range data {
- if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
+ switch datum.Attr.Type {
+ case nl.IFLA_IPVLAN_MODE:
ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
- return
+ case nl.IFLA_IPVLAN_FLAG:
+ ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4]))
}
}
}
@@ -1873,11 +2328,11 @@ func linkFlags(rawFlags uint32) net.Flags {
}
func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if gretap.FlowBased {
// In flow based mode, no other attributes need to be configured
- nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
+ data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
return
}
@@ -1885,40 +2340,40 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
if ip.To4() != nil {
ip = ip.To4()
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip))
}
if ip := gretap.Remote; ip != nil {
if ip.To4() != nil {
ip = ip.To4()
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+ data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip))
}
if gretap.IKey != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gretap.IKey))
+ data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gretap.IKey))
gretap.IFlags |= uint16(nl.GRE_KEY)
}
if gretap.OKey != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gretap.OKey))
+ data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gretap.OKey))
gretap.OFlags |= uint16(nl.GRE_KEY)
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags))
+ data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags))
+ data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags))
if gretap.Link != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link))
+ data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link))
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport))
+ data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc))
+ data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl))
+ data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport))
}
func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -1930,9 +2385,9 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_GRE_IKEY:
gre.OKey = ntohl(datum.Value[0:4])
case nl.IFLA_GRE_LOCAL:
- gre.Local = net.IP(datum.Value[0:16])
+ gre.Local = net.IP(datum.Value)
case nl.IFLA_GRE_REMOTE:
- gre.Remote = net.IP(datum.Value[0:16])
+ gre.Remote = net.IP(datum.Value)
case nl.IFLA_GRE_ENCAP_SPORT:
gre.EncapSport = ntohs(datum.Value[0:2])
case nl.IFLA_GRE_ENCAP_DPORT:
@@ -1941,7 +2396,6 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
gre.IFlags = ntohs(datum.Value[0:2])
case nl.IFLA_GRE_OFLAGS:
gre.OFlags = ntohs(datum.Value[0:2])
-
case nl.IFLA_GRE_TTL:
gre.Ttl = uint8(datum.Value[0])
case nl.IFLA_GRE_TOS:
@@ -1953,73 +2407,70 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_GRE_ENCAP_FLAGS:
gre.EncapFlags = native.Uint16(datum.Value[0:2])
case nl.IFLA_GRE_COLLECT_METADATA:
- if len(datum.Value) > 0 {
- gre.FlowBased = int8(datum.Value[0]) != 0
- }
+ gre.FlowBased = true
}
}
}
func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if ip := gre.Local; ip != nil {
if ip.To4() != nil {
ip = ip.To4()
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip))
}
if ip := gre.Remote; ip != nil {
if ip.To4() != nil {
ip = ip.To4()
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+ data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip))
}
if gre.IKey != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey))
+ data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gre.IKey))
gre.IFlags |= uint16(nl.GRE_KEY)
}
if gre.OKey != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey))
+ data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gre.OKey))
gre.OFlags |= uint16(nl.GRE_KEY)
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags))
+ data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gre.IFlags))
+ data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gre.OFlags))
if gre.Link != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link))
+ data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link))
}
- nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport))
- nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport))
+ data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc))
+ data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl))
+ data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport))
+ data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport))
}
func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
gre := link.(*Gretun)
for _, datum := range data {
switch datum.Attr.Type {
- case nl.IFLA_GRE_OKEY:
- gre.IKey = ntohl(datum.Value[0:4])
case nl.IFLA_GRE_IKEY:
+ gre.IKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_GRE_OKEY:
gre.OKey = ntohl(datum.Value[0:4])
case nl.IFLA_GRE_LOCAL:
- gre.Local = net.IP(datum.Value[0:16])
+ gre.Local = net.IP(datum.Value)
case nl.IFLA_GRE_REMOTE:
- gre.Remote = net.IP(datum.Value[0:16])
+ gre.Remote = net.IP(datum.Value)
case nl.IFLA_GRE_IFLAGS:
gre.IFlags = ntohs(datum.Value[0:2])
case nl.IFLA_GRE_OFLAGS:
gre.OFlags = ntohs(datum.Value[0:2])
-
case nl.IFLA_GRE_TTL:
gre.Ttl = uint8(datum.Value[0])
case nl.IFLA_GRE_TOS:
@@ -2038,23 +2489,15 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
-func parseLinkStats32(data []byte) *LinkStatistics {
- return (*LinkStatistics)((*LinkStatistics32)(unsafe.Pointer(&data[0:SizeofLinkStats32][0])).to64())
-}
-
-func parseLinkStats64(data []byte) *LinkStatistics {
- return (*LinkStatistics)((*LinkStatistics64)(unsafe.Pointer(&data[0:SizeofLinkStats64][0])))
-}
-
func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) {
attrs := nl.NewRtAttr(unix.IFLA_XDP|unix.NLA_F_NESTED, nil)
b := make([]byte, 4)
native.PutUint32(b, uint32(xdp.Fd))
- nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b)
+ attrs.AddRtAttr(nl.IFLA_XDP_FD, b)
if xdp.Flags != 0 {
b := make([]byte, 4)
native.PutUint32(b, xdp.Flags)
- nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+ attrs.AddRtAttr(nl.IFLA_XDP_FLAGS, b)
}
req.AddData(attrs)
}
@@ -2083,32 +2526,32 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) {
func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
if iptun.FlowBased {
// In flow based mode, no other attributes need to be configured
- nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased))
+ linkInfo.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased))
return
}
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
ip := iptun.Local.To4()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip))
}
ip = iptun.Remote.To4()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip))
+ data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip))
}
if iptun.Link != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link))
+ data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link))
}
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport))
+ data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc))
+ data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl))
+ data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport))
}
func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -2139,34 +2582,83 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
+func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) {
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+
+ if ip6tnl.Link != 0 {
+ data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link))
+ }
+
+ ip := ip6tnl.Local.To16()
+ if ip != nil {
+ data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip))
+ }
+
+ ip = ip6tnl.Remote.To16()
+ if ip != nil {
+ data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip))
+ }
+
+ data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(ip6tnl.Ttl))
+ data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(ip6tnl.Tos))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit))
+ data.AddRtAttr(nl.IFLA_IPTUN_FLAGS, nl.Uint32Attr(ip6tnl.Flags))
+ data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(ip6tnl.Proto))
+ data.AddRtAttr(nl.IFLA_IPTUN_FLOWINFO, nl.Uint32Attr(ip6tnl.FlowInfo))
+}
+
+func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) {
+ ip6tnl := link.(*Ip6tnl)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_IPTUN_LOCAL:
+ ip6tnl.Local = net.IP(datum.Value[:16])
+ case nl.IFLA_IPTUN_REMOTE:
+ ip6tnl.Remote = net.IP(datum.Value[:16])
+ case nl.IFLA_IPTUN_TTL:
+ ip6tnl.Ttl = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_TOS:
+ ip6tnl.Tos = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_ENCAP_LIMIT:
+ ip6tnl.EncapLimit = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_FLAGS:
+ ip6tnl.Flags = native.Uint32(datum.Value[:4])
+ case nl.IFLA_IPTUN_PROTO:
+ ip6tnl.Proto = uint8(datum.Value[0])
+ case nl.IFLA_IPTUN_FLOWINFO:
+ ip6tnl.FlowInfo = native.Uint32(datum.Value[:4])
+ }
+ }
+}
+
func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if sittun.Link != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link))
+ data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link))
}
ip := sittun.Local.To4()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip))
}
ip = sittun.Remote.To4()
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip))
+ data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip))
}
if sittun.Ttl > 0 {
// Would otherwise fail on 3.10 kernel
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl))
+ data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl))
}
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport))
- nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport))
+ data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos))
+ data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport))
+ data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport))
}
func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -2196,24 +2688,39 @@ func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) {
}
func addVtiAttrs(vti *Vti, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+
+ family := FAMILY_V4
+ if vti.Local.To4() == nil {
+ family = FAMILY_V6
+ }
- ip := vti.Local.To4()
+ var ip net.IP
+
+ if family == FAMILY_V4 {
+ ip = vti.Local.To4()
+ } else {
+ ip = vti.Local
+ }
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VTI_LOCAL, []byte(ip))
+ data.AddRtAttr(nl.IFLA_VTI_LOCAL, []byte(ip))
}
- ip = vti.Remote.To4()
+ if family == FAMILY_V4 {
+ ip = vti.Remote.To4()
+ } else {
+ ip = vti.Remote
+ }
if ip != nil {
- nl.NewRtAttrChild(data, nl.IFLA_VTI_REMOTE, []byte(ip))
+ data.AddRtAttr(nl.IFLA_VTI_REMOTE, []byte(ip))
}
if vti.Link != 0 {
- nl.NewRtAttrChild(data, nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link))
+ data.AddRtAttr(nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link))
}
- nl.NewRtAttrChild(data, nl.IFLA_VTI_IKEY, htonl(vti.IKey))
- nl.NewRtAttrChild(data, nl.IFLA_VTI_OKEY, htonl(vti.OKey))
+ data.AddRtAttr(nl.IFLA_VTI_IKEY, htonl(vti.IKey))
+ data.AddRtAttr(nl.IFLA_VTI_OKEY, htonl(vti.OKey))
}
func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -2221,9 +2728,9 @@ func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) {
for _, datum := range data {
switch datum.Attr.Type {
case nl.IFLA_VTI_LOCAL:
- vti.Local = net.IP(datum.Value[0:4])
+ vti.Local = net.IP(datum.Value)
case nl.IFLA_VTI_REMOTE:
- vti.Remote = net.IP(datum.Value[0:4])
+ vti.Remote = net.IP(datum.Value)
case nl.IFLA_VTI_IKEY:
vti.IKey = ntohl(datum.Value[0:4])
case nl.IFLA_VTI_OKEY:
@@ -2233,10 +2740,10 @@ func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) {
}
func addVrfAttrs(vrf *Vrf, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
b := make([]byte, 4)
native.PutUint32(b, uint32(vrf.Table))
- nl.NewRtAttrChild(data, nl.IFLA_VRF_TABLE, b)
+ data.AddRtAttr(nl.IFLA_VRF_TABLE, b)
}
func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) {
@@ -2250,12 +2757,15 @@ func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) {
}
func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
if bridge.MulticastSnooping != nil {
- nl.NewRtAttrChild(data, nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping))
+ data.AddRtAttr(nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping))
}
if bridge.HelloTime != nil {
- nl.NewRtAttrChild(data, nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime))
+ data.AddRtAttr(nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime))
+ }
+ if bridge.VlanFiltering != nil {
+ data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering))
}
}
@@ -2269,17 +2779,20 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_BR_MCAST_SNOOPING:
mcastSnooping := datum.Value[0] == 1
br.MulticastSnooping = &mcastSnooping
+ case nl.IFLA_BR_VLAN_FILTERING:
+ vlanFiltering := datum.Value[0] == 1
+ br.VlanFiltering = &vlanFiltering
}
}
}
func addGTPAttrs(gtp *GTP, linkInfo *nl.RtAttr) {
- data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0)))
- nl.NewRtAttrChild(data, nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1)))
- nl.NewRtAttrChild(data, nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072))
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0)))
+ data.AddRtAttr(nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1)))
+ data.AddRtAttr(nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072))
if gtp.Role != nl.GTP_ROLE_GGSN {
- nl.NewRtAttrChild(data, nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role)))
+ data.AddRtAttr(nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role)))
}
}
@@ -2299,6 +2812,70 @@ func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) {
}
}
+func parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) {
+ var vfs []VfInfo
+
+ for i, element := range data {
+ if element.Attr.Type != nl.IFLA_VF_INFO {
+ return nil, fmt.Errorf("Incorrect element type in vf info list: %d", element.Attr.Type)
+ }
+ vfAttrs, err := nl.ParseRouteAttr(element.Value)
+ if err != nil {
+ return nil, err
+ }
+ vfs = append(vfs, parseVfInfo(vfAttrs, i))
+ }
+ return vfs, nil
+}
+
+func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
+ vf := VfInfo{ID: id}
+ for _, element := range data {
+ switch element.Attr.Type {
+ case nl.IFLA_VF_MAC:
+ mac := nl.DeserializeVfMac(element.Value[:])
+ vf.Mac = mac.Mac[:6]
+ case nl.IFLA_VF_VLAN:
+ vl := nl.DeserializeVfVlan(element.Value[:])
+ vf.Vlan = int(vl.Vlan)
+ vf.Qos = int(vl.Qos)
+ case nl.IFLA_VF_TX_RATE:
+ txr := nl.DeserializeVfTxRate(element.Value[:])
+ vf.TxRate = int(txr.Rate)
+ case nl.IFLA_VF_SPOOFCHK:
+ sp := nl.DeserializeVfSpoofchk(element.Value[:])
+ vf.Spoofchk = sp.Setting != 0
+ case nl.IFLA_VF_LINK_STATE:
+ ls := nl.DeserializeVfLinkState(element.Value[:])
+ vf.LinkState = ls.LinkState
+ case nl.IFLA_VF_RATE:
+ vfr := nl.DeserializeVfRate(element.Value[:])
+ vf.MaxTxRate = vfr.MaxTxRate
+ vf.MinTxRate = vfr.MinTxRate
+ }
+ }
+ return vf
+}
+
+func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) {
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex)))
+ data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid))
+
+}
+
+func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) {
+ xfrmi := link.(*Xfrmi)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_XFRM_LINK:
+ xfrmi.ParentIndex = int(native.Uint32(datum.Value))
+ case nl.IFLA_XFRM_IF_ID:
+ xfrmi.Ifid = native.Uint32(datum.Value)
+ }
+ }
+}
+
// LinkSetBondSlave add slave to bond link via ioctl interface.
func LinkSetBondSlave(link Link, master *Bond) error {
fd, err := getSocketUDP()
@@ -2316,6 +2893,52 @@ func LinkSetBondSlave(link Link, master *Bond) error {
return nil
}
+// LinkSetBondSlaveQueueId modify bond slave queue-id.
+func (h *Handle) LinkSetBondSlaveQueueId(link Link, queueId uint16) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil)
+ data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(queueId))
+
+ req.AddData(linkInfo)
+ _, err := req.Execute(unix.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetBondSlaveQueueId modify bond slave queue-id.
+func LinkSetBondSlaveQueueId(link Link, queueId uint16) error {
+ return pkgHandle.LinkSetBondSlaveQueueId(link, queueId)
+}
+
+func vethStatsSerialize(stats ethtoolStats) ([]byte, error) {
+ statsSize := int(unsafe.Sizeof(stats)) + int(stats.nStats)*int(unsafe.Sizeof(uint64(0)))
+ b := make([]byte, 0, statsSize)
+ buf := bytes.NewBuffer(b)
+ err := binary.Write(buf, nl.NativeEndian(), stats)
+ return buf.Bytes()[:statsSize], err
+}
+
+type vethEthtoolStats struct {
+ Cmd uint32
+ NStats uint32
+ Peer uint64
+ // Newer kernels have XDP stats in here, but we only care
+ // to extract the peer ifindex here.
+}
+
+func vethStatsDeserialize(b []byte) (vethEthtoolStats, error) {
+ var stats = vethEthtoolStats{}
+ err := binary.Read(bytes.NewReader(b), nl.NativeEndian(), &stats)
+ return stats, err
+}
+
// VethPeerIndex get veth peer index.
func VethPeerIndex(link *Veth) (int, error) {
fd, err := getSocketUDP()
@@ -2330,25 +2953,66 @@ func VethPeerIndex(link *Veth) (int, error) {
return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno)
}
- gstrings := &ethtoolGstrings{
- cmd: ETHTOOL_GSTRINGS,
- stringSet: ETH_SS_STATS,
- length: sSet.data[0],
+ stats := ethtoolStats{
+ cmd: ETHTOOL_GSTATS,
+ nStats: sSet.data[0],
+ }
+
+ buffer, err := vethStatsSerialize(stats)
+ if err != nil {
+ return -1, err
}
- ifreq.Data = uintptr(unsafe.Pointer(gstrings))
+
+ ifreq.Data = uintptr(unsafe.Pointer(&buffer[0]))
_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq)))
if errno != 0 {
return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno)
}
- stats := &ethtoolStats{
- cmd: ETHTOOL_GSTATS,
- nStats: gstrings.length,
+ vstats, err := vethStatsDeserialize(buffer)
+ if err != nil {
+ return -1, err
}
- ifreq.Data = uintptr(unsafe.Pointer(stats))
- _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq)))
- if errno != 0 {
- return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno)
+
+ return int(vstats.Peer), nil
+}
+
+func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) {
+ tuntap := link.(*Tuntap)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_TUN_OWNER:
+ tuntap.Owner = native.Uint32(datum.Value)
+ case nl.IFLA_TUN_GROUP:
+ tuntap.Group = native.Uint32(datum.Value)
+ case nl.IFLA_TUN_TYPE:
+ tuntap.Mode = TuntapMode(uint8(datum.Value[0]))
+ case nl.IFLA_TUN_PERSIST:
+ tuntap.NonPersist = false
+ if uint8(datum.Value[0]) == 0 {
+ tuntap.NonPersist = true
+ }
+ }
+ }
+}
+
+func parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) {
+ ipoib := link.(*IPoIB)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_IPOIB_PKEY:
+ ipoib.Pkey = uint16(native.Uint16(datum.Value))
+ case nl.IFLA_IPOIB_MODE:
+ ipoib.Mode = IPoIBMode(native.Uint16(datum.Value))
+ case nl.IFLA_IPOIB_UMCAST:
+ ipoib.Umcast = uint16(native.Uint16(datum.Value))
+ }
}
- return int(stats.data[0]), nil
+}
+
+func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) {
+ data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+ data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey)))
+ data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode)))
+ data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast)))
}
diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go
index 3f5cd497a..379e5655f 100644
--- a/vendor/github.com/vishvananda/netlink/neigh.go
+++ b/vendor/github.com/vishvananda/netlink/neigh.go
@@ -17,9 +17,16 @@ type Neigh struct {
LLIPAddr net.IP //Used in the case of NHRP
Vlan int
VNI int
+ MasterIndex int
}
// String returns $ip/$hwaddr $label
func (neigh *Neigh) String() string {
return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
}
+
+// NeighUpdate is sent when a neighbor changes - type is RTM_NEWNEIGH or RTM_DELNEIGH.
+type NeighUpdate struct {
+ Type uint16
+ Neigh
+}
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index f75c22649..cb3b55d35 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -1,10 +1,13 @@
package netlink
import (
+ "fmt"
"net"
+ "syscall"
"unsafe"
"github.com/vishvananda/netlink/nl"
+ "github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)
@@ -18,7 +21,10 @@ const (
NDA_PORT
NDA_VNI
NDA_IFINDEX
- NDA_MAX = NDA_IFINDEX
+ NDA_MASTER
+ NDA_LINK_NETNSID
+ NDA_SRC_VNI
+ NDA_MAX = NDA_SRC_VNI
)
// Neighbor Cache Entry States.
@@ -43,6 +49,7 @@ const (
NTF_ROUTER = 0x80
)
+// Ndmsg is for adding, removing or receiving information about a neighbor table entry
type Ndmsg struct {
Family uint8
Index uint32
@@ -170,45 +177,58 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
req.AddData(vniData)
}
+ if neigh.MasterIndex != 0 {
+ masterData := nl.NewRtAttr(NDA_MASTER, nl.Uint32Attr(uint32(neigh.MasterIndex)))
+ req.AddData(masterData)
+ }
+
_, err := req.Execute(unix.NETLINK_ROUTE, 0)
return err
}
-// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// NeighList returns a list of IP-MAC mappings in the system (ARP table).
// Equivalent to: `ip neighbor show`.
// The list can be filtered by link and ip family.
func NeighList(linkIndex, family int) ([]Neigh, error) {
return pkgHandle.NeighList(linkIndex, family)
}
-// NeighProxyList gets a list of neighbor proxies in the system.
+// NeighProxyList returns a list of neighbor proxies in the system.
// Equivalent to: `ip neighbor show proxy`.
// The list can be filtered by link and ip family.
func NeighProxyList(linkIndex, family int) ([]Neigh, error) {
return pkgHandle.NeighProxyList(linkIndex, family)
}
-// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// NeighList returns a list of IP-MAC mappings in the system (ARP table).
// Equivalent to: `ip neighbor show`.
// The list can be filtered by link and ip family.
func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
- return h.neighList(linkIndex, family, 0)
+ return h.NeighListExecute(Ndmsg{
+ Family: uint8(family),
+ Index: uint32(linkIndex),
+ })
}
-// NeighProxyList gets a list of neighbor proxies in the system.
+// NeighProxyList returns a list of neighbor proxies in the system.
// Equivalent to: `ip neighbor show proxy`.
// The list can be filtered by link, ip family.
func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
- return h.neighList(linkIndex, family, NTF_PROXY)
+ return h.NeighListExecute(Ndmsg{
+ Family: uint8(family),
+ Index: uint32(linkIndex),
+ Flags: NTF_PROXY,
+ })
+}
+
+// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state.
+func NeighListExecute(msg Ndmsg) ([]Neigh, error) {
+ return pkgHandle.NeighListExecute(msg)
}
-func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
+// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state.
+func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) {
req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP)
- msg := Ndmsg{
- Family: uint8(family),
- Index: uint32(linkIndex),
- Flags: uint8(flags),
- }
req.AddData(&msg)
msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH)
@@ -219,7 +239,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
var res []Neigh
for _, m := range msgs {
ndm := deserializeNdmsg(m)
- if linkIndex != 0 && int(ndm.Index) != linkIndex {
+ if msg.Index != 0 && ndm.Index != msg.Index {
// Ignore messages from other interfaces
continue
}
@@ -251,14 +271,6 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
return nil, err
}
- // This should be cached for perfomance
- // once per table dump
- link, err := LinkByIndex(neigh.LinkIndex)
- if err != nil {
- return nil, err
- }
- encapType := link.Attrs().EncapType
-
for _, attr := range attrs {
switch attr.Attr.Type {
case NDA_DST:
@@ -268,13 +280,16 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
// #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
attrLen := attr.Attr.Len - unix.SizeofRtAttr
- if attrLen == 4 && (encapType == "ipip" ||
- encapType == "sit" ||
- encapType == "gre") {
+ if attrLen == 4 {
neigh.LLIPAddr = net.IP(attr.Value)
- } else if attrLen == 16 &&
- encapType == "tunnel6" {
- neigh.IP = net.IP(attr.Value)
+ } else if attrLen == 16 {
+ // Can be IPv6 or FireWire HWAddr
+ link, err := LinkByIndex(neigh.LinkIndex)
+ if err == nil && link.Attrs().EncapType == "tunnel6" {
+ neigh.IP = net.IP(attr.Value)
+ } else {
+ neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ }
} else {
neigh.HardwareAddr = net.HardwareAddr(attr.Value)
}
@@ -282,8 +297,126 @@ func NeighDeserialize(m []byte) (*Neigh, error) {
neigh.Vlan = int(native.Uint16(attr.Value[0:2]))
case NDA_VNI:
neigh.VNI = int(native.Uint32(attr.Value[0:4]))
+ case NDA_MASTER:
+ neigh.MasterIndex = int(native.Uint32(attr.Value[0:4]))
}
}
return &neigh, nil
}
+
+// NeighSubscribe takes a chan down which notifications will be sent
+// when neighbors are added or deleted. Close the 'done' chan to stop subscription.
+func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error {
+ return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false)
+}
+
+// NeighSubscribeAt works like NeighSubscribe plus it allows the caller
+// to choose the network namespace in which to subscribe (ns).
+func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error {
+ return neighSubscribeAt(ns, netns.None(), ch, done, nil, false)
+}
+
+// NeighSubscribeOptions contains a set of options to use with
+// NeighSubscribeWithOptions.
+type NeighSubscribeOptions struct {
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+ ListExisting bool
+}
+
+// NeighSubscribeWithOptions work like NeighSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error {
+ if options.Namespace == nil {
+ none := netns.None()
+ options.Namespace = &none
+ }
+ return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting)
+}
+
+func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error {
+ s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH)
+ makeRequest := func(family int) error {
+ req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH,
+ unix.NLM_F_DUMP)
+ infmsg := nl.NewIfInfomsg(family)
+ req.AddData(infmsg)
+ if err := s.Send(req); err != nil {
+ return err
+ }
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ if listExisting {
+ if err := makeRequest(unix.AF_UNSPEC); err != nil {
+ return err
+ }
+ // We have to wait for NLMSG_DONE before making AF_BRIDGE request
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, from, err := s.Receive()
+ if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
+ return
+ }
+ if from.Pid != nl.PidKernel {
+ if cberr != nil {
+ cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel))
+ }
+ continue
+ }
+ for _, m := range msgs {
+ if m.Header.Type == unix.NLMSG_DONE {
+ if listExisting {
+ // This will be called after handling AF_UNSPEC
+ // list request, we have to wait for NLMSG_DONE
+ // before making another request
+ if err := makeRequest(unix.AF_BRIDGE); err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
+ return
+ }
+ listExisting = false
+ }
+ continue
+ }
+ if m.Header.Type == unix.NLMSG_ERROR {
+ native := nl.NativeEndian()
+ error := int32(native.Uint32(m.Data[0:4]))
+ if error == 0 {
+ continue
+ }
+ if cberr != nil {
+ cberr(syscall.Errno(-error))
+ }
+ return
+ }
+ neigh, err := NeighDeserialize(m.Data)
+ if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
+ return
+ }
+ ch <- NeighUpdate{Type: m.Header.Type, Neigh: *neigh}
+ }
+ }
+ }()
+
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/netlink.go b/vendor/github.com/vishvananda/netlink/netlink.go
index fb159526e..9cb685dc8 100644
--- a/vendor/github.com/vishvananda/netlink/netlink.go
+++ b/vendor/github.com/vishvananda/netlink/netlink.go
@@ -27,7 +27,8 @@ func ParseIPNet(s string) (*net.IPNet, error) {
if err != nil {
return nil, err
}
- return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+ ipNet.IP = ip
+ return ipNet, nil
}
// NewIPNet generates an IPNet from an ip address using a netmask of 32 or 128.
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
index 86111b92c..42d3acf91 100644
--- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -48,10 +48,18 @@ func LinkSetVfVlan(link Link, vf, vlan int) error {
return ErrNotImplemented
}
+func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error {
+ return ErrNotImplemented
+}
+
func LinkSetVfTxRate(link Link, vf, rate int) error {
return ErrNotImplemented
}
+func LinkSetVfRate(link Link, vf, minRate, maxRate int) error {
+ return ErrNotImplemented
+}
+
func LinkSetNoMaster(link Link) error {
return ErrNotImplemented
}
@@ -152,6 +160,10 @@ func AddrAdd(link Link, addr *Addr) error {
return ErrNotImplemented
}
+func AddrReplace(link Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
func AddrDel(link Link, addr *Addr) error {
return ErrNotImplemented
}
diff --git a/vendor/github.com/vishvananda/netlink/netns_linux.go b/vendor/github.com/vishvananda/netlink/netns_linux.go
new file mode 100644
index 000000000..77cf6f469
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netns_linux.go
@@ -0,0 +1,141 @@
+package netlink
+
+// Network namespace ID functions
+//
+// The kernel has a weird concept called the network namespace ID.
+// This is different from the file reference in proc (and any bind-mounted
+// namespaces, etc.)
+//
+// Instead, namespaces can be assigned a numeric ID at any time. Once set,
+// the ID is fixed. The ID can either be set manually by the user, or
+// automatically, triggered by certain kernel actions. The most common kernel
+// action that triggers namespace ID creation is moving one end of a veth pair
+// in to that namespace.
+
+import (
+ "fmt"
+
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+// These can be replaced by the values from sys/unix when it is next released.
+const (
+ _ = iota
+ NETNSA_NSID
+ NETNSA_PID
+ NETNSA_FD
+)
+
+// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id).
+// Returns -1 if the namespace does not have an ID set.
+func (h *Handle) GetNetNsIdByPid(pid int) (int, error) {
+ return h.getNetNsId(NETNSA_PID, uint32(pid))
+}
+
+// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id).
+// Returns -1 if the namespace does not have an ID set.
+func GetNetNsIdByPid(pid int) (int, error) {
+ return pkgHandle.GetNetNsIdByPid(pid)
+}
+
+// SetNetNSIdByPid sets the ID of the network namespace for a given pid (really thread id).
+// The ID can only be set for namespaces without an ID already set.
+func (h *Handle) SetNetNsIdByPid(pid, nsid int) error {
+ return h.setNetNsId(NETNSA_PID, uint32(pid), uint32(nsid))
+}
+
+// SetNetNSIdByPid sets the ID of the network namespace for a given pid (really thread id).
+// The ID can only be set for namespaces without an ID already set.
+func SetNetNsIdByPid(pid, nsid int) error {
+ return pkgHandle.SetNetNsIdByPid(pid, nsid)
+}
+
+// GetNetNsIdByFd looks up the network namespace ID for a given fd.
+// fd must be an open file descriptor to a namespace file.
+// Returns -1 if the namespace does not have an ID set.
+func (h *Handle) GetNetNsIdByFd(fd int) (int, error) {
+ return h.getNetNsId(NETNSA_FD, uint32(fd))
+}
+
+// GetNetNsIdByFd looks up the network namespace ID for a given fd.
+// fd must be an open file descriptor to a namespace file.
+// Returns -1 if the namespace does not have an ID set.
+func GetNetNsIdByFd(fd int) (int, error) {
+ return pkgHandle.GetNetNsIdByFd(fd)
+}
+
+// SetNetNSIdByFd sets the ID of the network namespace for a given fd.
+// fd must be an open file descriptor to a namespace file.
+// The ID can only be set for namespaces without an ID already set.
+func (h *Handle) SetNetNsIdByFd(fd, nsid int) error {
+ return h.setNetNsId(NETNSA_FD, uint32(fd), uint32(nsid))
+}
+
+// SetNetNSIdByFd sets the ID of the network namespace for a given fd.
+// fd must be an open file descriptor to a namespace file.
+// The ID can only be set for namespaces without an ID already set.
+func SetNetNsIdByFd(fd, nsid int) error {
+ return pkgHandle.SetNetNsIdByFd(fd, nsid)
+}
+
+// getNetNsId requests the netnsid for a given type-val pair
+// type should be either NETNSA_PID or NETNSA_FD
+func (h *Handle) getNetNsId(attrType int, val uint32) (int, error) {
+ req := h.newNetlinkRequest(unix.RTM_GETNSID, unix.NLM_F_REQUEST)
+
+ rtgen := nl.NewRtGenMsg()
+ req.AddData(rtgen)
+
+ b := make([]byte, 4, 4)
+ native.PutUint32(b, val)
+ attr := nl.NewRtAttr(attrType, b)
+ req.AddData(attr)
+
+ msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID)
+
+ if err != nil {
+ return 0, err
+ }
+
+ for _, m := range msgs {
+ msg := nl.DeserializeRtGenMsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return 0, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case NETNSA_NSID:
+ return int(int32(native.Uint32(attr.Value))), nil
+ }
+ }
+ }
+
+ return 0, fmt.Errorf("unexpected empty result")
+}
+
+// setNetNsId sets the netnsid for a given type-val pair
+// type should be either NETNSA_PID or NETNSA_FD
+// The ID can only be set for namespaces without an ID already set
+func (h *Handle) setNetNsId(attrType int, val uint32, newnsid uint32) error {
+ req := h.newNetlinkRequest(unix.RTM_NEWNSID, unix.NLM_F_REQUEST|unix.NLM_F_ACK)
+
+ rtgen := nl.NewRtGenMsg()
+ req.AddData(rtgen)
+
+ b := make([]byte, 4, 4)
+ native.PutUint32(b, val)
+ attr := nl.NewRtAttr(attrType, b)
+ req.AddData(attr)
+
+ b1 := make([]byte, 4, 4)
+ native.PutUint32(b1, newnsid)
+ attr1 := nl.NewRtAttr(NETNSA_NSID, b1)
+ req.AddData(attr1)
+
+ _, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID)
+ return err
+}
diff --git a/vendor/github.com/vishvananda/netlink/netns_unspecified.go b/vendor/github.com/vishvananda/netlink/netns_unspecified.go
new file mode 100644
index 000000000..5c5899e36
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netns_unspecified.go
@@ -0,0 +1,19 @@
+// +build !linux
+
+package netlink
+
+func GetNetNsIdByPid(pid int) (int, error) {
+ return 0, ErrNotImplemented
+}
+
+func SetNetNsIdByPid(pid, nsid int) error {
+ return ErrNotImplemented
+}
+
+func GetNetNsIdByFd(fd int) (int, error) {
+ return 0, ErrNotImplemented
+}
+
+func SetNetNsIdByFd(fd, nsid int) error {
+ return ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
index 6c0d33338..34e78ba8d 100644
--- a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
@@ -11,8 +11,8 @@ const (
/* Bridge Flags */
const (
- BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */
- BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */
+ BRIDGE_FLAGS_MASTER = iota + 1 /* Bridge command to/from master */
+ BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */
)
/* Bridge management nested attributes
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
index 380cc5967..79d2b6b89 100644
--- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -76,12 +76,17 @@ const (
// __CTA_MAX
// };
const (
- CTA_TUPLE_ORIG = 1
- CTA_TUPLE_REPLY = 2
- CTA_STATUS = 3
- CTA_TIMEOUT = 7
- CTA_MARK = 8
- CTA_PROTOINFO = 4
+ CTA_TUPLE_ORIG = 1
+ CTA_TUPLE_REPLY = 2
+ CTA_STATUS = 3
+ CTA_PROTOINFO = 4
+ CTA_TIMEOUT = 7
+ CTA_MARK = 8
+ CTA_COUNTERS_ORIG = 9
+ CTA_COUNTERS_REPLY = 10
+ CTA_USE = 11
+ CTA_ID = 12
+ CTA_TIMESTAMP = 20
)
// enum ctattr_tuple {
@@ -163,6 +168,29 @@ const (
CTA_PROTOINFO_TCP_FLAGS_REPLY = 5
)
+// enum ctattr_counters {
+// CTA_COUNTERS_UNSPEC,
+// CTA_COUNTERS_PACKETS, /* 64bit counters */
+// CTA_COUNTERS_BYTES, /* 64bit counters */
+// CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */
+// CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */
+// CTA_COUNTERS_PAD,
+// __CTA_COUNTERS_MAX
+// };
+// #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
+const (
+ CTA_COUNTERS_PACKETS = 1
+ CTA_COUNTERS_BYTES = 2
+)
+
+// enum CTA TIMESTAMP TLVs
+// CTA_TIMESTAMP_START /* 64bit value */
+// CTA_TIMESTAMP_STOP /* 64bit value */
+const (
+ CTA_TIMESTAMP_START = 1
+ CTA_TIMESTAMP_STOP = 2
+)
+
// /* General form of address family dependent message.
// */
// struct nfgenmsg {
diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go
new file mode 100644
index 000000000..db66faaad
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go
@@ -0,0 +1,40 @@
+package nl
+
+// All the following constants are coming from:
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/devlink.h
+
+const (
+ GENL_DEVLINK_VERSION = 1
+ GENL_DEVLINK_NAME = "devlink"
+)
+
+const (
+ DEVLINK_CMD_GET = 1
+ DEVLINK_CMD_ESWITCH_GET = 29
+ DEVLINK_CMD_ESWITCH_SET = 30
+)
+
+const (
+ DEVLINK_ATTR_BUS_NAME = 1
+ DEVLINK_ATTR_DEV_NAME = 2
+ DEVLINK_ATTR_ESWITCH_MODE = 25
+ DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26
+ DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62
+)
+
+const (
+ DEVLINK_ESWITCH_MODE_LEGACY = 0
+ DEVLINK_ESWITCH_MODE_SWITCHDEV = 1
+)
+
+const (
+ DEVLINK_ESWITCH_INLINE_MODE_NONE = 0
+ DEVLINK_ESWITCH_INLINE_MODE_LINK = 1
+ DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 2
+ DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 3
+)
+
+const (
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0
+ DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index 84a3498dd..afb16a9c1 100644
--- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -13,7 +13,9 @@ const (
IFLA_INFO_KIND
IFLA_INFO_DATA
IFLA_INFO_XSTATS
- IFLA_INFO_MAX = IFLA_INFO_XSTATS
+ IFLA_INFO_SLAVE_KIND
+ IFLA_INFO_SLAVE_DATA
+ IFLA_INFO_MAX = IFLA_INFO_SLAVE_DATA
)
const (
@@ -87,7 +89,8 @@ const (
const (
IFLA_IPVLAN_UNSPEC = iota
IFLA_IPVLAN_MODE
- IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE
+ IFLA_IPVLAN_FLAG
+ IFLA_IPVLAN_MAX = IFLA_IPVLAN_FLAG
)
const (
@@ -164,6 +167,8 @@ const (
IFLA_BOND_SLAVE_PERM_HWADDR
IFLA_BOND_SLAVE_QUEUE_ID
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID
+ IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE
+ IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE
)
const (
@@ -217,9 +222,11 @@ const (
IFLA_VF_RSS_QUERY_EN /* RSS Redirection Table and Hash Key query
* on/off switch
*/
- IFLA_VF_STATS /* network device statistics */
- IFLA_VF_TRUST /* Trust state of VF */
- IFLA_VF_MAX = IFLA_VF_TRUST
+ IFLA_VF_STATS /* network device statistics */
+ IFLA_VF_TRUST /* Trust state of VF */
+ IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */
+ IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */
+ IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID
)
const (
@@ -248,6 +255,7 @@ const (
SizeofVfLinkState = 0x08
SizeofVfRssQueryEn = 0x08
SizeofVfTrust = 0x08
+ SizeofVfGUID = 0x10
)
// struct ifla_vf_mac {
@@ -430,6 +438,30 @@ func (msg *VfTrust) Serialize() []byte {
return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:]
}
+// struct ifla_vf_guid {
+// __u32 vf;
+// __u32 rsvd;
+// __u64 guid;
+// };
+
+type VfGUID struct {
+ Vf uint32
+ Rsvd uint32
+ GUID uint64
+}
+
+func (msg *VfGUID) Len() int {
+ return SizeofVfGUID
+}
+
+func DeserializeVfGUID(b []byte) *VfGUID {
+ return (*VfGUID)(unsafe.Pointer(&b[0:SizeofVfGUID][0]))
+}
+
+func (msg *VfGUID) Serialize() []byte {
+ return (*(*[SizeofVfGUID]byte)(unsafe.Pointer(msg)))[:]
+}
+
const (
XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota
XDP_FLAGS_SKB_MODE
@@ -546,3 +578,33 @@ const (
GTP_ROLE_GGSN = iota
GTP_ROLE_SGSN
)
+
+const (
+ IFLA_XFRM_UNSPEC = iota
+ IFLA_XFRM_LINK
+ IFLA_XFRM_IF_ID
+
+ IFLA_XFRM_MAX = iota - 1
+)
+
+const (
+ IFLA_TUN_UNSPEC = iota
+ IFLA_TUN_OWNER
+ IFLA_TUN_GROUP
+ IFLA_TUN_TYPE
+ IFLA_TUN_PI
+ IFLA_TUN_VNET_HDR
+ IFLA_TUN_PERSIST
+ IFLA_TUN_MULTI_QUEUE
+ IFLA_TUN_NUM_QUEUES
+ IFLA_TUN_NUM_DISABLED_QUEUES
+ IFLA_TUN_MAX = IFLA_TUN_NUM_DISABLED_QUEUES
+)
+
+const (
+ IFLA_IPOIB_UNSPEC = iota
+ IFLA_IPOIB_PKEY
+ IFLA_IPOIB_MODE
+ IFLA_IPOIB_UMCAST
+ IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index bc8e82c2c..aaf56c671 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -21,7 +21,13 @@ const (
FAMILY_ALL = unix.AF_UNSPEC
FAMILY_V4 = unix.AF_INET
FAMILY_V6 = unix.AF_INET6
- FAMILY_MPLS = AF_MPLS
+ FAMILY_MPLS = unix.AF_MPLS
+ // Arbitrary set value (greater than default 4k) to allow receiving
+ // from kernel more verbose messages e.g. for statistics,
+ // tc rules or filters, or other more memory requiring data.
+ RECEIVE_BUFFER_SIZE = 65536
+ // Kernel netlink pid
+ PidKernel uint32 = 0
)
// SupportedNlFamilies contains the list of netlink families this netlink package supports
@@ -42,7 +48,7 @@ func GetIPFamily(ip net.IP) int {
var nativeEndian binary.ByteOrder
-// Get native endianness for the system
+// NativeEndian gets native endianness for the system
func NativeEndian() binary.ByteOrder {
if nativeEndian == nil {
var x uint32 = 0x01020304
@@ -271,15 +277,22 @@ func NewRtAttr(attrType int, data []byte) *RtAttr {
}
}
-// Create a new RtAttr obj anc add it as a child of an existing object
+// NewRtAttrChild adds an RtAttr as a child to the parent and returns the new attribute
+//
+// Deprecated: Use AddRtAttr() on the parent object
func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+ return parent.AddRtAttr(attrType, data)
+}
+
+// AddRtAttr adds an RtAttr as a child and returns the new attribute
+func (a *RtAttr) AddRtAttr(attrType int, data []byte) *RtAttr {
attr := NewRtAttr(attrType, data)
- parent.children = append(parent.children, attr)
+ a.children = append(a.children, attr)
return attr
}
-// AddChild adds an existing RtAttr as a child.
-func (a *RtAttr) AddChild(attr *RtAttr) {
+// AddChild adds an existing NetlinkRequestData as a child.
+func (a *RtAttr) AddChild(attr NetlinkRequestData) {
a.children = append(a.children, attr)
}
@@ -360,16 +373,12 @@ func (req *NetlinkRequest) Serialize() []byte {
}
func (req *NetlinkRequest) AddData(data NetlinkRequestData) {
- if data != nil {
- req.Data = append(req.Data, data)
- }
+ req.Data = append(req.Data, data)
}
// AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization
func (req *NetlinkRequest) AddRawData(data []byte) {
- if data != nil {
- req.RawData = append(req.RawData, data...)
- }
+ req.RawData = append(req.RawData, data...)
}
// Execute the request against a the given sockType.
@@ -413,10 +422,13 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
done:
for {
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
return nil, err
}
+ if from.Pid != PidKernel {
+ return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel)
+ }
for _, m := range msgs {
if m.Header.Seq != req.Seq {
if sharedSocket {
@@ -425,7 +437,7 @@ done:
return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
}
if m.Header.Pid != pid {
- return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+ continue
}
if m.Header.Type == unix.NLMSG_DONE {
break done
@@ -610,21 +622,31 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
return nil
}
-func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
+func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) {
fd := int(atomic.LoadInt32(&s.fd))
if fd < 0 {
- return nil, fmt.Errorf("Receive called on a closed socket")
+ return nil, nil, fmt.Errorf("Receive called on a closed socket")
}
- rb := make([]byte, unix.Getpagesize())
- nr, _, err := unix.Recvfrom(fd, rb, 0)
+ var fromAddr *unix.SockaddrNetlink
+ var rb [RECEIVE_BUFFER_SIZE]byte
+ nr, from, err := unix.Recvfrom(fd, rb[:], 0)
if err != nil {
- return nil, err
+ return nil, nil, err
+ }
+ fromAddr, ok := from.(*unix.SockaddrNetlink)
+ if !ok {
+ return nil, nil, fmt.Errorf("Error converting to netlink sockaddr")
}
if nr < unix.NLMSG_HDRLEN {
- return nil, fmt.Errorf("Got short response from netlink")
+ return nil, nil, fmt.Errorf("Got short response from netlink")
+ }
+ rb2 := make([]byte, nr)
+ copy(rb2, rb[:nr])
+ nl, err := syscall.ParseNetlinkMessage(rb2)
+ if err != nil {
+ return nil, nil, err
}
- rb = rb[:nr]
- return syscall.ParseNetlinkMessage(rb)
+ return nl, fromAddr, nil
}
// SetSendTimeout allows to set a send timeout on the socket
diff --git a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go
new file mode 100644
index 000000000..1224b747d
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go
@@ -0,0 +1,35 @@
+package nl
+
+const (
+ RDMA_NL_GET_CLIENT_SHIFT = 10
+)
+
+const (
+ RDMA_NL_NLDEV = 5
+)
+
+const (
+ RDMA_NLDEV_CMD_GET = 1
+ RDMA_NLDEV_CMD_SET = 2
+ RDMA_NLDEV_CMD_SYS_GET = 6
+ RDMA_NLDEV_CMD_SYS_SET = 7
+)
+
+const (
+ RDMA_NLDEV_ATTR_DEV_INDEX = 1
+ RDMA_NLDEV_ATTR_DEV_NAME = 2
+ RDMA_NLDEV_ATTR_PORT_INDEX = 3
+ RDMA_NLDEV_ATTR_CAP_FLAGS = 4
+ RDMA_NLDEV_ATTR_FW_VERSION = 5
+ RDMA_NLDEV_ATTR_NODE_GUID = 6
+ RDMA_NLDEV_ATTR_SYS_IMAGE_GUID = 7
+ RDMA_NLDEV_ATTR_SUBNET_PREFIX = 8
+ RDMA_NLDEV_ATTR_LID = 9
+ RDMA_NLDEV_ATTR_SM_LID = 10
+ RDMA_NLDEV_ATTR_LMC = 11
+ RDMA_NLDEV_ATTR_PORT_STATE = 12
+ RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13
+ RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14
+ RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66
+ RDMA_NLDEV_NET_NS_FD = 68
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
index f6906fcaf..03c1900ff 100644
--- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go
@@ -79,3 +79,29 @@ func (msg *RtNexthop) Serialize() []byte {
}
return buf
}
+
+type RtGenMsg struct {
+ unix.RtGenmsg
+}
+
+func NewRtGenMsg() *RtGenMsg {
+ return &RtGenMsg{
+ RtGenmsg: unix.RtGenmsg{
+ Family: unix.AF_UNSPEC,
+ },
+ }
+}
+
+func (msg *RtGenMsg) Len() int {
+ return rtaAlignOf(unix.SizeofRtGenmsg)
+}
+
+func DeserializeRtGenMsg(b []byte) *RtGenMsg {
+ return &RtGenMsg{RtGenmsg: unix.RtGenmsg{Family: b[0]}}
+}
+
+func (msg *RtGenMsg) Serialize() []byte {
+ out := make([]byte, msg.Len())
+ out[0] = msg.Family
+ return out
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go
index b3425f6b0..5774cbb15 100644
--- a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go
@@ -99,6 +99,49 @@ func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) {
return mode, srh.Segments, nil
}
+func DecodeSEG6Srh(buf []byte) ([]net.IP, error) {
+ native := NativeEndian()
+ srh := IPv6SrHdr{
+ nextHdr: buf[0],
+ hdrLen: buf[1],
+ routingType: buf[2],
+ segmentsLeft: buf[3],
+ firstSegment: buf[4],
+ flags: buf[5],
+ reserved: native.Uint16(buf[6:8]),
+ }
+ buf = buf[8:]
+ if len(buf)%16 != 0 {
+ err := fmt.Errorf("DecodeSEG6Srh: error parsing Segment List (buf len: %d)", len(buf))
+ return nil, err
+ }
+ for len(buf) > 0 {
+ srh.Segments = append(srh.Segments, net.IP(buf[:16]))
+ buf = buf[16:]
+ }
+ return srh.Segments, nil
+}
+func EncodeSEG6Srh(segments []net.IP) ([]byte, error) {
+ nsegs := len(segments) // nsegs: number of segments
+ if nsegs == 0 {
+ return nil, errors.New("EncodeSEG6Srh: No Segments")
+ }
+ b := make([]byte, 8, 8+len(segments)*16)
+ native := NativeEndian()
+ b[0] = 0 // srh.nextHdr (0 when calling netlink)
+ b[1] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit)
+ b[2] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA)
+ b[3] = uint8(nsegs - 1) // srh.segmentsLeft
+ b[4] = uint8(nsegs - 1) // srh.firstSegment
+ b[5] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac)
+ // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07
+ native.PutUint16(b[6:], 0) // srh.reserved
+ for _, netIP := range segments {
+ b = append(b, netIP...) // srh.Segments
+ }
+ return b, nil
+}
+
// Helper functions
func SEG6EncapModeString(mode int) string {
switch mode {
diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go
new file mode 100644
index 000000000..150017726
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go
@@ -0,0 +1,76 @@
+package nl
+
+import ()
+
+// seg6local parameters
+const (
+ SEG6_LOCAL_UNSPEC = iota
+ SEG6_LOCAL_ACTION
+ SEG6_LOCAL_SRH
+ SEG6_LOCAL_TABLE
+ SEG6_LOCAL_NH4
+ SEG6_LOCAL_NH6
+ SEG6_LOCAL_IIF
+ SEG6_LOCAL_OIF
+ __SEG6_LOCAL_MAX
+)
+const (
+ SEG6_LOCAL_MAX = __SEG6_LOCAL_MAX
+)
+
+// seg6local actions
+const (
+ SEG6_LOCAL_ACTION_END = iota + 1 // 1
+ SEG6_LOCAL_ACTION_END_X // 2
+ SEG6_LOCAL_ACTION_END_T // 3
+ SEG6_LOCAL_ACTION_END_DX2 // 4
+ SEG6_LOCAL_ACTION_END_DX6 // 5
+ SEG6_LOCAL_ACTION_END_DX4 // 6
+ SEG6_LOCAL_ACTION_END_DT6 // 7
+ SEG6_LOCAL_ACTION_END_DT4 // 8
+ SEG6_LOCAL_ACTION_END_B6 // 9
+ SEG6_LOCAL_ACTION_END_B6_ENCAPS // 10
+ SEG6_LOCAL_ACTION_END_BM // 11
+ SEG6_LOCAL_ACTION_END_S // 12
+ SEG6_LOCAL_ACTION_END_AS // 13
+ SEG6_LOCAL_ACTION_END_AM // 14
+ __SEG6_LOCAL_ACTION_MAX
+)
+const (
+ SEG6_LOCAL_ACTION_MAX = __SEG6_LOCAL_ACTION_MAX - 1
+)
+
+// Helper functions
+func SEG6LocalActionString(action int) string {
+ switch action {
+ case SEG6_LOCAL_ACTION_END:
+ return "End"
+ case SEG6_LOCAL_ACTION_END_X:
+ return "End.X"
+ case SEG6_LOCAL_ACTION_END_T:
+ return "End.T"
+ case SEG6_LOCAL_ACTION_END_DX2:
+ return "End.DX2"
+ case SEG6_LOCAL_ACTION_END_DX6:
+ return "End.DX6"
+ case SEG6_LOCAL_ACTION_END_DX4:
+ return "End.DX4"
+ case SEG6_LOCAL_ACTION_END_DT6:
+ return "End.DT6"
+ case SEG6_LOCAL_ACTION_END_DT4:
+ return "End.DT4"
+ case SEG6_LOCAL_ACTION_END_B6:
+ return "End.B6"
+ case SEG6_LOCAL_ACTION_END_B6_ENCAPS:
+ return "End.B6.Encaps"
+ case SEG6_LOCAL_ACTION_END_BM:
+ return "End.BM"
+ case SEG6_LOCAL_ACTION_END_S:
+ return "End.S"
+ case SEG6_LOCAL_ACTION_END_AS:
+ return "End.AS"
+ case SEG6_LOCAL_ACTION_END_AM:
+ return "End.AM"
+ }
+ return "unknown"
+}
diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go
index fc631e0e5..f7f7f92e6 100644
--- a/vendor/github.com/vishvananda/netlink/nl/syscall.go
+++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go
@@ -42,16 +42,6 @@ const (
TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/
)
-const (
- AF_MPLS = 28
-)
-
-const (
- RTA_NEWDST = 0x13
- RTA_ENCAP_TYPE = 0x15
- RTA_ENCAP = 0x16
-)
-
// RTA_ENCAP subtype
const (
MPLS_IPTUNNEL_UNSPEC = iota
@@ -67,6 +57,7 @@ const (
LWTUNNEL_ENCAP_IP6
LWTUNNEL_ENCAP_SEG6
LWTUNNEL_ENCAP_BPF
+ LWTUNNEL_ENCAP_SEG6_LOCAL
)
// routing header types
diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
index 94ebc290a..501f554b2 100644
--- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -1,6 +1,7 @@
package nl
import (
+ "encoding/binary"
"unsafe"
)
@@ -65,6 +66,15 @@ const (
)
const (
+ TCA_STATS_UNSPEC = iota
+ TCA_STATS_BASIC
+ TCA_STATS_RATE_EST
+ TCA_STATS_QUEUE
+ TCA_STATS_APP
+ TCA_STATS_MAX = TCA_STATS_APP
+)
+
+const (
SizeofTcMsg = 0x14
SizeofTcActionMsg = 0x04
SizeofTcPrioMap = 0x14
@@ -79,7 +89,10 @@ const (
SizeofTcU32Key = 0x10
SizeofTcU32Sel = 0x10 // without keys
SizeofTcGen = 0x14
+ SizeofTcConnmark = SizeofTcGen + 0x04
SizeofTcMirred = SizeofTcGen + 0x08
+ SizeofTcTunnelKey = SizeofTcGen + 0x04
+ SizeofTcSkbEdit = SizeofTcGen
SizeofTcPolice = 2*SizeofTcRateSpec + 0x20
)
@@ -412,6 +425,57 @@ func (x *TcHtbGlob) Serialize() []byte {
return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:]
}
+// HFSC
+
+type Curve struct {
+ m1 uint32
+ d uint32
+ m2 uint32
+}
+
+type HfscCopt struct {
+ Rsc Curve
+ Fsc Curve
+ Usc Curve
+}
+
+func (c *Curve) Attrs() (uint32, uint32, uint32) {
+ return c.m1, c.d, c.m2
+}
+
+func (c *Curve) Set(m1 uint32, d uint32, m2 uint32) {
+ c.m1 = m1
+ c.d = d
+ c.m2 = m2
+}
+
+func DeserializeHfscCurve(b []byte) *Curve {
+ return &Curve{
+ m1: binary.LittleEndian.Uint32(b[0:4]),
+ d: binary.LittleEndian.Uint32(b[4:8]),
+ m2: binary.LittleEndian.Uint32(b[8:12]),
+ }
+}
+
+func SerializeHfscCurve(c *Curve) (b []byte) {
+ t := make([]byte, binary.MaxVarintLen32)
+ binary.LittleEndian.PutUint32(t, c.m1)
+ b = append(b, t[:4]...)
+ binary.LittleEndian.PutUint32(t, c.d)
+ b = append(b, t[:4]...)
+ binary.LittleEndian.PutUint32(t, c.m2)
+ b = append(b, t[:4]...)
+ return b
+}
+
+type TcHfscOpt struct {
+ Defcls uint16
+}
+
+func (x *TcHfscOpt) Serialize() []byte {
+ return (*(*[2]byte)(unsafe.Pointer(x)))[:]
+}
+
const (
TCA_U32_UNSPEC = iota
TCA_U32_CLASSID
@@ -586,12 +650,48 @@ const (
TCA_BPF_FD
TCA_BPF_NAME
TCA_BPF_FLAGS
- TCA_BPF_MAX = TCA_BPF_FLAGS
+ TCA_BPF_FLAGS_GEN
+ TCA_BPF_TAG
+ TCA_BPF_ID
+ TCA_BPF_MAX = TCA_BPF_ID
)
type TcBpf TcGen
const (
+ TCA_ACT_CONNMARK = 14
+)
+
+const (
+ TCA_CONNMARK_UNSPEC = iota
+ TCA_CONNMARK_PARMS
+ TCA_CONNMARK_TM
+ TCA_CONNMARK_MAX = TCA_CONNMARK_TM
+)
+
+// struct tc_connmark {
+// tc_gen;
+// __u16 zone;
+// };
+
+type TcConnmark struct {
+ TcGen
+ Zone uint16
+}
+
+func (msg *TcConnmark) Len() int {
+ return SizeofTcConnmark
+}
+
+func DeserializeTcConnmark(b []byte) *TcConnmark {
+ return (*TcConnmark)(unsafe.Pointer(&b[0:SizeofTcConnmark][0]))
+}
+
+func (x *TcConnmark) Serialize() []byte {
+ return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
TCA_ACT_MIRRED = 8
)
@@ -626,6 +726,63 @@ func (x *TcMirred) Serialize() []byte {
return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:]
}
+const (
+ TCA_TUNNEL_KEY_UNSPEC = iota
+ TCA_TUNNEL_KEY_TM
+ TCA_TUNNEL_KEY_PARMS
+ TCA_TUNNEL_KEY_ENC_IPV4_SRC
+ TCA_TUNNEL_KEY_ENC_IPV4_DST
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC
+ TCA_TUNNEL_KEY_ENC_IPV6_DST
+ TCA_TUNNEL_KEY_ENC_KEY_ID
+ TCA_TUNNEL_KEY_MAX = TCA_TUNNEL_KEY_ENC_KEY_ID
+)
+
+type TcTunnelKey struct {
+ TcGen
+ Action int32
+}
+
+func (x *TcTunnelKey) Len() int {
+ return SizeofTcTunnelKey
+}
+
+func DeserializeTunnelKey(b []byte) *TcTunnelKey {
+ return (*TcTunnelKey)(unsafe.Pointer(&b[0:SizeofTcTunnelKey][0]))
+}
+
+func (x *TcTunnelKey) Serialize() []byte {
+ return (*(*[SizeofTcTunnelKey]byte)(unsafe.Pointer(x)))[:]
+}
+
+const (
+ TCA_SKBEDIT_UNSPEC = iota
+ TCA_SKBEDIT_TM
+ TCA_SKBEDIT_PARMS
+ TCA_SKBEDIT_PRIORITY
+ TCA_SKBEDIT_QUEUE_MAPPING
+ TCA_SKBEDIT_MARK
+ TCA_SKBEDIT_PAD
+ TCA_SKBEDIT_PTYPE
+ TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK
+)
+
+type TcSkbEdit struct {
+ TcGen
+}
+
+func (x *TcSkbEdit) Len() int {
+ return SizeofTcSkbEdit
+}
+
+func DeserializeSkbEdit(b []byte) *TcSkbEdit {
+ return (*TcSkbEdit)(unsafe.Pointer(&b[0:SizeofTcSkbEdit][0]))
+}
+
+func (x *TcSkbEdit) Serialize() []byte {
+ return (*(*[SizeofTcSkbEdit]byte)(unsafe.Pointer(x)))[:]
+}
+
// struct tc_police {
// __u32 index;
// int action;
@@ -708,3 +865,10 @@ const (
TCA_FQ_CODEL_DROP_BATCH_SIZE
TCA_FQ_CODEL_MEMORY_LIMIT
)
+
+const (
+ TCA_HFSC_UNSPEC = iota
+ TCA_HFSC_RSC
+ TCA_HFSC_FSC
+ TCA_HFSC_USC
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
index 09a2ffa10..dce9073f7 100644
--- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -50,34 +50,44 @@ const (
// Attribute types
const (
/* Netlink message attributes. */
- XFRMA_UNSPEC = 0x00
- XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */
- XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */
- XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */
- XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */
- XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */
- XFRMA_SA = 0x06 /* struct xfrm_usersa_info */
- XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */
- XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */
- XFRMA_LTIME_VAL = 0x09
- XFRMA_REPLAY_VAL = 0x0a
- XFRMA_REPLAY_THRESH = 0x0b
- XFRMA_ETIMER_THRESH = 0x0c
- XFRMA_SRCADDR = 0x0d /* xfrm_address_t */
- XFRMA_COADDR = 0x0e /* xfrm_address_t */
- XFRMA_LASTUSED = 0x0f /* unsigned long */
- XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */
- XFRMA_MIGRATE = 0x11
- XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */
- XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */
- XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */
- XFRMA_MARK = 0x15 /* struct xfrm_mark */
- XFRMA_TFCPAD = 0x16 /* __u32 */
- XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */
- XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */
- XFRMA_MAX = 0x18
+ XFRMA_UNSPEC = iota
+ XFRMA_ALG_AUTH /* struct xfrm_algo */
+ XFRMA_ALG_CRYPT /* struct xfrm_algo */
+ XFRMA_ALG_COMP /* struct xfrm_algo */
+ XFRMA_ENCAP /* struct xfrm_algo + struct xfrm_encap_tmpl */
+ XFRMA_TMPL /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA /* struct xfrm_usersa_info */
+ XFRMA_POLICY /* struct xfrm_userpolicy_info */
+ XFRMA_SEC_CTX /* struct xfrm_sec_ctx */
+ XFRMA_LTIME_VAL
+ XFRMA_REPLAY_VAL
+ XFRMA_REPLAY_THRESH
+ XFRMA_ETIMER_THRESH
+ XFRMA_SRCADDR /* xfrm_address_t */
+ XFRMA_COADDR /* xfrm_address_t */
+ XFRMA_LASTUSED /* unsigned long */
+ XFRMA_POLICY_TYPE /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE
+ XFRMA_ALG_AEAD /* struct xfrm_algo_aead */
+ XFRMA_KMADDRESS /* struct xfrm_user_kmaddress */
+ XFRMA_ALG_AUTH_TRUNC /* struct xfrm_algo_auth */
+ XFRMA_MARK /* struct xfrm_mark */
+ XFRMA_TFCPAD /* __u32 */
+ XFRMA_REPLAY_ESN_VAL /* struct xfrm_replay_esn */
+ XFRMA_SA_EXTRA_FLAGS /* __u32 */
+ XFRMA_PROTO /* __u8 */
+ XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */
+ XFRMA_PAD
+ XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */
+ XFRMA_SET_MARK /* __u32 */
+ XFRMA_SET_MARK_MASK /* __u32 */
+ XFRMA_IF_ID /* __u32 */
+
+ XFRMA_MAX = iota - 1
)
+const XFRMA_OUTPUT_MARK = XFRMA_SET_MARK
+
const (
SizeofXfrmAddress = 0x10
SizeofXfrmSelector = 0x38
diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go
index 0087c4438..60b23b374 100644
--- a/vendor/github.com/vishvananda/netlink/protinfo.go
+++ b/vendor/github.com/vishvananda/netlink/protinfo.go
@@ -18,6 +18,10 @@ type Protinfo struct {
// String returns a list of enabled flags
func (prot *Protinfo) String() string {
+ if prot == nil {
+ return "<nil>"
+ }
+
var boolStrings []string
if prot.Hairpin {
boolStrings = append(boolStrings, "Hairpin")
diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
index 43c465f05..15b65123c 100644
--- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go
+++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go
@@ -41,7 +41,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
if err != nil {
return pi, err
}
- pi = *parseProtinfo(infos)
+ pi = parseProtinfo(infos)
return pi, nil
}
@@ -49,8 +49,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) {
return pi, fmt.Errorf("Device with index %d not found", base.Index)
}
-func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo {
- var pi Protinfo
+func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) {
for _, info := range infos {
switch info.Attr.Type {
case nl.IFLA_BRPORT_MODE:
@@ -71,5 +70,5 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo {
pi.ProxyArpWiFi = byteToBool(info.Value[0])
}
}
- return &pi
+ return
}
diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go
index 3df4b5c29..af78305ac 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc.go
@@ -176,6 +176,13 @@ type Netem struct {
CorruptCorr uint32
}
+func (netem *Netem) String() string {
+ return fmt.Sprintf(
+ "{Latency: %v, Limit: %v, Loss: %v, Gap: %v, Duplicate: %v, Jitter: %v}",
+ netem.Latency, netem.Limit, netem.Loss, netem.Gap, netem.Duplicate, netem.Jitter,
+ )
+}
+
func (qdisc *Netem) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
@@ -231,6 +238,33 @@ func (qdisc *GenericQdisc) Type() string {
return qdisc.QdiscType
}
+type Hfsc struct {
+ QdiscAttrs
+ Defcls uint16
+}
+
+func NewHfsc(attrs QdiscAttrs) *Hfsc {
+ return &Hfsc{
+ QdiscAttrs: attrs,
+ Defcls: 1,
+ }
+}
+
+func (hfsc *Hfsc) Attrs() *QdiscAttrs {
+ return &hfsc.QdiscAttrs
+}
+
+func (hfsc *Hfsc) Type() string {
+ return "hfsc"
+}
+
+func (hfsc *Hfsc) String() string {
+ return fmt.Sprintf(
+ "{%v -- default: %d}",
+ hfsc.Attrs(), hfsc.Defcls,
+ )
+}
+
// Fq is a classless packet scheduler meant to be mostly used for locally generated traffic.
type Fq struct {
QdiscAttrs
@@ -249,6 +283,13 @@ type Fq struct {
LowRateThreshold uint32
}
+func (fq *Fq) String() string {
+ return fmt.Sprintf(
+ "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}",
+ fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold,
+ )
+}
+
func NewFq(attrs QdiscAttrs) *Fq {
return &Fq{
QdiscAttrs: attrs,
@@ -276,6 +317,13 @@ type FqCodel struct {
// There are some more attributes here, but support for them seems not ubiquitous
}
+func (fqcodel *FqCodel) String() string {
+ return fmt.Sprintf(
+ "{%v -- Target: %v, Limit: %v, Interval: %v, ECM: %v, Flows: %v, Quantum: %v}",
+ fqcodel.Attrs(), fqcodel.Target, fqcodel.Limit, fqcodel.Interval, fqcodel.ECN, fqcodel.Flows, fqcodel.Quantum,
+ )
+}
+
func NewFqCodel(attrs QdiscAttrs) *FqCodel {
return &FqCodel{
QdiscAttrs: attrs,
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index 3794ac18a..e9eee5908 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -175,15 +175,15 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
opt.Peakrate.Rate = uint32(qdisc.Peakrate)
opt.Limit = qdisc.Limit
opt.Buffer = qdisc.Buffer
- nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize())
+ options.AddRtAttr(nl.TCA_TBF_PARMS, opt.Serialize())
if qdisc.Rate >= uint64(1<<32) {
- nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate))
+ options.AddRtAttr(nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate))
}
if qdisc.Peakrate >= uint64(1<<32) {
- nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate))
+ options.AddRtAttr(nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate))
}
if qdisc.Peakrate > 0 {
- nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst))
+ options.AddRtAttr(nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst))
}
case *Htb:
opt := nl.TcHtbGlob{}
@@ -193,8 +193,12 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
// TODO: Handle Debug properly. For now default to 0
opt.Debug = qdisc.Debug
opt.DirectPkts = qdisc.DirectPkts
- nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize())
- // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
+ options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize())
+ // options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
+ case *Hfsc:
+ opt := nl.TcHfscOpt{}
+ opt.Defcls = qdisc.Defcls
+ options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize())
case *Netem:
opt := nl.TcNetemQopt{}
opt.Latency = qdisc.Latency
@@ -211,21 +215,21 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
corr.DupCorr = qdisc.DuplicateCorr
if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 {
- nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize())
+ options.AddRtAttr(nl.TCA_NETEM_CORR, corr.Serialize())
}
// Corruption
corruption := nl.TcNetemCorrupt{}
corruption.Probability = qdisc.CorruptProb
corruption.Correlation = qdisc.CorruptCorr
if corruption.Probability > 0 {
- nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize())
+ options.AddRtAttr(nl.TCA_NETEM_CORRUPT, corruption.Serialize())
}
// Reorder
reorder := nl.TcNetemReorder{}
reorder.Probability = qdisc.ReorderProb
reorder.Correlation = qdisc.ReorderCorr
if reorder.Probability > 0 {
- nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize())
+ options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize())
}
case *Ingress:
// ingress filters must use the proper handle
@@ -233,50 +237,54 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error {
return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS")
}
case *FqCodel:
- nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN))))
+ options.AddRtAttr(nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN))))
if qdisc.Limit > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit))))
+ options.AddRtAttr(nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit))))
}
if qdisc.Interval > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval))))
+ options.AddRtAttr(nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval))))
}
if qdisc.Flows > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows))))
+ options.AddRtAttr(nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows))))
}
if qdisc.Quantum > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum))))
+ options.AddRtAttr(nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum))))
}
case *Fq:
- nl.NewRtAttrChild(options, nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing))))
+ options.AddRtAttr(nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing))))
if qdisc.Buckets > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets))))
+ options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets))))
}
if qdisc.LowRateThreshold > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold))))
+ options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold))))
}
if qdisc.Quantum > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum))))
+ options.AddRtAttr(nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum))))
}
if qdisc.InitialQuantum > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_INITIAL_QUANTUM, nl.Uint32Attr((uint32(qdisc.InitialQuantum))))
+ options.AddRtAttr(nl.TCA_FQ_INITIAL_QUANTUM, nl.Uint32Attr((uint32(qdisc.InitialQuantum))))
}
if qdisc.FlowRefillDelay > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay))))
+ options.AddRtAttr(nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay))))
}
if qdisc.FlowPacketLimit > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit))))
+ options.AddRtAttr(nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit))))
}
if qdisc.FlowMaxRate > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate))))
+ options.AddRtAttr(nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate))))
}
if qdisc.FlowDefaultRate > 0 {
- nl.NewRtAttrChild(options, nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate))))
+ options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate))))
}
+ default:
+ options = nil
}
- req.AddData(options)
+ if options != nil {
+ req.AddData(options)
+ }
return nil
}
@@ -348,6 +356,8 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
qdisc = &Htb{}
case "fq":
qdisc = &Fq{}
+ case "hfsc":
+ qdisc = &Hfsc{}
case "fq_codel":
qdisc = &FqCodel{}
case "netem":
@@ -375,6 +385,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) {
if err := parseTbfData(qdisc, data); err != nil {
return nil, err
}
+ case "hfsc":
+ if err := parseHfscData(qdisc, attr.Value); err != nil {
+ return nil, err
+ }
case "htb":
data, err := nl.ParseRouteAttr(attr.Value)
if err != nil {
@@ -474,6 +488,13 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
return nil
}
+func parseHfscData(qdisc Qdisc, data []byte) error {
+ Hfsc := qdisc.(*Hfsc)
+ native = nl.NativeEndian()
+ Hfsc.Defcls = native.Uint16(data)
+ return nil
+}
+
func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error {
native = nl.NativeEndian()
fq := qdisc.(*Fq)
diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go
new file mode 100644
index 000000000..2d0bdc8c3
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go
@@ -0,0 +1,264 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/vishvananda/netlink/nl"
+ "golang.org/x/sys/unix"
+)
+
+// LinkAttrs represents data shared by most link types
+type RdmaLinkAttrs struct {
+ Index uint32
+ Name string
+ FirmwareVersion string
+ NodeGuid string
+ SysImageGuid string
+}
+
+// Link represents a rdma device from netlink.
+type RdmaLink struct {
+ Attrs RdmaLinkAttrs
+}
+
+func getProtoField(clientType int, op int) int {
+ return ((clientType << nl.RDMA_NL_GET_CLIENT_SHIFT) | op)
+}
+
+func uint64ToGuidString(guid uint64) string {
+ //Convert to byte array
+ sysGuidBytes := new(bytes.Buffer)
+ binary.Write(sysGuidBytes, binary.LittleEndian, guid)
+
+ //Convert to HardwareAddr
+ sysGuidNet := net.HardwareAddr(sysGuidBytes.Bytes())
+
+ //Get the String
+ return sysGuidNet.String()
+}
+
+func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) {
+
+ link := RdmaLink{}
+
+ reader := bytes.NewReader(data)
+ for reader.Len() >= 4 {
+ _, attrType, len, value := parseNfAttrTLV(reader)
+
+ switch attrType {
+ case nl.RDMA_NLDEV_ATTR_DEV_INDEX:
+ var Index uint32
+ r := bytes.NewReader(value)
+ binary.Read(r, nl.NativeEndian(), &Index)
+ link.Attrs.Index = Index
+ case nl.RDMA_NLDEV_ATTR_DEV_NAME:
+ link.Attrs.Name = string(value[0 : len-1])
+ case nl.RDMA_NLDEV_ATTR_FW_VERSION:
+ link.Attrs.FirmwareVersion = string(value[0 : len-1])
+ case nl.RDMA_NLDEV_ATTR_NODE_GUID:
+ var guid uint64
+ r := bytes.NewReader(value)
+ binary.Read(r, nl.NativeEndian(), &guid)
+ link.Attrs.NodeGuid = uint64ToGuidString(guid)
+ case nl.RDMA_NLDEV_ATTR_SYS_IMAGE_GUID:
+ var sysGuid uint64
+ r := bytes.NewReader(value)
+ binary.Read(r, nl.NativeEndian(), &sysGuid)
+ link.Attrs.SysImageGuid = uint64ToGuidString(sysGuid)
+ }
+ if (len % 4) != 0 {
+ // Skip pad bytes
+ reader.Seek(int64(4-(len%4)), seekCurrent)
+ }
+ }
+ return &link, nil
+}
+
+func execRdmaGetLink(req *nl.NetlinkRequest, name string) (*RdmaLink, error) {
+
+ msgs, err := req.Execute(unix.NETLINK_RDMA, 0)
+ if err != nil {
+ return nil, err
+ }
+ for _, m := range msgs {
+ link, err := executeOneGetRdmaLink(m)
+ if err != nil {
+ return nil, err
+ }
+ if link.Attrs.Name == name {
+ return link, nil
+ }
+ }
+ return nil, fmt.Errorf("Rdma device %v not found", name)
+}
+
+func execRdmaSetLink(req *nl.NetlinkRequest) error {
+
+ _, err := req.Execute(unix.NETLINK_RDMA, 0)
+ return err
+}
+
+// RdmaLinkByName finds a link by name and returns a pointer to the object if
+// found and nil error, otherwise returns error code.
+func RdmaLinkByName(name string) (*RdmaLink, error) {
+ return pkgHandle.RdmaLinkByName(name)
+}
+
+// RdmaLinkByName finds a link by name and returns a pointer to the object if
+// found and nil error, otherwise returns error code.
+func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) {
+
+ proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET)
+ req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP)
+
+ return execRdmaGetLink(req, name)
+}
+
+// RdmaLinkSetName sets the name of the rdma link device. Return nil on success
+// or error otherwise.
+// Equivalent to: `rdma dev set $old_devname name $name`
+func RdmaLinkSetName(link *RdmaLink, name string) error {
+ return pkgHandle.RdmaLinkSetName(link, name)
+}
+
+// RdmaLinkSetName sets the name of the rdma link device. Return nil on success
+// or error otherwise.
+// Equivalent to: `rdma dev set $old_devname name $name`
+func (h *Handle) RdmaLinkSetName(link *RdmaLink, name string) error {
+ proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET)
+ req := h.newNetlinkRequest(proto, unix.NLM_F_ACK)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(link.Attrs.Index))
+ data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b)
+ req.AddData(data)
+
+ b = make([]byte, len(name)+1)
+ copy(b, name)
+ data = nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, b)
+ req.AddData(data)
+
+ return execRdmaSetLink(req)
+}
+
+func netnsModeToString(mode uint8) string {
+ switch mode {
+ case 0:
+ return "exclusive"
+ case 1:
+ return "shared"
+ default:
+ return "unknown"
+ }
+}
+
+func executeOneGetRdmaNetnsMode(data []byte) (string, error) {
+ reader := bytes.NewReader(data)
+ for reader.Len() >= 4 {
+ _, attrType, len, value := parseNfAttrTLV(reader)
+
+ switch attrType {
+ case nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE:
+ var mode uint8
+ r := bytes.NewReader(value)
+ binary.Read(r, nl.NativeEndian(), &mode)
+ return netnsModeToString(mode), nil
+ }
+ if (len % 4) != 0 {
+ // Skip pad bytes
+ reader.Seek(int64(4-(len%4)), seekCurrent)
+ }
+ }
+ return "", fmt.Errorf("Invalid netns mode")
+}
+
+// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem
+// Returns mode string and error status as nil on success or returns error
+// otherwise.
+// Equivalent to: `rdma system show netns'
+func RdmaSystemGetNetnsMode() (string, error) {
+ return pkgHandle.RdmaSystemGetNetnsMode()
+}
+
+// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem
+// Returns mode string and error status as nil on success or returns error
+// otherwise.
+// Equivalent to: `rdma system show netns'
+func (h *Handle) RdmaSystemGetNetnsMode() (string, error) {
+
+ proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_GET)
+ req := h.newNetlinkRequest(proto, unix.NLM_F_ACK)
+
+ msgs, err := req.Execute(unix.NETLINK_RDMA, 0)
+ if err != nil {
+ return "", err
+ }
+ if len(msgs) == 0 {
+ return "", fmt.Errorf("No valid response from kernel")
+ }
+ return executeOneGetRdmaNetnsMode(msgs[0])
+}
+
+func netnsModeStringToUint8(mode string) (uint8, error) {
+ switch mode {
+ case "exclusive":
+ return 0, nil
+ case "shared":
+ return 1, nil
+ default:
+ return 0, fmt.Errorf("Invalid mode; %q", mode)
+ }
+}
+
+// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem
+// Returns nil on success or appropriate error code.
+// Equivalent to: `rdma system set netns { shared | exclusive }'
+func RdmaSystemSetNetnsMode(NewMode string) error {
+ return pkgHandle.RdmaSystemSetNetnsMode(NewMode)
+}
+
+// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem
+// Returns nil on success or appropriate error code.
+// Equivalent to: `rdma system set netns { shared | exclusive }'
+func (h *Handle) RdmaSystemSetNetnsMode(NewMode string) error {
+ value, err := netnsModeStringToUint8(NewMode)
+ if err != nil {
+ return err
+ }
+
+ proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_SET)
+ req := h.newNetlinkRequest(proto, unix.NLM_F_ACK)
+
+ data := nl.NewRtAttr(nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE, []byte{value})
+ req.AddData(data)
+
+ _, err = req.Execute(unix.NETLINK_RDMA, 0)
+ return err
+}
+
+// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `rdma dev set $dev netns $ns`
+func RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error {
+ return pkgHandle.RdmaLinkSetNsFd(link, fd)
+}
+
+// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `rdma dev set $dev netns $ns`
+func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error {
+ proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET)
+ req := h.newNetlinkRequest(proto, unix.NLM_F_ACK)
+
+ data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX,
+ nl.Uint32Attr(link.Attrs.Index))
+ req.AddData(data)
+
+ data = nl.NewRtAttr(nl.RDMA_NLDEV_NET_NS_FD, nl.Uint32Attr(fd))
+ req.AddData(data)
+
+ return execRdmaSetLink(req)
+}
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
index 2cd58ee33..58ff1af60 100644
--- a/vendor/github.com/vishvananda/netlink/route.go
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -47,6 +47,7 @@ type Route struct {
Encap Encap
MTU int
AdvMSS int
+ Hoplimit int
}
func (r Route) String() string {
@@ -89,6 +90,7 @@ func (r Route) Equal(x Route) bool {
r.Table == x.Table &&
r.Type == x.Type &&
r.Tos == x.Tos &&
+ r.Hoplimit == x.Hoplimit &&
r.Flags == x.Flags &&
(r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) &&
(r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) &&
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index 3f856711f..c69c595ed 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -32,6 +32,7 @@ const (
RT_FILTER_SRC
RT_FILTER_GW
RT_FILTER_TABLE
+ RT_FILTER_HOPLIMIT
)
const (
@@ -207,6 +208,7 @@ func (e *SEG6Encap) Decode(buf []byte) error {
}
buf = buf[:l] // make sure buf size upper limit is Length
typ := native.Uint16(buf[2:])
+ // LWTUNNEL_ENCAP_SEG6 has only one attr type SEG6_IPTUNNEL_SRH
if typ != nl.SEG6_IPTUNNEL_SRH {
return fmt.Errorf("unknown SEG6 Type: %d", typ)
}
@@ -259,6 +261,188 @@ func (e *SEG6Encap) Equal(x Encap) bool {
return true
}
+// SEG6LocalEncap definitions
+type SEG6LocalEncap struct {
+ Flags [nl.SEG6_LOCAL_MAX]bool
+ Action int
+ Segments []net.IP // from SRH in seg6_local_lwt
+ Table int // table id for End.T and End.DT6
+ InAddr net.IP
+ In6Addr net.IP
+ Iif int
+ Oif int
+}
+
+func (e *SEG6LocalEncap) Type() int {
+ return nl.LWTUNNEL_ENCAP_SEG6_LOCAL
+}
+func (e *SEG6LocalEncap) Decode(buf []byte) error {
+ attrs, err := nl.ParseRouteAttr(buf)
+ if err != nil {
+ return err
+ }
+ native := nl.NativeEndian()
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.SEG6_LOCAL_ACTION:
+ e.Action = int(native.Uint32(attr.Value[0:4]))
+ e.Flags[nl.SEG6_LOCAL_ACTION] = true
+ case nl.SEG6_LOCAL_SRH:
+ e.Segments, err = nl.DecodeSEG6Srh(attr.Value[:])
+ e.Flags[nl.SEG6_LOCAL_SRH] = true
+ case nl.SEG6_LOCAL_TABLE:
+ e.Table = int(native.Uint32(attr.Value[0:4]))
+ e.Flags[nl.SEG6_LOCAL_TABLE] = true
+ case nl.SEG6_LOCAL_NH4:
+ e.InAddr = net.IP(attr.Value[0:4])
+ e.Flags[nl.SEG6_LOCAL_NH4] = true
+ case nl.SEG6_LOCAL_NH6:
+ e.In6Addr = net.IP(attr.Value[0:16])
+ e.Flags[nl.SEG6_LOCAL_NH6] = true
+ case nl.SEG6_LOCAL_IIF:
+ e.Iif = int(native.Uint32(attr.Value[0:4]))
+ e.Flags[nl.SEG6_LOCAL_IIF] = true
+ case nl.SEG6_LOCAL_OIF:
+ e.Oif = int(native.Uint32(attr.Value[0:4]))
+ e.Flags[nl.SEG6_LOCAL_OIF] = true
+ }
+ }
+ return err
+}
+func (e *SEG6LocalEncap) Encode() ([]byte, error) {
+ var err error
+ native := nl.NativeEndian()
+ res := make([]byte, 8)
+ native.PutUint16(res, 8) // length
+ native.PutUint16(res[2:], nl.SEG6_LOCAL_ACTION)
+ native.PutUint32(res[4:], uint32(e.Action))
+ if e.Flags[nl.SEG6_LOCAL_SRH] {
+ srh, err := nl.EncodeSEG6Srh(e.Segments)
+ if err != nil {
+ return nil, err
+ }
+ attr := make([]byte, 4)
+ native.PutUint16(attr, uint16(len(srh)+4))
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_SRH)
+ attr = append(attr, srh...)
+ res = append(res, attr...)
+ }
+ if e.Flags[nl.SEG6_LOCAL_TABLE] {
+ attr := make([]byte, 8)
+ native.PutUint16(attr, 8)
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_TABLE)
+ native.PutUint32(attr[4:], uint32(e.Table))
+ res = append(res, attr...)
+ }
+ if e.Flags[nl.SEG6_LOCAL_NH4] {
+ attr := make([]byte, 4)
+ native.PutUint16(attr, 8)
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH4)
+ ipv4 := e.InAddr.To4()
+ if ipv4 == nil {
+ err = fmt.Errorf("SEG6_LOCAL_NH4 has invalid IPv4 address")
+ return nil, err
+ }
+ attr = append(attr, ipv4...)
+ res = append(res, attr...)
+ }
+ if e.Flags[nl.SEG6_LOCAL_NH6] {
+ attr := make([]byte, 4)
+ native.PutUint16(attr, 20)
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH6)
+ attr = append(attr, e.In6Addr...)
+ res = append(res, attr...)
+ }
+ if e.Flags[nl.SEG6_LOCAL_IIF] {
+ attr := make([]byte, 8)
+ native.PutUint16(attr, 8)
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_IIF)
+ native.PutUint32(attr[4:], uint32(e.Iif))
+ res = append(res, attr...)
+ }
+ if e.Flags[nl.SEG6_LOCAL_OIF] {
+ attr := make([]byte, 8)
+ native.PutUint16(attr, 8)
+ native.PutUint16(attr[2:], nl.SEG6_LOCAL_OIF)
+ native.PutUint32(attr[4:], uint32(e.Oif))
+ res = append(res, attr...)
+ }
+ return res, err
+}
+func (e *SEG6LocalEncap) String() string {
+ strs := make([]string, 0, nl.SEG6_LOCAL_MAX)
+ strs = append(strs, fmt.Sprintf("action %s", nl.SEG6LocalActionString(e.Action)))
+
+ if e.Flags[nl.SEG6_LOCAL_TABLE] {
+ strs = append(strs, fmt.Sprintf("table %d", e.Table))
+ }
+ if e.Flags[nl.SEG6_LOCAL_NH4] {
+ strs = append(strs, fmt.Sprintf("nh4 %s", e.InAddr))
+ }
+ if e.Flags[nl.SEG6_LOCAL_NH6] {
+ strs = append(strs, fmt.Sprintf("nh6 %s", e.In6Addr))
+ }
+ if e.Flags[nl.SEG6_LOCAL_IIF] {
+ link, err := LinkByIndex(e.Iif)
+ if err != nil {
+ strs = append(strs, fmt.Sprintf("iif %d", e.Iif))
+ } else {
+ strs = append(strs, fmt.Sprintf("iif %s", link.Attrs().Name))
+ }
+ }
+ if e.Flags[nl.SEG6_LOCAL_OIF] {
+ link, err := LinkByIndex(e.Oif)
+ if err != nil {
+ strs = append(strs, fmt.Sprintf("oif %d", e.Oif))
+ } else {
+ strs = append(strs, fmt.Sprintf("oif %s", link.Attrs().Name))
+ }
+ }
+ if e.Flags[nl.SEG6_LOCAL_SRH] {
+ segs := make([]string, 0, len(e.Segments))
+ //append segment backwards (from n to 0) since seg#0 is the last segment.
+ for i := len(e.Segments); i > 0; i-- {
+ segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1]))
+ }
+ strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " ")))
+ }
+ return strings.Join(strs, " ")
+}
+func (e *SEG6LocalEncap) Equal(x Encap) bool {
+ o, ok := x.(*SEG6LocalEncap)
+ if !ok {
+ return false
+ }
+ if e == o {
+ return true
+ }
+ if e == nil || o == nil {
+ return false
+ }
+ // compare all arrays first
+ for i := range e.Flags {
+ if e.Flags[i] != o.Flags[i] {
+ return false
+ }
+ }
+ if len(e.Segments) != len(o.Segments) {
+ return false
+ }
+ for i := range e.Segments {
+ if !e.Segments[i].Equal(o.Segments[i]) {
+ return false
+ }
+ }
+ // compare values
+ if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) {
+ return false
+ }
+ if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif {
+ return false
+ }
+ return true
+}
+
// RouteAdd will add a route to the system.
// Equivalent to: `ip route add $route`
func RouteAdd(route *Route) error {
@@ -335,18 +519,18 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg
if err != nil {
return err
}
- rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_NEWDST, buf))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_NEWDST, buf))
}
if route.Encap != nil {
buf := make([]byte, 2)
native.PutUint16(buf, uint16(route.Encap.Type()))
- rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf))
buf, err := route.Encap.Encode()
if err != nil {
return err
}
- rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP, buf))
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf))
}
if route.Src != nil {
@@ -410,17 +594,17 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg
if err != nil {
return err
}
- children = append(children, nl.NewRtAttr(nl.RTA_NEWDST, buf))
+ children = append(children, nl.NewRtAttr(unix.RTA_NEWDST, buf))
}
if nh.Encap != nil {
buf := make([]byte, 2)
native.PutUint16(buf, uint16(nh.Encap.Type()))
- rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf))
+ children = append(children, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf))
buf, err := nh.Encap.Encode()
if err != nil {
return err
}
- children = append(children, nl.NewRtAttr(nl.RTA_ENCAP, buf))
+ children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf))
}
rtnh.Children = children
buf = append(buf, rtnh.Serialize()...)
@@ -464,6 +648,10 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg
b := nl.Uint32Attr(uint32(route.AdvMSS))
metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b))
}
+ if route.Hoplimit > 0 {
+ b := nl.Uint32Attr(uint32(route.Hoplimit))
+ metrics = append(metrics, nl.NewRtAttr(unix.RTAX_HOPLIMIT, b))
+ }
if metrics != nil {
attr := nl.NewRtAttr(unix.RTA_METRICS, nil)
@@ -574,6 +762,8 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64)
continue
}
}
+ case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit:
+ continue
}
}
res = append(res, route)
@@ -649,7 +839,7 @@ func deserializeRoute(m []byte) (Route, error) {
switch attr.Attr.Type {
case unix.RTA_GATEWAY:
info.Gw = net.IP(attr.Value)
- case nl.RTA_NEWDST:
+ case unix.RTA_NEWDST:
var d Destination
switch msg.Family {
case nl.FAMILY_MPLS:
@@ -659,9 +849,9 @@ func deserializeRoute(m []byte) (Route, error) {
return nil, nil, err
}
info.NewDst = d
- case nl.RTA_ENCAP_TYPE:
+ case unix.RTA_ENCAP_TYPE:
encapType = attr
- case nl.RTA_ENCAP:
+ case unix.RTA_ENCAP:
encap = attr
}
}
@@ -690,7 +880,7 @@ func deserializeRoute(m []byte) (Route, error) {
route.MultiPath = append(route.MultiPath, info)
rest = buf
}
- case nl.RTA_NEWDST:
+ case unix.RTA_NEWDST:
var d Destination
switch msg.Family {
case nl.FAMILY_MPLS:
@@ -700,9 +890,9 @@ func deserializeRoute(m []byte) (Route, error) {
return route, err
}
route.NewDst = d
- case nl.RTA_ENCAP_TYPE:
+ case unix.RTA_ENCAP_TYPE:
encapType = attr
- case nl.RTA_ENCAP:
+ case unix.RTA_ENCAP:
encap = attr
case unix.RTA_METRICS:
metrics, err := nl.ParseRouteAttr(attr.Value)
@@ -715,6 +905,8 @@ func deserializeRoute(m []byte) (Route, error) {
route.MTU = int(native.Uint32(metric.Value[0:4]))
case unix.RTAX_ADVMSS:
route.AdvMSS = int(native.Uint32(metric.Value[0:4]))
+ case unix.RTAX_HOPLIMIT:
+ route.Hoplimit = int(native.Uint32(metric.Value[0:4]))
}
}
}
@@ -734,6 +926,11 @@ func deserializeRoute(m []byte) (Route, error) {
if err := e.Decode(encap.Value); err != nil {
return route, err
}
+ case nl.LWTUNNEL_ENCAP_SEG6_LOCAL:
+ e = &SEG6LocalEncap{}
+ if err := e.Decode(encap.Value); err != nil {
+ return route, err
+ }
}
route.Encap = e
}
@@ -840,13 +1037,19 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <
go func() {
defer close(ch)
for {
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
if cberr != nil {
cberr(err)
}
return
}
+ if from.Pid != nl.PidKernel {
+ if cberr != nil {
+ cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel))
+ }
+ continue
+ }
for _, m := range msgs {
if m.Header.Type == unix.NLMSG_DONE {
continue
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
index 6238ae458..e12569fe4 100644
--- a/vendor/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -144,7 +144,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName)))
}
if rule.Goto >= 0 {
- msg.Type = nl.FR_ACT_NOP
+ msg.Type = nl.FR_ACT_GOTO
b := make([]byte, 4)
native.PutUint32(b, uint32(rule.Goto))
req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b))
diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go
index 99e9fb4d8..c4d89c17e 100644
--- a/vendor/github.com/vishvananda/netlink/socket_linux.go
+++ b/vendor/github.com/vishvananda/netlink/socket_linux.go
@@ -141,10 +141,13 @@ func SocketGet(local, remote net.Addr) (*Socket, error) {
},
})
s.Send(req)
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
return nil, err
}
+ if from.Pid != nl.PidKernel {
+ return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)
+ }
if len(msgs) == 0 {
return nil, errors.New("no message nor error from netlink")
}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
index efe72ddf2..985d3a915 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
@@ -54,11 +54,15 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error
go func() {
defer close(ch)
for {
- msgs, err := s.Receive()
+ msgs, from, err := s.Receive()
if err != nil {
errorChan <- err
return
}
+ if from.Pid != nl.PidKernel {
+ errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)
+ return
+ }
for _, m := range msgs {
switch m.Header.Type {
case nl.XFRM_MSG_EXPIRE:
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
index c97ec43a2..6219d2772 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_policy.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
@@ -35,6 +35,25 @@ func (d Dir) String() string {
return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
}
+// PolicyAction is an enum representing an ipsec policy action.
+type PolicyAction uint8
+
+const (
+ XFRM_POLICY_ALLOW PolicyAction = 0
+ XFRM_POLICY_BLOCK PolicyAction = 1
+)
+
+func (a PolicyAction) String() string {
+ switch a {
+ case XFRM_POLICY_ALLOW:
+ return "allow"
+ case XFRM_POLICY_BLOCK:
+ return "block"
+ default:
+ return fmt.Sprintf("action %d", a)
+ }
+}
+
// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
// policy. These rules are matched with XfrmState to determine encryption
// and authentication algorithms.
@@ -64,11 +83,14 @@ type XfrmPolicy struct {
Dir Dir
Priority int
Index int
+ Action PolicyAction
+ Ifindex int
+ Ifid int
Mark *XfrmMark
Tmpls []XfrmPolicyTmpl
}
func (p XfrmPolicy) String() string {
- return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Mark: %s, Tmpls: %s}",
- p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Mark, p.Tmpls)
+ return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}",
+ p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls)
}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
index fde0c2ca5..a4e132ef5 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -27,6 +27,7 @@ func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
if sel.Sport != 0 {
sel.SportMask = ^uint16(0)
}
+ sel.Ifindex = int32(policy.Ifindex)
}
// XfrmPolicyAdd will add an xfrm policy to the system.
@@ -61,6 +62,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
msg.Priority = uint32(policy.Priority)
msg.Index = uint32(policy.Index)
msg.Dir = uint8(policy.Dir)
+ msg.Action = uint8(policy.Action)
msg.Lft.SoftByteLimit = nl.XFRM_INF
msg.Lft.HardByteLimit = nl.XFRM_INF
msg.Lft.SoftPacketLimit = nl.XFRM_INF
@@ -90,6 +92,9 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
req.AddData(out)
}
+ ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid)))
+ req.AddData(ifId)
+
_, err := req.Execute(unix.NETLINK_XFRM, 0)
return err
}
@@ -183,6 +188,9 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo
req.AddData(out)
}
+ ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid)))
+ req.AddData(ifId)
+
resType := nl.XFRM_MSG_NEWPOLICY
if nlProto == nl.XFRM_MSG_DELPOLICY {
resType = 0
@@ -197,12 +205,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo
return nil, err
}
- p, err := parseXfrmPolicy(msgs[0], FAMILY_ALL)
- if err != nil {
- return nil, err
- }
-
- return p, nil
+ return parseXfrmPolicy(msgs[0], FAMILY_ALL)
}
func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
@@ -220,9 +223,11 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
policy.Proto = Proto(msg.Sel.Proto)
policy.DstPort = int(nl.Swap16(msg.Sel.Dport))
policy.SrcPort = int(nl.Swap16(msg.Sel.Sport))
+ policy.Ifindex = int(msg.Sel.Ifindex)
policy.Priority = int(msg.Priority)
policy.Index = int(msg.Index)
policy.Dir = Dir(msg.Dir)
+ policy.Action = PolicyAction(msg.Action)
attrs, err := nl.ParseRouteAttr(m[msg.Len():])
if err != nil {
@@ -249,6 +254,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) {
policy.Mark = new(XfrmMark)
policy.Mark.Value = mark.Value
policy.Mark.Mask = mark.Mask
+ case nl.XFRMA_IF_ID:
+ policy.Ifid = int(native.Uint32(attr.Value))
}
}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go
index d14740dc5..483d8934a 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_state.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go
@@ -94,6 +94,8 @@ type XfrmState struct {
Limits XfrmStateLimits
Statistics XfrmStateStats
Mark *XfrmMark
+ OutputMark int
+ Ifid int
Auth *XfrmStateAlgo
Crypt *XfrmStateAlgo
Aead *XfrmStateAlgo
@@ -102,8 +104,8 @@ type XfrmState struct {
}
func (sa XfrmState) String() string {
- return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t",
- sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN)
+ return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %d, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t",
+ sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN)
}
func (sa XfrmState) Print(stats bool) string {
if !stats {
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
index 5dfdb33e4..66c99423c 100644
--- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
+++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -158,6 +158,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow))
req.AddData(out)
}
+ if state.OutputMark != 0 {
+ out := nl.NewRtAttr(nl.XFRMA_OUTPUT_MARK, nl.Uint32Attr(uint32(state.OutputMark)))
+ req.AddData(out)
+ }
+
+ ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid)))
+ req.AddData(ifId)
_, err := req.Execute(unix.NETLINK_XFRM, 0)
return err
@@ -184,12 +191,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) {
return nil, err
}
- s, err := parseXfrmState(msgs[0], FAMILY_ALL)
- if err != nil {
- return nil, err
- }
-
- return s, err
+ return parseXfrmState(msgs[0], FAMILY_ALL)
}
// XfrmStateDel will delete an xfrm state from the system. Note that
@@ -275,6 +277,9 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState
req.AddData(out)
}
+ ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid)))
+ req.AddData(ifId)
+
resType := nl.XFRM_MSG_NEWSA
if nlProto == nl.XFRM_MSG_DELSA {
resType = 0
@@ -372,6 +377,10 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) {
state.Mark = new(XfrmMark)
state.Mark.Value = mark.Value
state.Mark.Mask = mark.Mask
+ case nl.XFRMA_OUTPUT_MARK:
+ state.OutputMark = int(native.Uint32(attr.Value))
+ case nl.XFRMA_IF_ID:
+ state.Ifid = int(native.Uint32(attr.Value))
}
}
@@ -394,11 +403,7 @@ func (h *Handle) XfrmStateFlush(proto Proto) error {
req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)})
_, err := req.Execute(unix.NETLINK_XFRM, 0)
- if err != nil {
- return err
- }
-
- return nil
+ return err
}
func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) {
diff --git a/vendor/github.com/vishvananda/netns/go.mod b/vendor/github.com/vishvananda/netns/go.mod
new file mode 100644
index 000000000..8221f782c
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/go.mod
@@ -0,0 +1,3 @@
+module github.com/vishvananda/netns
+
+go 1.12
diff --git a/vendor/github.com/vishvananda/netns/go.sum b/vendor/github.com/vishvananda/netns/go.sum
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/vishvananda/netns/go.sum
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index b33bdbaec..0b9bb6030 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -626,32 +626,18 @@ func trace(args ...interface{}) func() {
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
// While we need more tokens to fetch, do it.
for {
- // Check if we really need to fetch more tokens.
- need_more_tokens := false
-
- if parser.tokens_head == len(parser.tokens) {
- // Queue is empty.
- need_more_tokens = true
- } else {
- // Check if any potential simple key may occupy the head position.
- for i := len(parser.simple_keys) - 1; i >= 0; i-- {
- simple_key := &parser.simple_keys[i]
- if simple_key.token_number < parser.tokens_parsed {
- break
- }
- if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
- return false
- } else if valid && simple_key.token_number == parser.tokens_parsed {
- need_more_tokens = true
- break
- }
+ if parser.tokens_head != len(parser.tokens) {
+ // If queue is non-empty, check if any potential simple key may
+ // occupy the head position.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
}
}
-
- // We are finished.
- if !need_more_tokens {
- break
- }
// Fetch the next token.
if !yaml_parser_fetch_next_token(parser) {
return false
@@ -883,6 +869,7 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
return false
}
parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
}
return true
}
@@ -897,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
"while scanning a simple key", parser.simple_keys[i].mark,
"could not find expected ':'")
}
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
}
- // Remove the key from the stack.
- parser.simple_keys[i].possible = false
return true
}
@@ -930,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
if parser.flow_level > 0 {
parser.flow_level--
- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
}
return true
}
@@ -1007,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
// Initialize the simple key stack.
parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+ parser.simple_keys_by_tok = make(map[int]int)
+
// A simple key is allowed at the beginning of the stream.
parser.simple_key_allowed = true
@@ -1310,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
// Remove the simple key.
simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
// A simple key cannot follow another simple key.
parser.simple_key_allowed = false
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index e25cee563..f6a9c8e34 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -579,6 +579,7 @@ type yaml_parser_t struct {
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ebac0089b..6385ab250 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -54,7 +54,7 @@ github.com/containernetworking/cni/pkg/types/020
github.com/containernetworking/cni/pkg/types/current
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v0.8.2
+# github.com/containernetworking/plugins v0.8.5
github.com/containernetworking/plugins/pkg/ip
github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
@@ -78,7 +78,7 @@ github.com/containers/buildah/util
# github.com/containers/common v0.0.7
github.com/containers/common/pkg/cgroups
github.com/containers/common/pkg/unshare
-# github.com/containers/conmon v2.0.9+incompatible
+# github.com/containers/conmon v2.0.10+incompatible
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.1.0
github.com/containers/image/v5/copy
@@ -138,7 +138,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.15.5
+# github.com/containers/storage v1.15.7
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -178,7 +178,7 @@ github.com/containers/storage/pkg/stringutils
github.com/containers/storage/pkg/system
github.com/containers/storage/pkg/tarlog
github.com/containers/storage/pkg/truncindex
-# github.com/coreos/go-iptables v0.4.2
+# github.com/coreos/go-iptables v0.4.5
github.com/coreos/go-iptables/iptables
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/go-systemd/activation
@@ -261,8 +261,6 @@ github.com/docker/spdystream
github.com/docker/spdystream/spdy
# github.com/etcd-io/bbolt v1.3.3
github.com/etcd-io/bbolt
-# github.com/fatih/camelcase v1.0.0
-github.com/fatih/camelcase
# github.com/fsnotify/fsnotify v1.4.7
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.6.0
@@ -312,9 +310,9 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111
github.com/ishidawataru/sctp
-# github.com/json-iterator/go v1.1.8
+# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.9.4
+# github.com/klauspost/compress v1.9.7
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -325,7 +323,7 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
-# github.com/mattn/go-shellwords v1.0.6
+# github.com/mattn/go-shellwords v1.0.7
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil
@@ -408,7 +406,7 @@ github.com/opencontainers/runtime-tools/validate
# github.com/opencontainers/selinux v1.3.0
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
-# github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
+# github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
github.com/openshift/api/config/v1
# github.com/openshift/imagebuilder v1.1.1
github.com/openshift/imagebuilder
@@ -423,7 +421,7 @@ github.com/opentracing/opentracing-go/log
# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
-# github.com/pkg/errors v0.9.0
+# github.com/pkg/errors v0.9.1
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
@@ -478,11 +476,12 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
-# github.com/uber/jaeger-client-go v2.20.1+incompatible
+# github.com/uber/jaeger-client-go v2.22.1+incompatible
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage
github.com/uber/jaeger-client-go/internal/baggage/remote
+github.com/uber/jaeger-client-go/internal/reporterstats
github.com/uber/jaeger-client-go/internal/spanlog
github.com/uber/jaeger-client-go/internal/throttler
github.com/uber/jaeger-client-go/internal/throttler/remote
@@ -516,10 +515,10 @@ github.com/vbauerster/mpb/v4
github.com/vbauerster/mpb/v4/cwriter
github.com/vbauerster/mpb/v4/decor
github.com/vbauerster/mpb/v4/internal
-# github.com/vishvananda/netlink v1.0.0
+# github.com/vishvananda/netlink v1.1.0
github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl
-# github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f
+# github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df
github.com/vishvananda/netns
# github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b
github.com/xeipuuv/gojsonpointer
@@ -626,11 +625,11 @@ gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
-# gopkg.in/yaml.v2 v2.2.7
+# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
-# k8s.io/api v0.17.0
+# k8s.io/api v0.17.2
k8s.io/api/core/v1
-# k8s.io/apimachinery v0.17.0
+# k8s.io/apimachinery v0.17.2
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1