author    Valentin Rothberg <rothberg@redhat.com>  2019-02-05 11:51:41 +0100
committer Valentin Rothberg <rothberg@redhat.com>  2019-02-06 11:14:06 +0100
commit    9ac0ebb0791851aea81ecc847802db5a39bfb6e7 (patch)
tree      30ad98bcc2c2dd1136f46a48cbc44d422adfa184 /vendor/github.com/onsi/ginkgo/internal
parent    51714d5da7aaa19014fd67b48b79dfbd5f69c1f0 (diff)
download  podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.tar.gz
          podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.tar.bz2
          podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.zip
Cirrus: add vendor_check_task
* Make sure that all vendored dependencies are in sync with the code and vendor.conf by running `make vendor` with a follow-up status check of the git tree.
* Vendor ginkgo and gomega to include the test dependencies.

Signed-off-by: Chris Evich <cevich@redhat.com>
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Diffstat (limited to 'vendor/github.com/onsi/ginkgo/internal')
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go | 32
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go | 80
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go | 151
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go | 213
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/failer/failer.go | 92
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go | 141
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go | 103
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go | 19
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go | 47
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go | 22
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go | 62
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go | 155
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go | 117
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go | 48
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go | 48
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go | 353
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go | 55
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go | 230
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go | 90
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go | 199
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go | 181
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go | 446
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go | 249
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go | 315
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go | 22
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go | 33
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go | 147
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go | 181
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go | 83
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go | 36
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/server.go | 224
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/server_test.go | 269
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go | 11
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go | 9
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go | 11
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec/spec.go | 247
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go | 739
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec/specs.go | 123
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go | 287
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go | 55
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go | 149
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go | 59
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go | 112
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go | 45
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go | 64
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go | 47
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go | 62
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go | 20
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go | 15
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go | 411
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go | 785
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/suite/suite.go | 190
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go | 35
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go | 385
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go | 76
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go | 36
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/writer/writer.go | 89
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go | 13
-rw-r--r-- vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go | 75
68 files changed, 8710 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
new file mode 100644
index 000000000..fa2f0bf73
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
@@ -0,0 +1,32 @@
+package codelocation
+
+import (
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+
+ "github.com/onsi/ginkgo/types"
+)
+
+func New(skip int) types.CodeLocation {
+ _, file, line, _ := runtime.Caller(skip + 1)
+ stackTrace := PruneStack(string(debug.Stack()), skip)
+ return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
+}
+
+func PruneStack(fullStackTrace string, skip int) string {
+ stack := strings.Split(fullStackTrace, "\n")
+ if len(stack) > 2*(skip+1) {
+ stack = stack[2*(skip+1):]
+ }
+ prunedStack := []string{}
+ re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+ for i := 0; i < len(stack)/2; i++ {
+ if !re.Match([]byte(stack[i*2])) {
+ prunedStack = append(prunedStack, stack[i*2])
+ prunedStack = append(prunedStack, stack[i*2+1])
+ }
+ }
+ return strings.Join(prunedStack, "\n")
+}
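
A quick illustration of the skip semantics above: New(skip) resolves the call site skip+1 frames up the stack, and PruneStack first drops the 2*(skip+1) header lines emitted by debug.Stack(), then removes file/function line pairs whose file path matches /ginkgo/, /pkg/testing/, or /pkg/runtime/. A minimal sketch, assuming it compiles from within the ginkgo module (the internal/ import path is not visible to outside packages):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/codelocation"
)

func main() {
	// skip=0 points at this call site; each +1 walks one caller further up.
	loc := codelocation.New(0)
	fmt.Println(loc.String()) // e.g. "main.go:13"

	// A fabricated trace in the file-line/function-line pair layout used above.
	trace := "goroutine 1 [running]:\n" +
		"header line dropped by the 2*(skip+1) rule\n" +
		"/go/src/github.com/onsi/ginkgo/whatever.go:10 (0x1)\n" +
		"Ginkgo: Func()\n" +
		"/go/src/mycode/code.go:10 (0x2)\n" +
		"MyCode: Func()\n"

	// Prints only the mycode pair: the ginkgo pair matches the prune regexp.
	fmt.Println(codelocation.PruneStack(trace, 0))
}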
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go
new file mode 100644
index 000000000..f06abf3c5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go
@@ -0,0 +1,13 @@
+package codelocation_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestCodelocation(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "CodeLocation Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
new file mode 100644
index 000000000..cca75a449
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
@@ -0,0 +1,80 @@
+package codelocation_test
+
+import (
+ "runtime"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("CodeLocation", func() {
+ var (
+ codeLocation types.CodeLocation
+ expectedFileName string
+ expectedLineNumber int
+ )
+
+ caller0 := func() {
+ codeLocation = codelocation.New(1)
+ }
+
+ caller1 := func() {
+ _, expectedFileName, expectedLineNumber, _ = runtime.Caller(0)
+ expectedLineNumber += 2
+ caller0()
+ }
+
+ BeforeEach(func() {
+ caller1()
+ })
+
+ It("should use the passed in skip parameter to pick out the correct file & line number", func() {
+ Ω(codeLocation.FileName).Should(Equal(expectedFileName))
+ Ω(codeLocation.LineNumber).Should(Equal(expectedLineNumber))
+ })
+
+ Describe("stringer behavior", func() {
+ It("should stringify nicely", func() {
+ Ω(codeLocation.String()).Should(ContainSubstring("code_location_test.go:%d", expectedLineNumber))
+ })
+ })
+
+ //There's no better way than to test this private method as it
+ //goes out of its way to prune out ginkgo related code in the stack trace
+ Describe("PruneStack", func() {
+ It("should remove any references to ginkgo and pkg/testing and pkg/runtime", func() {
+ input := `/Skip/me
+Skip: skip()
+/Skip/me
+Skip: skip()
+/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever.go:10 (0x12314)
+Something: Func()
+/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever_else.go:10 (0x12314)
+SomethingInternalToGinkgo: Func()
+/usr/goroot/pkg/strings/oops.go:10 (0x12341)
+Oops: BlowUp()
+/Users/whoever/gospace/src/mycode/code.go:10 (0x12341)
+MyCode: Func()
+/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341)
+MyCodeTest: Func()
+/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08)
+TestFoo: RunSpecs(t, "Foo Suite")
+/usr/goroot/pkg/testing/testing.go:12 (0x37f08)
+TestingT: Blah()
+/usr/goroot/pkg/runtime/runtime.go:12 (0x37f08)
+Something: Func()
+`
+ prunedStack := codelocation.PruneStack(input, 1)
+ Ω(prunedStack).Should(Equal(`/usr/goroot/pkg/strings/oops.go:10 (0x12341)
+Oops: BlowUp()
+/Users/whoever/gospace/src/mycode/code.go:10 (0x12341)
+MyCode: Func()
+/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341)
+MyCodeTest: Func()
+/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08)
+TestFoo: RunSpecs(t, "Foo Suite")`))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
new file mode 100644
index 000000000..0737746dc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
@@ -0,0 +1,151 @@
+package containernode
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/types"
+)
+
+type subjectOrContainerNode struct {
+ containerNode *ContainerNode
+ subjectNode leafnodes.SubjectNode
+}
+
+func (n subjectOrContainerNode) text() string {
+ if n.containerNode != nil {
+ return n.containerNode.Text()
+ } else {
+ return n.subjectNode.Text()
+ }
+}
+
+type CollatedNodes struct {
+ Containers []*ContainerNode
+ Subject leafnodes.SubjectNode
+}
+
+type ContainerNode struct {
+ text string
+ flag types.FlagType
+ codeLocation types.CodeLocation
+
+ setupNodes []leafnodes.BasicNode
+ subjectAndContainerNodes []subjectOrContainerNode
+}
+
+func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
+ return &ContainerNode{
+ text: text,
+ flag: flag,
+ codeLocation: codeLocation,
+ }
+}
+
+func (container *ContainerNode) Shuffle(r *rand.Rand) {
+ sort.Sort(container)
+ permutation := r.Perm(len(container.subjectAndContainerNodes))
+ shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
+ for i, j := range permutation {
+ shuffledNodes[i] = container.subjectAndContainerNodes[j]
+ }
+ container.subjectAndContainerNodes = shuffledNodes
+}
+
+func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
+ if node.flag == types.FlagTypePending {
+ return false
+ }
+
+ shouldUnfocus := false
+ for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
+ if subjectOrContainerNode.containerNode != nil {
+ shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
+ } else {
+ shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
+ }
+ }
+
+ if shouldUnfocus {
+ if node.flag == types.FlagTypeFocused {
+ node.flag = types.FlagTypeNone
+ }
+ return true
+ }
+
+ return node.flag == types.FlagTypeFocused
+}
+
+func (node *ContainerNode) Collate() []CollatedNodes {
+ return node.collate([]*ContainerNode{})
+}
+
+func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
+ collated := make([]CollatedNodes, 0)
+
+ containers := make([]*ContainerNode, len(enclosingContainers))
+ copy(containers, enclosingContainers)
+ containers = append(containers, node)
+
+ for _, subjectOrContainer := range node.subjectAndContainerNodes {
+ if subjectOrContainer.containerNode != nil {
+ collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
+ } else {
+ collated = append(collated, CollatedNodes{
+ Containers: containers,
+ Subject: subjectOrContainer.subjectNode,
+ })
+ }
+ }
+
+ return collated
+}
+
+func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
+ node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
+}
+
+func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
+ node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
+}
+
+func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
+ node.setupNodes = append(node.setupNodes, setupNode)
+}
+
+func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
+ nodes := []leafnodes.BasicNode{}
+ for _, setupNode := range node.setupNodes {
+ if setupNode.Type() == nodeType {
+ nodes = append(nodes, setupNode)
+ }
+ }
+ return nodes
+}
+
+func (node *ContainerNode) Text() string {
+ return node.text
+}
+
+func (node *ContainerNode) CodeLocation() types.CodeLocation {
+ return node.codeLocation
+}
+
+func (node *ContainerNode) Flag() types.FlagType {
+ return node.flag
+}
+
+//sort.Interface
+
+func (node *ContainerNode) Len() int {
+ return len(node.subjectAndContainerNodes)
+}
+
+func (node *ContainerNode) Less(i, j int) bool {
+ return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
+}
+
+func (node *ContainerNode) Swap(i, j int) {
+ node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
+}
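
To make the collation order concrete, here is a minimal sketch (again runnable only from inside the ginkgo module, since these are internal packages) that builds a two-level tree and flattens it with Collate:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

func main() {
	outer := containernode.New("outer", types.FlagTypeNone, codelocation.New(0))
	inner := containernode.New("inner", types.FlagTypeNone, codelocation.New(0))

	// Subjects and child containers keep their relative push order.
	outer.PushSubjectNode(leafnodes.NewItNode("first", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0))
	outer.PushContainerNode(inner)
	inner.PushSubjectNode(leafnodes.NewItNode("nested", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0))

	// Collate walks depth-first, pairing each subject with its container chain.
	for _, collated := range outer.Collate() {
		fmt.Println(len(collated.Containers), collated.Subject.Text())
	}
	// Output:
	// 1 first
	// 2 nested
}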
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go
new file mode 100644
index 000000000..c6fc314ff
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go
@@ -0,0 +1,13 @@
+package containernode_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestContainernode(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Containernode Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
new file mode 100644
index 000000000..11ac9b70b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
@@ -0,0 +1,213 @@
+package containernode_test
+
+import (
+ "math/rand"
+
+ "github.com/onsi/ginkgo/internal/leafnodes"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ . "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Container Node", func() {
+ var (
+ codeLocation types.CodeLocation
+ container *ContainerNode
+ )
+
+ BeforeEach(func() {
+ codeLocation = codelocation.New(0)
+ container = New("description text", types.FlagTypeFocused, codeLocation)
+ })
+
+ Describe("creating a container node", func() {
+ It("can answer questions about itself", func() {
+ Ω(container.Text()).Should(Equal("description text"))
+ Ω(container.Flag()).Should(Equal(types.FlagTypeFocused))
+ Ω(container.CodeLocation()).Should(Equal(codeLocation))
+ })
+ })
+
+ Describe("pushing setup nodes", func() {
+ It("can append setup nodes of various types and fetch them by type", func() {
+ befA := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+ befB := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+ aftA := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+ aftB := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+ jusBefA := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+ jusBefB := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0)
+
+ container.PushSetupNode(befA)
+ container.PushSetupNode(befB)
+ container.PushSetupNode(aftA)
+ container.PushSetupNode(aftB)
+ container.PushSetupNode(jusBefA)
+ container.PushSetupNode(jusBefB)
+
+ subject := leafnodes.NewItNode("subject", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ container.PushSubjectNode(subject)
+
+ Ω(container.SetupNodesOfType(types.SpecComponentTypeBeforeEach)).Should(Equal([]leafnodes.BasicNode{befA, befB}))
+ Ω(container.SetupNodesOfType(types.SpecComponentTypeAfterEach)).Should(Equal([]leafnodes.BasicNode{aftA, aftB}))
+ Ω(container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach)).Should(Equal([]leafnodes.BasicNode{jusBefA, jusBefB}))
+ Ω(container.SetupNodesOfType(types.SpecComponentTypeIt)).Should(BeEmpty()) //subjects are not setup nodes
+ })
+ })
+
+ Context("With appended containers and subject nodes", func() {
+ var (
+ itA, itB, innerItA, innerItB leafnodes.SubjectNode
+ innerContainer *ContainerNode
+ )
+
+ BeforeEach(func() {
+ itA = leafnodes.NewItNode("Banana", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ itB = leafnodes.NewItNode("Apple", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+
+ innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ innerItB = leafnodes.NewItNode("inner B", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+
+ innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0))
+
+ container.PushSubjectNode(itA)
+ container.PushContainerNode(innerContainer)
+ innerContainer.PushSubjectNode(innerItA)
+ innerContainer.PushSubjectNode(innerItB)
+ container.PushSubjectNode(itB)
+ })
+
+ Describe("Collating", func() {
+ It("should return a collated set of containers and subject nodes in the correct order", func() {
+ collated := container.Collate()
+ Ω(collated).Should(HaveLen(4))
+
+ Ω(collated[0]).Should(Equal(CollatedNodes{
+ Containers: []*ContainerNode{container},
+ Subject: itA,
+ }))
+
+ Ω(collated[1]).Should(Equal(CollatedNodes{
+ Containers: []*ContainerNode{container, innerContainer},
+ Subject: innerItA,
+ }))
+
+ Ω(collated[2]).Should(Equal(CollatedNodes{
+ Containers: []*ContainerNode{container, innerContainer},
+ Subject: innerItB,
+ }))
+
+ Ω(collated[3]).Should(Equal(CollatedNodes{
+ Containers: []*ContainerNode{container},
+ Subject: itB,
+ }))
+ })
+ })
+
+ Describe("Backpropagating Programmatic Focus", func() {
+ //This allows inner focused specs to override the focus of outer focussed
+ //specs and more closely maps to what a developer wants to happen
+ //when debugging a test suite
+
+ Context("when a parent is focused *and* an inner subject is focused", func() {
+ BeforeEach(func() {
+ container = New("description text", types.FlagTypeFocused, codeLocation)
+ itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ container.PushSubjectNode(itA)
+
+ innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0))
+ container.PushContainerNode(innerContainer)
+ innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0)
+ innerContainer.PushSubjectNode(innerItA)
+ })
+
+ It("should unfocus the parent", func() {
+ container.BackPropagateProgrammaticFocus()
+
+ Ω(container.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused))
+ })
+ })
+
+ Context("when a parent is focused *and* an inner container is focused", func() {
+ BeforeEach(func() {
+ container = New("description text", types.FlagTypeFocused, codeLocation)
+ itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ container.PushSubjectNode(itA)
+
+ innerContainer = New("Orange", types.FlagTypeFocused, codelocation.New(0))
+ container.PushContainerNode(innerContainer)
+ innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ innerContainer.PushSubjectNode(innerItA)
+ })
+
+ It("should unfocus the parent", func() {
+ container.BackPropagateProgrammaticFocus()
+
+ Ω(container.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeFocused))
+ Ω(innerItA.Flag()).Should(Equal(types.FlagTypeNone))
+ })
+ })
+
+ Context("when a parent is pending and a child is focused", func() {
+ BeforeEach(func() {
+ container = New("description text", types.FlagTypeFocused, codeLocation)
+ itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0)
+ container.PushSubjectNode(itA)
+
+ innerContainer = New("Orange", types.FlagTypePending, codelocation.New(0))
+ container.PushContainerNode(innerContainer)
+ innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0)
+ innerContainer.PushSubjectNode(innerItA)
+ })
+
+ It("should not do anything", func() {
+ container.BackPropagateProgrammaticFocus()
+
+ Ω(container.Flag()).Should(Equal(types.FlagTypeFocused))
+ Ω(itA.Flag()).Should(Equal(types.FlagTypeNone))
+ Ω(innerContainer.Flag()).Should(Equal(types.FlagTypePending))
+ Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused))
+ })
+ })
+ })
+
+ Describe("Shuffling", func() {
+ var unshuffledCollation []CollatedNodes
+ BeforeEach(func() {
+ unshuffledCollation = container.Collate()
+
+ r := rand.New(rand.NewSource(17))
+ container.Shuffle(r)
+ })
+
+ It("should sort, and then shuffle, the top level contents of the container", func() {
+ shuffledCollation := container.Collate()
+ Ω(shuffledCollation).Should(HaveLen(len(unshuffledCollation)))
+ Ω(shuffledCollation).ShouldNot(Equal(unshuffledCollation))
+
+ for _, entry := range unshuffledCollation {
+ Ω(shuffledCollation).Should(ContainElement(entry))
+ }
+
+ innerAIndex, innerBIndex := 0, 0
+ for i, entry := range shuffledCollation {
+ if entry.Subject == innerItA {
+ innerAIndex = i
+ } else if entry.Subject == innerItB {
+ innerBIndex = i
+ }
+ }
+
+ Ω(innerAIndex).Should(Equal(innerBIndex - 1))
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
new file mode 100644
index 000000000..678ea2514
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
@@ -0,0 +1,92 @@
+package failer
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/onsi/ginkgo/types"
+)
+
+type Failer struct {
+ lock *sync.Mutex
+ failure types.SpecFailure
+ state types.SpecState
+}
+
+func New() *Failer {
+ return &Failer{
+ lock: &sync.Mutex{},
+ state: types.SpecStatePassed,
+ }
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStatePanicked
+ f.failure = types.SpecFailure{
+ Message: "Test Panicked",
+ Location: location,
+ ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+ }
+ }
+}
+
+func (f *Failer) Timeout(location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateTimedOut
+ f.failure = types.SpecFailure{
+ Message: "Timed out",
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateFailed
+ f.failure = types.SpecFailure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ failure := f.failure
+ outcome := f.state
+ if outcome != types.SpecStatePassed {
+ failure.ComponentType = componentType
+ failure.ComponentIndex = componentIndex
+ failure.ComponentCodeLocation = componentCodeLocation
+ }
+
+ f.state = types.SpecStatePassed
+ f.failure = types.SpecFailure{}
+
+ return failure, outcome
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateSkipped
+ f.failure = types.SpecFailure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
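
The state machine above is first-failure-wins: Panic, Timeout, Fail, and Skip are all no-ops unless the state is still SpecStatePassed, and Drain reports the stored failure while resetting the Failer for the next node. A short sketch of those semantics (same internal-import caveat as above):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
)

func main() {
	f := failer.New()
	f.Fail("first failure", codelocation.New(0))
	f.Fail("second failure", codelocation.New(0)) // ignored: state is no longer Passed

	failure, state := f.Drain(types.SpecComponentTypeIt, 0, codelocation.New(0))
	fmt.Println(state == types.SpecStateFailed, failure.Message) // true first failure

	// Drain resets the Failer, so a second drain with no new failures passes.
	_, state = f.Drain(types.SpecComponentTypeIt, 0, codelocation.New(0))
	fmt.Println(state == types.SpecStatePassed) // true
}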
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go
new file mode 100644
index 000000000..8dce7be9a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go
@@ -0,0 +1,13 @@
+package failer_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestFailer(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Failer Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go
new file mode 100644
index 000000000..65210a40a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go
@@ -0,0 +1,141 @@
+package failer_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/failer"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Failer", func() {
+ var (
+ failer *Failer
+ codeLocationA types.CodeLocation
+ codeLocationB types.CodeLocation
+ )
+
+ BeforeEach(func() {
+ codeLocationA = codelocation.New(0)
+ codeLocationB = codelocation.New(0)
+ failer = New()
+ })
+
+ Context("with no failures", func() {
+ It("should return success when drained", func() {
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(BeZero())
+ Ω(state).Should(Equal(types.SpecStatePassed))
+ })
+ })
+
+ Describe("Skip", func() {
+ It("should handle failures", func() {
+ failer.Skip("something skipped", codeLocationA)
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "something skipped",
+ Location: codeLocationA,
+ ForwardedPanic: "",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStateSkipped))
+ })
+ })
+
+ Describe("Fail", func() {
+ It("should handle failures", func() {
+ failer.Fail("something failed", codeLocationA)
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "something failed",
+ Location: codeLocationA,
+ ForwardedPanic: "",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStateFailed))
+ })
+ })
+
+ Describe("Panic", func() {
+ It("should handle panics", func() {
+ failer.Panic(codeLocationA, "some forwarded panic")
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "Test Panicked",
+ Location: codeLocationA,
+ ForwardedPanic: "some forwarded panic",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStatePanicked))
+ })
+ })
+
+ Describe("Timeout", func() {
+ It("should handle timeouts", func() {
+ failer.Timeout(codeLocationA)
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "Timed out",
+ Location: codeLocationA,
+ ForwardedPanic: "",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStateTimedOut))
+ })
+ })
+
+ Context("when multiple failures are registered", func() {
+ BeforeEach(func() {
+ failer.Fail("something failed", codeLocationA)
+ failer.Fail("something else failed", codeLocationA)
+ })
+
+ It("should only report the first one when drained", func() {
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "something failed",
+ Location: codeLocationA,
+ ForwardedPanic: "",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStateFailed))
+ })
+
+ It("should report subsequent failures after being drained", func() {
+ failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ failer.Fail("yet another thing failed", codeLocationA)
+
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "yet another thing failed",
+ Location: codeLocationA,
+ ForwardedPanic: "",
+ ComponentType: types.SpecComponentTypeIt,
+ ComponentIndex: 3,
+ ComponentCodeLocation: codeLocationB,
+ }))
+ Ω(state).Should(Equal(types.SpecStateFailed))
+ })
+
+ It("should report sucess on subsequent drains if no errors occur", func() {
+ failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
+ Ω(failure).Should(BeZero())
+ Ω(state).Should(Equal(types.SpecStatePassed))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
new file mode 100644
index 000000000..d6d54234c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
@@ -0,0 +1,103 @@
+package leafnodes
+
+import (
+ "math"
+ "time"
+
+ "sync"
+
+ "github.com/onsi/ginkgo/types"
+)
+
+type benchmarker struct {
+ mu sync.Mutex
+ measurements map[string]*types.SpecMeasurement
+ orderCounter int
+}
+
+func newBenchmarker() *benchmarker {
+ return &benchmarker{
+ measurements: make(map[string]*types.SpecMeasurement, 0),
+ }
+}
+
+func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
+ t := time.Now()
+ body()
+ elapsedTime = time.Since(t)
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
+ measurement.Results = append(measurement.Results, elapsedTime.Seconds())
+
+ return
+}
+
+func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
+ b.mu.Lock()
+ measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
+ defer b.mu.Unlock()
+ measurement.Results = append(measurement.Results, value)
+}
+
+func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
+ b.mu.Lock()
+ measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
+ defer b.mu.Unlock()
+ measurement.Results = append(measurement.Results, value)
+}
+
+func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
+ measurement, ok := b.measurements[name]
+ if !ok {
+ var computedInfo interface{}
+ computedInfo = nil
+ if len(info) > 0 {
+ computedInfo = info[0]
+ }
+ measurement = &types.SpecMeasurement{
+ Name: name,
+ Info: computedInfo,
+ Order: b.orderCounter,
+ SmallestLabel: smallestLabel,
+ LargestLabel: largestLabel,
+ AverageLabel: averageLabel,
+ Units: units,
+ Precision: precision,
+ Results: make([]float64, 0),
+ }
+ b.measurements[name] = measurement
+ b.orderCounter++
+ }
+
+ return measurement
+}
+
+func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ for _, measurement := range b.measurements {
+ measurement.Smallest = math.MaxFloat64
+ measurement.Largest = -math.MaxFloat64
+ sum := float64(0)
+ sumOfSquares := float64(0)
+
+ for _, result := range measurement.Results {
+ if result > measurement.Largest {
+ measurement.Largest = result
+ }
+ if result < measurement.Smallest {
+ measurement.Smallest = result
+ }
+ sum += result
+ sumOfSquares += result * result
+ }
+
+ n := float64(len(measurement.Results))
+ measurement.Average = sum / n
+ measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
+ }
+
+ return b.measurements
+}
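
measurementsReport computes a population standard deviation as sqrt(E[x²] − E[x]²). For the samples {7, 2, 3} exercised by the measure node tests later in this diff, that yields an average of 4 and a deviation of roughly 2.16; a standalone arithmetic check:

package main

import (
	"fmt"
	"math"
)

func main() {
	results := []float64{7, 2, 3}
	sum, sumOfSquares := 0.0, 0.0
	for _, result := range results {
		sum += result
		sumOfSquares += result * result
	}
	n := float64(len(results))
	average := sum / n // 12/3 = 4
	// sqrt(62/3 - 16) = sqrt(4.667) ≈ 2.160, matching the tests' ~2.16 expectation.
	stdDeviation := math.Sqrt(sumOfSquares/n - average*average)
	fmt.Printf("avg=%.3f stddev=%.3f\n", average, stdDeviation)
}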
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
new file mode 100644
index 000000000..8c3902d60
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
@@ -0,0 +1,19 @@
+package leafnodes
+
+import (
+ "github.com/onsi/ginkgo/types"
+)
+
+type BasicNode interface {
+ Type() types.SpecComponentType
+ Run() (types.SpecState, types.SpecFailure)
+ CodeLocation() types.CodeLocation
+}
+
+type SubjectNode interface {
+ BasicNode
+
+ Text() string
+ Flag() types.FlagType
+ Samples() int
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
new file mode 100644
index 000000000..6eded7b76
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
@@ -0,0 +1,47 @@
+package leafnodes
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type ItNode struct {
+ runner *runner
+
+ flag types.FlagType
+ text string
+}
+
+func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
+ return &ItNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
+ flag: flag,
+ text: text,
+ }
+}
+
+func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+ return node.runner.run()
+}
+
+func (node *ItNode) Type() types.SpecComponentType {
+ return types.SpecComponentTypeIt
+}
+
+func (node *ItNode) Text() string {
+ return node.text
+}
+
+func (node *ItNode) Flag() types.FlagType {
+ return node.flag
+}
+
+func (node *ItNode) CodeLocation() types.CodeLocation {
+ return node.runner.codeLocation
+}
+
+func (node *ItNode) Samples() int {
+ return 1
+}
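
ItNode is the concrete type behind the SubjectNode interface from interfaces.go. A compile-time assertion (a sketch, not part of the vendored sources, and subject to the internal-import restriction) makes the relationship explicit:

package main

import "github.com/onsi/ginkgo/internal/leafnodes"

// Fails to compile if *ItNode ever stops satisfying SubjectNode.
var _ leafnodes.SubjectNode = (*leafnodes.ItNode)(nil)

func main() {}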
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go
new file mode 100644
index 000000000..29fa0c6e2
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go
@@ -0,0 +1,22 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("It Nodes", func() {
+ It("should report the correct type, text, flag, and code location", func() {
+ codeLocation := codelocation.New(0)
+ it := NewItNode("my it node", func() {}, types.FlagTypeFocused, codeLocation, 0, nil, 3)
+ Ω(it.Type()).Should(Equal(types.SpecComponentTypeIt))
+ Ω(it.Flag()).Should(Equal(types.FlagTypeFocused))
+ Ω(it.Text()).Should(Equal("my it node"))
+ Ω(it.CodeLocation()).Should(Equal(codeLocation))
+ Ω(it.Samples()).Should(Equal(1))
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go
new file mode 100644
index 000000000..a7ba9e006
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go
@@ -0,0 +1,13 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestLeafNode(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "LeafNode Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
new file mode 100644
index 000000000..3ab9a6d55
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
@@ -0,0 +1,62 @@
+package leafnodes
+
+import (
+ "reflect"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type MeasureNode struct {
+ runner *runner
+
+ text string
+ flag types.FlagType
+ samples int
+ benchmarker *benchmarker
+}
+
+func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
+ benchmarker := newBenchmarker()
+
+ wrappedBody := func() {
+ reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
+ }
+
+ return &MeasureNode{
+ runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
+
+ text: text,
+ flag: flag,
+ samples: samples,
+ benchmarker: benchmarker,
+ }
+}
+
+func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+ return node.runner.run()
+}
+
+func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
+ return node.benchmarker.measurementsReport()
+}
+
+func (node *MeasureNode) Type() types.SpecComponentType {
+ return types.SpecComponentTypeMeasure
+}
+
+func (node *MeasureNode) Text() string {
+ return node.text
+}
+
+func (node *MeasureNode) Flag() types.FlagType {
+ return node.flag
+}
+
+func (node *MeasureNode) CodeLocation() types.CodeLocation {
+ return node.runner.codeLocation
+}
+
+func (node *MeasureNode) Samples() int {
+ return node.samples
+}
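
NewMeasureNode adapts the user's func(Benchmarker) body to the plain func() that newRunner expects by invoking it through reflection. The trick in isolation, as a standalone sketch using a stand-in type rather than the real Benchmarker:

package main

import (
	"fmt"
	"reflect"
)

type bench struct{} // stand-in for ginkgo's Benchmarker

func main() {
	body := func(b *bench) { fmt.Println("measured, got benchmarker:", b != nil) }

	// Equivalent to the wrappedBody closure in NewMeasureNode.
	wrapped := func() {
		reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(&bench{})})
	}
	wrapped() // prints: measured, got benchmarker: true
}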
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
new file mode 100644
index 000000000..1cd13336a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
@@ -0,0 +1,155 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+ . "github.com/onsi/gomega"
+
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Measure Nodes", func() {
+ It("should report the correct type, text, flag, and code location", func() {
+ codeLocation := codelocation.New(0)
+ measure := NewMeasureNode("my measure node", func(b Benchmarker) {}, types.FlagTypeFocused, codeLocation, 10, nil, 3)
+ Ω(measure.Type()).Should(Equal(types.SpecComponentTypeMeasure))
+ Ω(measure.Flag()).Should(Equal(types.FlagTypeFocused))
+ Ω(measure.Text()).Should(Equal("my measure node"))
+ Ω(measure.CodeLocation()).Should(Equal(codeLocation))
+ Ω(measure.Samples()).Should(Equal(10))
+ })
+
+ Describe("benchmarking", func() {
+ var measure *MeasureNode
+
+ Describe("Value", func() {
+ BeforeEach(func() {
+ measure = NewMeasureNode("the measurement", func(b Benchmarker) {
+ b.RecordValue("foo", 7, "info!")
+ b.RecordValue("foo", 2)
+ b.RecordValue("foo", 3)
+ b.RecordValue("bar", 0.3)
+ b.RecordValue("bar", 0.1)
+ b.RecordValue("bar", 0.5)
+ b.RecordValue("bar", 0.7)
+ }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
+ Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
+ })
+
+ It("records passed in values and reports on them", func() {
+ report := measure.MeasurementsReport()
+ Ω(report).Should(HaveLen(2))
+ Ω(report["foo"].Name).Should(Equal("foo"))
+ Ω(report["foo"].Info).Should(Equal("info!"))
+ Ω(report["foo"].Order).Should(Equal(0))
+ Ω(report["foo"].SmallestLabel).Should(Equal("Smallest"))
+ Ω(report["foo"].LargestLabel).Should(Equal(" Largest"))
+ Ω(report["foo"].AverageLabel).Should(Equal(" Average"))
+ Ω(report["foo"].Units).Should(Equal(""))
+ Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3}))
+ Ω(report["foo"].Smallest).Should(BeNumerically("==", 2))
+ Ω(report["foo"].Largest).Should(BeNumerically("==", 7))
+ Ω(report["foo"].Average).Should(BeNumerically("==", 4))
+ Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01))
+
+ Ω(report["bar"].Name).Should(Equal("bar"))
+ Ω(report["bar"].Info).Should(BeNil())
+ Ω(report["bar"].SmallestLabel).Should(Equal("Smallest"))
+ Ω(report["bar"].Order).Should(Equal(1))
+ Ω(report["bar"].LargestLabel).Should(Equal(" Largest"))
+ Ω(report["bar"].AverageLabel).Should(Equal(" Average"))
+ Ω(report["bar"].Units).Should(Equal(""))
+ Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7}))
+ Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1))
+ Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7))
+ Ω(report["bar"].Average).Should(BeNumerically("==", 0.4))
+ Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01))
+ })
+ })
+
+ Describe("Value with precision", func() {
+ BeforeEach(func() {
+ measure = NewMeasureNode("the measurement", func(b Benchmarker) {
+ b.RecordValueWithPrecision("foo", 7, "ms", 7, "info!")
+ b.RecordValueWithPrecision("foo", 2, "ms", 6)
+ b.RecordValueWithPrecision("foo", 3, "ms", 5)
+ b.RecordValueWithPrecision("bar", 0.3, "ns", 4)
+ b.RecordValueWithPrecision("bar", 0.1, "ns", 3)
+ b.RecordValueWithPrecision("bar", 0.5, "ns", 2)
+ b.RecordValueWithPrecision("bar", 0.7, "ns", 1)
+ }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
+ Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
+ })
+
+ It("records passed in values and reports on them", func() {
+ report := measure.MeasurementsReport()
+ Ω(report).Should(HaveLen(2))
+ Ω(report["foo"].Name).Should(Equal("foo"))
+ Ω(report["foo"].Info).Should(Equal("info!"))
+ Ω(report["foo"].Order).Should(Equal(0))
+ Ω(report["foo"].SmallestLabel).Should(Equal("Smallest"))
+ Ω(report["foo"].LargestLabel).Should(Equal(" Largest"))
+ Ω(report["foo"].AverageLabel).Should(Equal(" Average"))
+ Ω(report["foo"].Units).Should(Equal("ms"))
+ Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3}))
+ Ω(report["foo"].Smallest).Should(BeNumerically("==", 2))
+ Ω(report["foo"].Largest).Should(BeNumerically("==", 7))
+ Ω(report["foo"].Average).Should(BeNumerically("==", 4))
+ Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01))
+
+ Ω(report["bar"].Name).Should(Equal("bar"))
+ Ω(report["bar"].Info).Should(BeNil())
+ Ω(report["bar"].SmallestLabel).Should(Equal("Smallest"))
+ Ω(report["bar"].Order).Should(Equal(1))
+ Ω(report["bar"].LargestLabel).Should(Equal(" Largest"))
+ Ω(report["bar"].AverageLabel).Should(Equal(" Average"))
+ Ω(report["bar"].Units).Should(Equal("ns"))
+ Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7}))
+ Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1))
+ Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7))
+ Ω(report["bar"].Average).Should(BeNumerically("==", 0.4))
+ Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01))
+ })
+ })
+
+ Describe("Time", func() {
+ BeforeEach(func() {
+ measure = NewMeasureNode("the measurement", func(b Benchmarker) {
+ b.Time("foo", func() {
+ time.Sleep(200 * time.Millisecond)
+ }, "info!")
+ b.Time("foo", func() {
+ time.Sleep(300 * time.Millisecond)
+ })
+ b.Time("foo", func() {
+ time.Sleep(250 * time.Millisecond)
+ })
+ }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3)
+ Ω(measure.Run()).Should(Equal(types.SpecStatePassed))
+ })
+
+ It("records passed in values and reports on them", func() {
+ report := measure.MeasurementsReport()
+ Ω(report).Should(HaveLen(1))
+ Ω(report["foo"].Name).Should(Equal("foo"))
+ Ω(report["foo"].Info).Should(Equal("info!"))
+ Ω(report["foo"].SmallestLabel).Should(Equal("Fastest Time"))
+ Ω(report["foo"].LargestLabel).Should(Equal("Slowest Time"))
+ Ω(report["foo"].AverageLabel).Should(Equal("Average Time"))
+ Ω(report["foo"].Units).Should(Equal("s"))
+ Ω(report["foo"].Results).Should(HaveLen(3))
+ Ω(report["foo"].Results[0]).Should(BeNumerically("~", 0.2, 0.06))
+ Ω(report["foo"].Results[1]).Should(BeNumerically("~", 0.3, 0.06))
+ Ω(report["foo"].Results[2]).Should(BeNumerically("~", 0.25, 0.06))
+ Ω(report["foo"].Smallest).Should(BeNumerically("~", 0.2, 0.06))
+ Ω(report["foo"].Largest).Should(BeNumerically("~", 0.3, 0.06))
+ Ω(report["foo"].Average).Should(BeNumerically("~", 0.25, 0.06))
+ Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 0.07, 0.04))
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
new file mode 100644
index 000000000..16cb66c3e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
@@ -0,0 +1,117 @@
+package leafnodes
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type runner struct {
+ isAsync bool
+ asyncFunc func(chan<- interface{})
+ syncFunc func()
+ codeLocation types.CodeLocation
+ timeoutThreshold time.Duration
+ nodeType types.SpecComponentType
+ componentIndex int
+ failer *failer.Failer
+}
+
+func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
+ bodyType := reflect.TypeOf(body)
+ if bodyType.Kind() != reflect.Func {
+ panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
+ }
+
+ runner := &runner{
+ codeLocation: codeLocation,
+ timeoutThreshold: timeout,
+ failer: failer,
+ nodeType: nodeType,
+ componentIndex: componentIndex,
+ }
+
+ switch bodyType.NumIn() {
+ case 0:
+ runner.syncFunc = body.(func())
+ return runner
+ case 1:
+ if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
+ panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
+ }
+
+ wrappedBody := func(done chan<- interface{}) {
+ bodyValue := reflect.ValueOf(body)
+ bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
+ }
+
+ runner.isAsync = true
+ runner.asyncFunc = wrappedBody
+ return runner
+ }
+
+ panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
+}
+
+func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
+ if r.isAsync {
+ return r.runAsync()
+ } else {
+ return r.runSync()
+ }
+}
+
+func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
+ done := make(chan interface{}, 1)
+
+ go func() {
+ finished := false
+
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ r.failer.Panic(codelocation.New(2), e)
+ select {
+ case <-done:
+ break
+ default:
+ close(done)
+ }
+ }
+ }()
+
+ r.asyncFunc(done)
+ finished = true
+ }()
+
+ // If this goroutine gets no CPU time before the select block,
+ // the <-done case may complete even if the test took longer than the timeoutThreshold.
+ // This can cause flaky behaviour, but we haven't seen it in the wild.
+ select {
+ case <-done:
+ case <-time.After(r.timeoutThreshold):
+ r.failer.Timeout(r.codeLocation)
+ }
+
+ failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+ return
+}
+func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
+ finished := false
+
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ r.failer.Panic(codelocation.New(2), e)
+ }
+
+ failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
+ }()
+
+ r.syncFunc()
+ finished = true
+
+ return
+}
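
The heart of runAsync is a race between the body's done channel and a timer. Stripped of the failer plumbing, the pattern looks like this (a standalone sketch; runWithTimeout is a hypothetical helper, not ginkgo API):

package main

import (
	"fmt"
	"time"
)

func runWithTimeout(body func(done chan<- interface{}), timeout time.Duration) bool {
	done := make(chan interface{}, 1)
	go body(done)
	select {
	case <-done:
		return true // the body closed done in time
	case <-time.After(timeout):
		return false // where runAsync would call failer.Timeout
	}
}

func main() {
	fmt.Println(runWithTimeout(func(done chan<- interface{}) { close(done) }, time.Second)) // true
	fmt.Println(runWithTimeout(func(done chan<- interface{}) {
		time.Sleep(50 * time.Millisecond) // never closes done
	}, 10*time.Millisecond)) // false
}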
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
new file mode 100644
index 000000000..e3e9cb7c5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
@@ -0,0 +1,48 @@
+package leafnodes
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type SetupNode struct {
+ runner *runner
+}
+
+func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
+ return node.runner.run()
+}
+
+func (node *SetupNode) Type() types.SpecComponentType {
+ return node.runner.nodeType
+}
+
+func (node *SetupNode) CodeLocation() types.CodeLocation {
+ return node.runner.codeLocation
+}
+
+func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+ return &SetupNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
+ }
+}
+
+func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+ return &SetupNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
+ }
+}
+
+func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+ return &SetupNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
+ }
+}
+
+func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
+ return &SetupNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
+ }
+}
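
All four constructors share newRunner and differ only in the SpecComponentType they record. A sketch building and running one node (inside the ginkgo module):

package main

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

func main() {
	f := failer.New()
	before := leafnodes.NewBeforeEachNode(func() {
		// setup work; calling f.Fail(...) here would flip the outcome
	}, codelocation.New(0), 5*time.Second, f, 0)

	state, _ := before.Run()
	fmt.Println(before.Type() == types.SpecComponentTypeBeforeEach) // true
	fmt.Println(state == types.SpecStatePassed)                    // true
}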
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go
new file mode 100644
index 000000000..9810688cb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go
@@ -0,0 +1,48 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+)
+
+var _ = Describe("Setup Nodes", func() {
+ Describe("BeforeEachNodes", func() {
+ It("should report the correct type and code location", func() {
+ codeLocation := codelocation.New(0)
+ beforeEach := NewBeforeEachNode(func() {}, codeLocation, 0, nil, 3)
+ Ω(beforeEach.Type()).Should(Equal(types.SpecComponentTypeBeforeEach))
+ Ω(beforeEach.CodeLocation()).Should(Equal(codeLocation))
+ })
+ })
+
+ Describe("AfterEachNodes", func() {
+ It("should report the correct type and code location", func() {
+ codeLocation := codelocation.New(0)
+ afterEach := NewAfterEachNode(func() {}, codeLocation, 0, nil, 3)
+ Ω(afterEach.Type()).Should(Equal(types.SpecComponentTypeAfterEach))
+ Ω(afterEach.CodeLocation()).Should(Equal(codeLocation))
+ })
+ })
+
+ Describe("JustBeforeEachNodes", func() {
+ It("should report the correct type and code location", func() {
+ codeLocation := codelocation.New(0)
+ justBeforeEach := NewJustBeforeEachNode(func() {}, codeLocation, 0, nil, 3)
+ Ω(justBeforeEach.Type()).Should(Equal(types.SpecComponentTypeJustBeforeEach))
+ Ω(justBeforeEach.CodeLocation()).Should(Equal(codeLocation))
+ })
+ })
+ Describe("JustAfterEachNodes", func() {
+ It("should report the correct type and code location", func() {
+ codeLocation := codelocation.New(0)
+ justAfterEach := NewJustAfterEachNode(func() {}, codeLocation, 0, nil, 3)
+ Ω(justAfterEach.Type()).Should(Equal(types.SpecComponentTypeJustAfterEach))
+ Ω(justAfterEach.CodeLocation()).Should(Equal(codeLocation))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
new file mode 100644
index 000000000..0897836cb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
@@ -0,0 +1,353 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+ . "github.com/onsi/gomega"
+
+ "reflect"
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type runnable interface {
+ Run() (outcome types.SpecState, failure types.SpecFailure)
+ CodeLocation() types.CodeLocation
+}
+
+func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
+ var (
+ outcome types.SpecState
+ failure types.SpecFailure
+
+ failer *Failer.Failer
+
+ componentCodeLocation types.CodeLocation
+ innerCodeLocation types.CodeLocation
+
+ didRun bool
+ )
+
+ BeforeEach(func() {
+ failer = Failer.New()
+ componentCodeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+
+ didRun = false
+ })
+
+ Describe("synchronous functions", func() {
+ Context("when the function passes", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func() {
+ didRun = true
+ }, 0, failer, componentCodeLocation).Run()
+ })
+
+ It("should have a succesful outcome", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStatePassed))
+ Ω(failure).Should(BeZero())
+ })
+ })
+
+ Context("when a failure occurs", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func() {
+ didRun = true
+ failer.Fail("bam", innerCodeLocation)
+ panic("should not matter")
+ }, 0, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the failure", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStateFailed))
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "bam",
+ Location: innerCodeLocation,
+ ForwardedPanic: "",
+ ComponentIndex: componentIndex,
+ ComponentType: componentType,
+ ComponentCodeLocation: componentCodeLocation,
+ }))
+ })
+ })
+
+ Context("when a panic occurs", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func() {
+ didRun = true
+ innerCodeLocation = codelocation.New(0)
+ panic("ack!")
+ }, 0, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the panic", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStatePanicked))
+ Ω(failure.ForwardedPanic).Should(Equal("ack!"))
+ })
+ })
+
+ Context("when a panic occurs with a nil value", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func() {
+ didRun = true
+ innerCodeLocation = codelocation.New(0)
+ panic(nil)
+ }, 0, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the nil-valued panic", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStatePanicked))
+ Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
+ })
+ })
+
+ })
+}
+
+func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
+ var (
+ outcome types.SpecState
+ failure types.SpecFailure
+
+ failer *Failer.Failer
+
+ componentCodeLocation types.CodeLocation
+ innerCodeLocation types.CodeLocation
+
+ didRun bool
+ )
+
+ BeforeEach(func() {
+ failer = Failer.New()
+ componentCodeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+
+ didRun = false
+ })
+
+ Describe("asynchronous functions", func() {
+ var timeoutDuration time.Duration
+
+ BeforeEach(func() {
+ timeoutDuration = time.Second
+ })
+
+ Context("when running", func() {
+ It("should run the function as a goroutine, and block until it's done", func() {
+ proveAsync := make(chan bool)
+
+ build(func(done Done) {
+ didRun = true
+ proveAsync <- true
+ close(done)
+ }, timeoutDuration, failer, componentCodeLocation).Run()
+
+ Eventually(proveAsync).Should(Receive(Equal(true)))
+ })
+ })
+
+ Context("when the function passes", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func(done Done) {
+ didRun = true
+ close(done)
+ }, timeoutDuration, failer, componentCodeLocation).Run()
+ })
+
+ It("should have a succesful outcome", func() {
+ Ω(didRun).Should(BeTrue())
+ Ω(outcome).Should(Equal(types.SpecStatePassed))
+ Ω(failure).Should(BeZero())
+ })
+ })
+
+ Context("when the function fails", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func(done Done) {
+ didRun = true
+ failer.Fail("bam", innerCodeLocation)
+ time.Sleep(20 * time.Millisecond)
+ defer close(done)
+ panic("doesn't matter")
+ }, 10*time.Millisecond, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the failure", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStateFailed))
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "bam",
+ Location: innerCodeLocation,
+ ForwardedPanic: "",
+ ComponentIndex: componentIndex,
+ ComponentType: componentType,
+ ComponentCodeLocation: componentCodeLocation,
+ }))
+ })
+ })
+
+ Context("when the function doesn't close the done channel in time", func() {
+ var guard chan struct{}
+
+ BeforeEach(func() {
+ guard = make(chan struct{})
+ outcome, failure = build(func(done Done) {
+ didRun = true
+ close(guard)
+ }, 10*time.Millisecond, failer, componentCodeLocation).Run()
+ })
+
+ It("should return a timeout", func() {
+ <-guard
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStateTimedOut))
+ Ω(failure).Should(Equal(types.SpecFailure{
+ Message: "Timed out",
+ Location: componentCodeLocation,
+ ForwardedPanic: "",
+ ComponentIndex: componentIndex,
+ ComponentType: componentType,
+ ComponentCodeLocation: componentCodeLocation,
+ }))
+ })
+ })
+
+ Context("when the function panics", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func(done Done) {
+ didRun = true
+ innerCodeLocation = codelocation.New(0)
+ panic("ack!")
+ }, 100*time.Millisecond, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the panic", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStatePanicked))
+ Ω(failure.ForwardedPanic).Should(Equal("ack!"))
+ })
+ })
+
+ Context("when the function panics with a nil value", func() {
+ BeforeEach(func() {
+ outcome, failure = build(func(done Done) {
+ didRun = true
+ innerCodeLocation = codelocation.New(0)
+ panic(nil)
+ }, 100*time.Millisecond, failer, componentCodeLocation).Run()
+ })
+
+ It("should return the nil-valued panic", func() {
+ Ω(didRun).Should(BeTrue())
+
+ Ω(outcome).Should(Equal(types.SpecStatePanicked))
+ Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
+ })
+ })
+ })
+}
+
+func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) {
+ var (
+ failer *Failer.Failer
+ componentCodeLocation types.CodeLocation
+ )
+
+ BeforeEach(func() {
+ failer = Failer.New()
+ componentCodeLocation = codelocation.New(0)
+ })
+
+ Describe("invalid functions", func() {
+ Context("when passed something that's not a function", func() {
+ It("should panic", func() {
+ Ω(func() {
+ build("not a function", 0, failer, componentCodeLocation)
+ }).Should(Panic())
+ })
+ })
+
+ Context("when the function takes the wrong kind of argument", func() {
+ It("should panic", func() {
+ Ω(func() {
+ build(func(oops string) {}, 0, failer, componentCodeLocation)
+ }).Should(Panic())
+ })
+ })
+
+ Context("when the function takes more than one argument", func() {
+ It("should panic", func() {
+ Ω(func() {
+ build(func(done Done, oops string) {}, 0, failer, componentCodeLocation)
+ }).Should(Panic())
+ })
+ })
+ })
+}
+
+var _ = Describe("Shared RunnableNode behavior", func() {
+ Describe("It Nodes", func() {
+ build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
+ return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3)
+ }
+
+ SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
+ AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
+ InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt)
+ })
+
+ Describe("Measure Nodes", func() {
+ build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
+ return NewMeasureNode("", func(Benchmarker) {
+ reflect.ValueOf(body).Call([]reflect.Value{})
+ }, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3)
+ }
+
+ SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3)
+ })
+
+ Describe("BeforeEach Nodes", func() {
+ build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
+ return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
+ }
+
+ SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
+ AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
+ InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach)
+ })
+
+ Describe("AfterEach Nodes", func() {
+ build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
+ return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3)
+ }
+
+ SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
+ AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
+ InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach)
+ })
+
+ Describe("JustBeforeEach Nodes", func() {
+ build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
+ return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
+ }
+
+ SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
+ AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
+ InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach)
+ })
+})
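The shared-behavior pattern above deserves a note: SynchronousSharedRunnerBehaviors and friends are ordinary functions taking a build factory, so one set of specs is stamped out for It, Measure, BeforeEach, AfterEach, and JustBeforeEach nodes alike. A minimal sketch of the same pattern with an invented example type, assuming only Ginkgo v1 and Gomega:

package shared_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// sharedStackBehaviors registers identical specs for any stack-like
// value produced by the factory; each spec builds a fresh instance.
func sharedStackBehaviors(build func() []int) {
	It("should start empty", func() {
		Ω(build()).Should(BeEmpty())
	})
}

var _ = Describe("stacks of every flavor", func() {
	sharedStackBehaviors(func() []int { return nil })
	sharedStackBehaviors(func() []int { return []int{} })
})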
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
new file mode 100644
index 000000000..80f16ed78
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
@@ -0,0 +1,55 @@
+package leafnodes
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type SuiteNode interface {
+ Run(parallelNode int, parallelTotal int, syncHost string) bool
+ Passed() bool
+ Summary() *types.SetupSummary
+}
+
+type simpleSuiteNode struct {
+ runner *runner
+ outcome types.SpecState
+ failure types.SpecFailure
+ runTime time.Duration
+}
+
+func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+ t := time.Now()
+ node.outcome, node.failure = node.runner.run()
+ node.runTime = time.Since(t)
+
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Passed() bool {
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *simpleSuiteNode) Summary() *types.SetupSummary {
+ return &types.SetupSummary{
+ ComponentType: node.runner.nodeType,
+ CodeLocation: node.runner.codeLocation,
+ State: node.outcome,
+ RunTime: node.runTime,
+ Failure: node.failure,
+ }
+}
+
+func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+ return &simpleSuiteNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
+ }
+}
+
+func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+ return &simpleSuiteNode{
+ runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+ }
+}
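For orientation, a SuiteNode is built once and driven through Run(parallelNode, parallelTotal, syncHost); the simple nodes above ignore the sync host entirely. A hypothetical driver is sketched below; note that ginkgo's internal packages cannot be imported from outside the ginkgo tree, so this is illustrative only:

package main

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/leafnodes"
)

func main() {
	node := leafnodes.NewBeforeSuiteNode(func() {
		// global setup would go here
	}, codelocation.New(0), 5*time.Second, failer.New())

	// Serial run: node 1 of 1, no sync host required.
	if node.Run(1, 1, "") {
		fmt.Println("before-suite passed in", node.Summary().RunTime)
	} else {
		fmt.Println("before-suite failed:", node.Summary().Failure.Message)
	}
}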
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go
new file mode 100644
index 000000000..246b329fe
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go
@@ -0,0 +1,230 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("SuiteNodes", func() {
+ Describe("BeforeSuite nodes", func() {
+ var befSuite SuiteNode
+ var failer *Failer.Failer
+ var codeLocation types.CodeLocation
+ var innerCodeLocation types.CodeLocation
+ var outcome bool
+
+ BeforeEach(func() {
+ failer = Failer.New()
+ codeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+ })
+
+ Context("when the body passes", func() {
+ BeforeEach(func() {
+ befSuite = NewBeforeSuiteNode(func() {
+ time.Sleep(10 * time.Millisecond)
+ }, codeLocation, 0, failer)
+ outcome = befSuite.Run(0, 0, "")
+ })
+
+ It("should return true when run and report as passed", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(befSuite.Passed()).Should(BeTrue())
+ })
+
+ It("should have the correct summary", func() {
+ summary := befSuite.Summary()
+ Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.CodeLocation).Should(Equal(codeLocation))
+ Ω(summary.State).Should(Equal(types.SpecStatePassed))
+ Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
+ Ω(summary.Failure).Should(BeZero())
+ })
+ })
+
+ Context("when the body fails", func() {
+ BeforeEach(func() {
+ befSuite = NewBeforeSuiteNode(func() {
+ failer.Fail("oops", innerCodeLocation)
+ }, codeLocation, 0, failer)
+ outcome = befSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(befSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := befSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateFailed))
+ Ω(summary.Failure.Message).Should(Equal("oops"))
+ Ω(summary.Failure.Location).Should(Equal(innerCodeLocation))
+ Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+
+ Context("when the body times out", func() {
+ BeforeEach(func() {
+ befSuite = NewBeforeSuiteNode(func(done Done) {
+ }, codeLocation, time.Millisecond, failer)
+ outcome = befSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(befSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := befSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateTimedOut))
+ Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+
+ Context("when the body panics", func() {
+ BeforeEach(func() {
+ befSuite = NewBeforeSuiteNode(func() {
+ panic("bam")
+ }, codeLocation, 0, failer)
+ outcome = befSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(befSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := befSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStatePanicked))
+ Ω(summary.Failure.ForwardedPanic).Should(Equal("bam"))
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+ })
+
+ Describe("AfterSuite nodes", func() {
+ var aftSuite SuiteNode
+ var failer *Failer.Failer
+ var codeLocation types.CodeLocation
+ var innerCodeLocation types.CodeLocation
+ var outcome bool
+
+ BeforeEach(func() {
+ failer = Failer.New()
+ codeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+ })
+
+ Context("when the body passes", func() {
+ BeforeEach(func() {
+ aftSuite = NewAfterSuiteNode(func() {
+ time.Sleep(10 * time.Millisecond)
+ }, codeLocation, 0, failer)
+ outcome = aftSuite.Run(0, 0, "")
+ })
+
+ It("should return true when run and report as passed", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(aftSuite.Passed()).Should(BeTrue())
+ })
+
+ It("should have the correct summary", func() {
+ summary := aftSuite.Summary()
+ Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
+ Ω(summary.CodeLocation).Should(Equal(codeLocation))
+ Ω(summary.State).Should(Equal(types.SpecStatePassed))
+ Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
+ Ω(summary.Failure).Should(BeZero())
+ })
+ })
+
+ Context("when the body fails", func() {
+ BeforeEach(func() {
+ aftSuite = NewAfterSuiteNode(func() {
+ failer.Fail("oops", innerCodeLocation)
+ }, codeLocation, 0, failer)
+ outcome = aftSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(aftSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := aftSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateFailed))
+ Ω(summary.Failure.Message).Should(Equal("oops"))
+ Ω(summary.Failure.Location).Should(Equal(innerCodeLocation))
+ Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+
+ Context("when the body times out", func() {
+ BeforeEach(func() {
+ aftSuite = NewAfterSuiteNode(func(done Done) {
+ }, codeLocation, time.Millisecond, failer)
+ outcome = aftSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(aftSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := aftSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateTimedOut))
+ Ω(summary.Failure.ForwardedPanic).Should(BeEmpty())
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+
+ Context("when the body panics", func() {
+ BeforeEach(func() {
+ aftSuite = NewAfterSuiteNode(func() {
+ panic("bam")
+ }, codeLocation, 0, failer)
+ outcome = aftSuite.Run(0, 0, "")
+ })
+
+ It("should return false when run and report as failed", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(aftSuite.Passed()).Should(BeFalse())
+ })
+
+ It("should have the correct summary", func() {
+ summary := aftSuite.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStatePanicked))
+ Ω(summary.Failure.ForwardedPanic).Should(Equal("bam"))
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
new file mode 100644
index 000000000..a721d0cf7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
@@ -0,0 +1,90 @@
+package leafnodes
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type synchronizedAfterSuiteNode struct {
+ runnerA *runner
+ runnerB *runner
+
+ outcome types.SpecState
+ failure types.SpecFailure
+ runTime time.Duration
+}
+
+func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+ return &synchronizedAfterSuiteNode{
+ runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+ runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
+ }
+}
+
+func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+ t := time.Now()
+ defer func() {
+ node.runTime = time.Since(t)
+ }()
+
+ node.outcome, node.failure = node.runnerA.run()
+
+ if parallelNode == 1 {
+ if parallelTotal > 1 {
+ node.waitUntilOtherNodesAreDone(syncHost)
+ }
+
+ outcome, failure := node.runnerB.run()
+
+ if node.outcome == types.SpecStatePassed {
+ node.outcome, node.failure = outcome, failure
+ }
+ }
+
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Passed() bool {
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
+ return &types.SetupSummary{
+ ComponentType: node.runnerA.nodeType,
+ CodeLocation: node.runnerA.codeLocation,
+ State: node.outcome,
+ RunTime: node.runTime,
+ Failure: node.failure,
+ }
+}
+
+func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
+ for {
+ if node.canRun(syncHost) {
+ return
+ }
+
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
+ resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return false
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return false
+ }
+
+ afterSuiteData := types.RemoteAfterSuiteData{}
+ err = json.Unmarshal(body, &afterSuiteData)
+ if err != nil {
+ return false
+ }
+
+ return afterSuiteData.CanRun
+}
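canRun and waitUntilOtherNodesAreDone together form a plain HTTP poll loop. The same shape, reduced to a standalone sketch against a hypothetical sync host (the local status type only mirrors the CanRun field of types.RemoteAfterSuiteData):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type afterSuiteStatus struct {
	CanRun bool
}

// ready fetches the status once; any transport, HTTP, or decode
// error counts as "not yet", exactly as in canRun above.
func ready(url string) bool {
	resp, err := http.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return false
	}
	var s afterSuiteStatus
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		return false
	}
	return s.CanRun
}

func main() {
	url := "http://127.0.0.1:8080/RemoteAfterSuiteData" // hypothetical sync host
	for !ready(url) {
		time.Sleep(50 * time.Millisecond)
	}
	fmt.Println("all other nodes are done")
}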
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
new file mode 100644
index 000000000..edbdf6ae5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
@@ -0,0 +1,199 @@
+package leafnodes_test
+
+import (
+ "sync"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+
+ "net/http"
+
+ "github.com/onsi/gomega/ghttp"
+
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+)
+
+var _ = Describe("SynchronizedAfterSuiteNode", func() {
+ var failer *Failer.Failer
+ var node SuiteNode
+ var codeLocation types.CodeLocation
+ var innerCodeLocation types.CodeLocation
+ var outcome bool
+ var server *ghttp.Server
+ var things []string
+ var lock *sync.Mutex
+
+ BeforeEach(func() {
+ things = []string{}
+ server = ghttp.NewServer()
+ codeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+ failer = Failer.New()
+ lock = &sync.Mutex{}
+ })
+
+ AfterEach(func() {
+ server.Close()
+ })
+
+ newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode {
+ return NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer)
+ }
+
+ ranThing := func(thing string) {
+ lock.Lock()
+ defer lock.Unlock()
+ things = append(things, thing)
+ }
+
+ thingsThatRan := func() []string {
+ lock.Lock()
+ defer lock.Unlock()
+ return things
+ }
+
+ Context("when not running in parallel", func() {
+ Context("when all is well", func() {
+ BeforeEach(func() {
+ node = newNode(func() {
+ ranThing("A")
+ }, func() {
+ ranThing("B")
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should run A, then B", func() {
+ Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
+ })
+
+ It("should report success", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(node.Passed()).Should(BeTrue())
+ Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
+ })
+ })
+
+ Context("when A fails", func() {
+ BeforeEach(func() {
+ node = newNode(func() {
+ ranThing("A")
+ failer.Fail("bam", innerCodeLocation)
+ }, func() {
+ ranThing("B")
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should still run B", func() {
+ Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
+ })
+ })
+
+ Context("when B fails", func() {
+ BeforeEach(func() {
+ node = newNode(func() {
+ ranThing("A")
+ }, func() {
+ ranThing("B")
+ failer.Fail("bam", innerCodeLocation)
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should run all the things", func() {
+ Ω(thingsThatRan()).Should(Equal([]string{"A", "B"}))
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
+ })
+ })
+ })
+
+ Context("when running in parallel", func() {
+ Context("as the first node", func() {
+ BeforeEach(func() {
+ server.AppendHandlers(ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
+ func(writer http.ResponseWriter, request *http.Request) {
+ ranThing("Request1")
+ },
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
+ ), ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
+ func(writer http.ResponseWriter, request *http.Request) {
+ ranThing("Request2")
+ },
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
+ ), ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
+ func(writer http.ResponseWriter, request *http.Request) {
+ ranThing("Request3")
+ },
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: true}),
+ ))
+
+ node = newNode(func() {
+ ranThing("A")
+ }, func() {
+ ranThing("B")
+ })
+
+ outcome = node.Run(1, 3, server.URL())
+ })
+
+ It("should run A and, when the server says its time, run B", func() {
+ Ω(thingsThatRan()).Should(Equal([]string{"A", "Request1", "Request2", "Request3", "B"}))
+ })
+
+ It("should report success", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(node.Passed()).Should(BeTrue())
+ Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
+ })
+ })
+
+ Context("as any other node", func() {
+ BeforeEach(func() {
+ node = newNode(func() {
+ ranThing("A")
+ }, func() {
+ ranThing("B")
+ })
+
+ outcome = node.Run(2, 3, server.URL())
+ })
+
+ It("should run A, and not run B", func() {
+ Ω(thingsThatRan()).Should(Equal([]string{"A"}))
+ })
+
+ It("should not talk to the server", func() {
+ Ω(server.ReceivedRequests()).Should(BeEmpty())
+ })
+
+ It("should report success", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(node.Passed()).Should(BeTrue())
+ Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
+ })
+ })
+ })
+})
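These specs lean on one ghttp property: handlers appended to the server are consumed one per request, in order, which is how the three polling responses are scripted. A tiny standalone demonstration, with an arbitrary endpoint path:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/onsi/gomega/ghttp"
)

func main() {
	server := ghttp.NewServer()
	defer server.Close()

	// One handler is consumed per incoming request, in append order.
	server.AppendHandlers(
		ghttp.RespondWith(http.StatusOK, `{"CanRun":false}`),
		ghttp.RespondWith(http.StatusOK, `{"CanRun":true}`),
	)

	for i := 0; i < 2; i++ {
		resp, err := http.Get(server.URL() + "/RemoteAfterSuiteData")
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(string(body)) // {"CanRun":false}, then {"CanRun":true}
	}
}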
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
new file mode 100644
index 000000000..d5c889319
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
@@ -0,0 +1,181 @@
+package leafnodes
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type synchronizedBeforeSuiteNode struct {
+ runnerA *runner
+ runnerB *runner
+
+ data []byte
+
+ outcome types.SpecState
+ failure types.SpecFailure
+ runTime time.Duration
+}
+
+func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
+ node := &synchronizedBeforeSuiteNode{}
+
+ node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+ node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
+
+ return node
+}
+
+func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
+ t := time.Now()
+ defer func() {
+ node.runTime = time.Since(t)
+ }()
+
+ if parallelNode == 1 {
+ node.outcome, node.failure = node.runA(parallelTotal, syncHost)
+ } else {
+ node.outcome, node.failure = node.waitForA(syncHost)
+ }
+
+ if node.outcome != types.SpecStatePassed {
+ return false
+ }
+ node.outcome, node.failure = node.runnerB.run()
+
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
+ outcome, failure := node.runnerA.run()
+
+ if parallelTotal > 1 {
+ state := types.RemoteBeforeSuiteStatePassed
+ if outcome != types.SpecStatePassed {
+ state = types.RemoteBeforeSuiteStateFailed
+ }
+ json := (types.RemoteBeforeSuiteData{
+ Data: node.data,
+ State: state,
+ }).ToJSON()
+ http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
+ }
+
+ return outcome, failure
+}
+
+func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
+ failure := func(message string) types.SpecFailure {
+ return types.SpecFailure{
+ Message: message,
+ Location: node.runnerA.codeLocation,
+ ComponentType: node.runnerA.nodeType,
+ ComponentIndex: node.runnerA.componentIndex,
+ ComponentCodeLocation: node.runnerA.codeLocation,
+ }
+ }
+ for {
+ resp, err := http.Get(syncHost + "/BeforeSuiteState")
+ if err != nil {
+ return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
+ }
+ if resp.StatusCode != http.StatusOK {
+ resp.Body.Close()
+ return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
+ }
+
+ beforeSuiteData := types.RemoteBeforeSuiteData{}
+ err = json.Unmarshal(body, &beforeSuiteData)
+ if err != nil {
+ return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
+ }
+
+ switch beforeSuiteData.State {
+ case types.RemoteBeforeSuiteStatePassed:
+ node.data = beforeSuiteData.Data
+ return types.SpecStatePassed, types.SpecFailure{}
+ case types.RemoteBeforeSuiteStateFailed:
+ return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
+ case types.RemoteBeforeSuiteStateDisappeared:
+ return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
+ }
+
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+func (node *synchronizedBeforeSuiteNode) Passed() bool {
+ return node.outcome == types.SpecStatePassed
+}
+
+func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
+ return &types.SetupSummary{
+ ComponentType: node.runnerA.nodeType,
+ CodeLocation: node.runnerA.codeLocation,
+ State: node.outcome,
+ RunTime: node.runTime,
+ Failure: node.failure,
+ }
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
+ typeA := reflect.TypeOf(bodyA)
+ if typeA.Kind() != reflect.Func {
+ panic("SynchronizedBeforeSuite expects a function as its first argument")
+ }
+
+ takesNothing := typeA.NumIn() == 0
+ takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
+ returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
+
+ if !((takesNothing || takesADoneChannel) && returnsBytes) {
+ panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
+ }
+
+ if takesADoneChannel {
+ return func(done chan<- interface{}) {
+ out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
+ node.data = out[0].Interface().([]byte)
+ }
+ }
+
+ return func() {
+ out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
+ node.data = out[0].Interface().([]byte)
+ }
+}
+
+func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
+ typeB := reflect.TypeOf(bodyB)
+ if typeB.Kind() != reflect.Func {
+ panic("SynchronizedBeforeSuite expects a function as its second argument")
+ }
+
+ returnsNothing := typeB.NumOut() == 0
+ takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
+ takesBytesAndDone := typeB.NumIn() == 2 &&
+ typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
+ typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
+
+ if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
+ panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
+ }
+
+ if takesBytesAndDone {
+ return func(done chan<- interface{}) {
+ reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
+ }
+ }
+
+ return func() {
+ reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
+ }
+}
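wrapA and wrapB show the reflect idiom used to validate user-supplied function signatures before adapting them. The check for the func([]byte) shape, extracted into a self-contained sketch:

package main

import (
	"fmt"
	"reflect"
)

// isByteSliceConsumer reports whether fn has the shape func([]byte),
// the takesBytesOnly case validated by wrapB above.
func isByteSliceConsumer(fn interface{}) bool {
	t := reflect.TypeOf(fn)
	if t == nil || t.Kind() != reflect.Func {
		return false
	}
	return t.NumOut() == 0 &&
		t.NumIn() == 1 &&
		t.In(0).Kind() == reflect.Slice &&
		t.In(0).Elem().Kind() == reflect.Uint8
}

func main() {
	fmt.Println(isByteSliceConsumer(func(b []byte) {})) // true
	fmt.Println(isByteSliceConsumer(func(s string) {})) // false
	fmt.Println(isByteSliceConsumer(42))                // false
}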
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
new file mode 100644
index 000000000..46c3e276b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
@@ -0,0 +1,446 @@
+package leafnodes_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/leafnodes"
+ . "github.com/onsi/gomega"
+
+ "net/http"
+
+ "github.com/onsi/gomega/ghttp"
+
+ "time"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("SynchronizedBeforeSuiteNode", func() {
+ var failer *Failer.Failer
+ var node SuiteNode
+ var codeLocation types.CodeLocation
+ var innerCodeLocation types.CodeLocation
+ var outcome bool
+ var server *ghttp.Server
+
+ BeforeEach(func() {
+ server = ghttp.NewServer()
+ codeLocation = codelocation.New(0)
+ innerCodeLocation = codelocation.New(0)
+ failer = Failer.New()
+ })
+
+ AfterEach(func() {
+ server.Close()
+ })
+
+ newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode {
+ return NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer)
+ }
+
+ Describe("when not running in parallel", func() {
+ Context("when all is well", func() {
+ var data []byte
+ BeforeEach(func() {
+ data = nil
+
+ node = newNode(func() []byte {
+ return []byte("my data")
+ }, func(d []byte) {
+ data = d
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should run A, then B passing the output from A to B", func() {
+ Ω(data).Should(Equal([]byte("my data")))
+ })
+
+ It("should report success", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(node.Passed()).Should(BeTrue())
+ Ω(node.Summary().State).Should(Equal(types.SpecStatePassed))
+ })
+ })
+
+ Context("when A fails", func() {
+ var ranB bool
+ BeforeEach(func() {
+ ranB = false
+ node = newNode(func() []byte {
+ failer.Fail("boom", innerCodeLocation)
+ return nil
+ }, func([]byte) {
+ ranB = true
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should not run B", func() {
+ Ω(ranB).Should(BeFalse())
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
+ })
+ })
+
+ Context("when B fails", func() {
+ BeforeEach(func() {
+ node = newNode(func() []byte {
+ return nil
+ }, func([]byte) {
+ failer.Fail("boom", innerCodeLocation)
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateFailed))
+ })
+ })
+
+ Context("when A times out", func() {
+ var ranB bool
+ BeforeEach(func() {
+ ranB = false
+ node = newNode(func(Done) []byte {
+ time.Sleep(time.Second)
+ return nil
+ }, func([]byte) {
+ ranB = true
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should not run B", func() {
+ Ω(ranB).Should(BeFalse())
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut))
+ })
+ })
+
+ Context("when B times out", func() {
+ BeforeEach(func() {
+ node = newNode(func() []byte {
+ return nil
+ }, func([]byte, Done) {
+ time.Sleep(time.Second)
+ })
+
+ outcome = node.Run(1, 1, server.URL())
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+ Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut))
+ })
+ })
+ })
+
+ Describe("when running in parallel", func() {
+ var ranB bool
+ var parallelNode, parallelTotal int
+ BeforeEach(func() {
+ ranB = false
+ parallelNode, parallelTotal = 1, 3
+ })
+
+ Context("as the first node, it runs A", func() {
+ var expectedState types.RemoteBeforeSuiteData
+
+ BeforeEach(func() {
+ parallelNode, parallelTotal = 1, 3
+ })
+
+ JustBeforeEach(func() {
+ server.AppendHandlers(ghttp.CombineHandlers(
+ ghttp.VerifyRequest("POST", "/BeforeSuiteState"),
+ ghttp.VerifyJSONRepresenting(expectedState),
+ ))
+
+ outcome = node.Run(parallelNode, parallelTotal, server.URL())
+ })
+
+ Context("when A succeeds", func() {
+ BeforeEach(func() {
+ expectedState = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
+
+ node = newNode(func() []byte {
+ return []byte("my data")
+ }, func([]byte) {
+ ranB = true
+ })
+ })
+
+ It("should post about A succeeding", func() {
+ Ω(server.ReceivedRequests()).Should(HaveLen(1))
+ })
+
+ It("should run B", func() {
+ Ω(ranB).Should(BeTrue())
+ })
+
+ It("should report success", func() {
+ Ω(outcome).Should(BeTrue())
+ })
+ })
+
+ Context("when A fails", func() {
+ BeforeEach(func() {
+ expectedState = types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateFailed}
+
+ node = newNode(func() []byte {
+ panic("BAM")
+ }, func([]byte) {
+ ranB = true
+ })
+ })
+
+ It("should post about A failing", func() {
+ Ω(server.ReceivedRequests()).Should(HaveLen(1))
+ })
+
+ It("should not run B", func() {
+ Ω(ranB).Should(BeFalse())
+ })
+
+ It("should report failure", func() {
+ Ω(outcome).Should(BeFalse())
+ })
+ })
+ })
+
+ Context("as the Nth node", func() {
+ var statusCode int
+ var response interface{}
+ var ranA bool
+ var bData []byte
+
+ BeforeEach(func() {
+ ranA = false
+ bData = nil
+
+ statusCode = http.StatusOK
+
+ server.AppendHandlers(ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
+ ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
+ ), ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
+ ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
+ ), ghttp.CombineHandlers(
+ ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
+ ghttp.RespondWithJSONEncodedPtr(&statusCode, &response),
+ ))
+
+ node = newNode(func() []byte {
+ ranA = true
+ return nil
+ }, func(data []byte) {
+ bData = data
+ })
+
+ parallelNode, parallelTotal = 2, 3
+ })
+
+ Context("when A on node1 succeeds", func() {
+ BeforeEach(func() {
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
+ outcome = node.Run(parallelNode, parallelTotal, server.URL())
+ })
+
+ It("should not run A", func() {
+ Ω(ranA).Should(BeFalse())
+ })
+
+ It("should poll for A", func() {
+ Ω(server.ReceivedRequests()).Should(HaveLen(3))
+ })
+
+ It("should run B when the polling succeeds", func() {
+ Ω(bData).Should(Equal([]byte("my data")))
+ })
+
+ It("should succeed", func() {
+ Ω(outcome).Should(BeTrue())
+ Ω(node.Passed()).Should(BeTrue())
+ })
+ })
+
+ Context("when A on node1 fails", func() {
+ BeforeEach(func() {
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateFailed}
+ outcome = node.Run(parallelNode, parallelTotal, server.URL())
+ })
+
+ It("should not run A", func() {
+ Ω(ranA).Should(BeFalse())
+ })
+
+ It("should poll for A", func() {
+ Ω(server.ReceivedRequests()).Should(HaveLen(3))
+ })
+
+ It("should not run B", func() {
+ Ω(bData).Should(BeNil())
+ })
+
+ It("should fail", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+
+ summary := node.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateFailed))
+ Ω(summary.Failure.Message).Should(Equal("BeforeSuite on Node 1 failed"))
+ Ω(summary.Failure.Location).Should(Equal(codeLocation))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+
+ Context("when node1 disappears", func() {
+ BeforeEach(func() {
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateDisappeared}
+ outcome = node.Run(parallelNode, parallelTotal, server.URL())
+ })
+
+ It("should not run A", func() {
+ Ω(ranA).Should(BeFalse())
+ })
+
+ It("should poll for A", func() {
+ Ω(server.ReceivedRequests()).Should(HaveLen(3))
+ })
+
+ It("should not run B", func() {
+ Ω(bData).Should(BeNil())
+ })
+
+ It("should fail", func() {
+ Ω(outcome).Should(BeFalse())
+ Ω(node.Passed()).Should(BeFalse())
+
+ summary := node.Summary()
+ Ω(summary.State).Should(Equal(types.SpecStateFailed))
+ Ω(summary.Failure.Message).Should(Equal("Node 1 disappeared before completing BeforeSuite"))
+ Ω(summary.Failure.Location).Should(Equal(codeLocation))
+ Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite))
+ Ω(summary.Failure.ComponentIndex).Should(Equal(0))
+ Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation))
+ })
+ })
+ })
+ })
+
+ Describe("construction", func() {
+ Describe("the first function", func() {
+ Context("when the first function returns a byte array", func() {
+ Context("and takes nothing", func() {
+ It("should be fine", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func([]byte) {})
+ }).ShouldNot(Panic())
+ })
+ })
+
+ Context("and takes a done function", func() {
+ It("should be fine", func() {
+ Ω(func() {
+ newNode(func(Done) []byte { return nil }, func([]byte) {})
+ }).ShouldNot(Panic())
+ })
+ })
+
+ Context("and takes more than one thing", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func(Done, Done) []byte { return nil }, func([]byte) {})
+ }).Should(Panic())
+ })
+ })
+
+ Context("and takes something else", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func(bool) []byte { return nil }, func([]byte) {})
+ }).Should(Panic())
+ })
+ })
+ })
+
+ Context("when the first function does not return a byte array", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func() {}, func([]byte) {})
+ }).Should(Panic())
+
+ Ω(func() {
+ newNode(func() []int { return nil }, func([]byte) {})
+ }).Should(Panic())
+ })
+ })
+ })
+
+ Describe("the second function", func() {
+ Context("when the second function takes a byte array", func() {
+ It("should be fine", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func([]byte) {})
+ }).ShouldNot(Panic())
+ })
+ })
+
+ Context("when it also takes a done channel", func() {
+ It("should be fine", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func([]byte, Done) {})
+ }).ShouldNot(Panic())
+ })
+ })
+
+ Context("if it takes anything else", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func([]byte, chan bool) {})
+ }).Should(Panic())
+
+ Ω(func() {
+ newNode(func() []byte { return nil }, func(string) {})
+ }).Should(Panic())
+ })
+ })
+
+ Context("if it takes nothing at all", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func() {})
+ }).Should(Panic())
+ })
+ })
+
+ Context("if it returns something", func() {
+ It("should panic", func() {
+ Ω(func() {
+ newNode(func() []byte { return nil }, func([]byte) []byte { return nil })
+ }).Should(Panic())
+ })
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
new file mode 100644
index 000000000..6b54afe01
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -0,0 +1,249 @@
+/*
+
+Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
+coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
+
+ ginkgo -nodes=N
+
+where N is the number of nodes you desire.
+*/
+package remote
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters/stenographer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type configAndSuite struct {
+ config config.GinkgoConfigType
+ summary *types.SuiteSummary
+}
+
+type Aggregator struct {
+ nodeCount int
+ config config.DefaultReporterConfigType
+ stenographer stenographer.Stenographer
+ result chan bool
+
+ suiteBeginnings chan configAndSuite
+ aggregatedSuiteBeginnings []configAndSuite
+
+ beforeSuites chan *types.SetupSummary
+ aggregatedBeforeSuites []*types.SetupSummary
+
+ afterSuites chan *types.SetupSummary
+ aggregatedAfterSuites []*types.SetupSummary
+
+ specCompletions chan *types.SpecSummary
+ completedSpecs []*types.SpecSummary
+
+ suiteEndings chan *types.SuiteSummary
+ aggregatedSuiteEndings []*types.SuiteSummary
+ specs []*types.SpecSummary
+
+ startTime time.Time
+}
+
+func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
+ aggregator := &Aggregator{
+ nodeCount: nodeCount,
+ result: result,
+ config: config,
+ stenographer: stenographer,
+
+ suiteBeginnings: make(chan configAndSuite),
+ beforeSuites: make(chan *types.SetupSummary),
+ afterSuites: make(chan *types.SetupSummary),
+ specCompletions: make(chan *types.SpecSummary),
+ suiteEndings: make(chan *types.SuiteSummary),
+ }
+
+ go aggregator.mux()
+
+ return aggregator
+}
+
+func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+ aggregator.suiteBeginnings <- configAndSuite{config, summary}
+}
+
+func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+ aggregator.beforeSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+ aggregator.afterSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
+ // no-op
+}
+
+func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
+ aggregator.specCompletions <- specSummary
+}
+
+func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+ aggregator.suiteEndings <- summary
+}
+
+func (aggregator *Aggregator) mux() {
+loop:
+ for {
+ select {
+ case configAndSuite := <-aggregator.suiteBeginnings:
+ aggregator.registerSuiteBeginning(configAndSuite)
+ case setupSummary := <-aggregator.beforeSuites:
+ aggregator.registerBeforeSuite(setupSummary)
+ case setupSummary := <-aggregator.afterSuites:
+ aggregator.registerAfterSuite(setupSummary)
+ case specSummary := <-aggregator.specCompletions:
+ aggregator.registerSpecCompletion(specSummary)
+ case suite := <-aggregator.suiteEndings:
+ finished, passed := aggregator.registerSuiteEnding(suite)
+ if finished {
+ aggregator.result <- passed
+ break loop
+ }
+ }
+ }
+}
+
+func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
+ aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
+
+ if len(aggregator.aggregatedSuiteBeginnings) == 1 {
+ aggregator.startTime = time.Now()
+ }
+
+ if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+ return
+ }
+
+ aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
+
+ totalNumberOfSpecs := 0
+ if len(aggregator.aggregatedSuiteBeginnings) > 0 {
+ totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
+ }
+
+ aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
+ aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
+ aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
+ aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
+ aggregator.specs = append(aggregator.specs, specSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) flushCompletedSpecs() {
+ if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+ return
+ }
+
+ for _, setupSummary := range aggregator.aggregatedBeforeSuites {
+ aggregator.announceBeforeSuite(setupSummary)
+ }
+
+ for _, specSummary := range aggregator.completedSpecs {
+ aggregator.announceSpec(specSummary)
+ }
+
+ for _, setupSummary := range aggregator.aggregatedAfterSuites {
+ aggregator.announceAfterSuite(setupSummary)
+ }
+
+ aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
+ aggregator.completedSpecs = []*types.SpecSummary{}
+ aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
+}
+
+func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
+ aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+ if setupSummary.State != types.SpecStatePassed {
+ aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
+ aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+ if setupSummary.State != types.SpecStatePassed {
+ aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
+ if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+ aggregator.stenographer.AnnounceSpecWillRun(specSummary)
+ }
+
+ aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
+
+ switch specSummary.State {
+ case types.SpecStatePassed:
+ if specSummary.IsMeasurement {
+ aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+ } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
+ aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+ } else {
+ aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+ }
+
+ case types.SpecStatePending:
+ aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
+ case types.SpecStateSkipped:
+ aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
+ case types.SpecStateTimedOut:
+ aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ case types.SpecStatePanicked:
+ aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ case types.SpecStateFailed:
+ aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
+ aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
+ if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
+ return false, false
+ }
+
+ aggregatedSuiteSummary := &types.SuiteSummary{}
+ aggregatedSuiteSummary.SuiteSucceeded = true
+
+ for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
+ if !suiteSummary.SuiteSucceeded {
+ aggregatedSuiteSummary.SuiteSucceeded = false
+ }
+
+ aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
+ aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
+ aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
+ aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
+ aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
+ aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
+ aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
+ }
+
+ aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
+
+ aggregator.stenographer.SummarizeFailures(aggregator.specs)
+ aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
+
+ return true, aggregatedSuiteSummary.SuiteSucceeded
+}
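The Aggregator funnels every reporter callback through a channel into the single mux goroutine, so all state is owned by one goroutine and no mutex is needed. A stripped-down sketch of the same pattern, with an invented event type:

package main

import "fmt"

type suiteEnding struct {
	node   int
	passed bool
}

// mux serializes events from any number of producer goroutines and
// emits the combined verdict once every node has checked in.
func mux(endings <-chan suiteEnding, nodeCount int, result chan<- bool) {
	passed := true
	for seen := 0; seen < nodeCount; seen++ {
		if e := <-endings; !e.passed {
			passed = false
		}
	}
	result <- passed
}

func main() {
	endings := make(chan suiteEnding)
	result := make(chan bool, 1)
	go mux(endings, 2, result)

	endings <- suiteEnding{node: 1, passed: true}
	endings <- suiteEnding{node: 2, passed: false}

	fmt.Println("aggregated suite passed:", <-result) // false
}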
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
new file mode 100644
index 000000000..aedf93927
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
@@ -0,0 +1,315 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "time"
+
+ "github.com/onsi/ginkgo/config"
+ . "github.com/onsi/ginkgo/internal/remote"
+ st "github.com/onsi/ginkgo/reporters/stenographer"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Aggregator", func() {
+ var (
+ aggregator *Aggregator
+ reporterConfig config.DefaultReporterConfigType
+ stenographer *st.FakeStenographer
+ result chan bool
+
+ ginkgoConfig1 config.GinkgoConfigType
+ ginkgoConfig2 config.GinkgoConfigType
+
+ suiteSummary1 *types.SuiteSummary
+ suiteSummary2 *types.SuiteSummary
+
+ beforeSummary *types.SetupSummary
+ afterSummary *types.SetupSummary
+ specSummary *types.SpecSummary
+
+ suiteDescription string
+ )
+
+ BeforeEach(func() {
+ reporterConfig = config.DefaultReporterConfigType{
+ NoColor: false,
+ SlowSpecThreshold: 0.1,
+ NoisyPendings: true,
+ Succinct: false,
+ Verbose: true,
+ }
+ stenographer = st.NewFakeStenographer()
+ result = make(chan bool, 1)
+ aggregator = NewAggregator(2, result, reporterConfig, stenographer)
+
+ //
+ // now set up some fixture data
+ //
+
+ ginkgoConfig1 = config.GinkgoConfigType{
+ RandomSeed: 1138,
+ RandomizeAllSpecs: true,
+ ParallelNode: 1,
+ ParallelTotal: 2,
+ }
+
+ ginkgoConfig2 = config.GinkgoConfigType{
+ RandomSeed: 1138,
+ RandomizeAllSpecs: true,
+ ParallelNode: 2,
+ ParallelTotal: 2,
+ }
+
+ suiteDescription = "My Parallel Suite"
+
+ suiteSummary1 = &types.SuiteSummary{
+ SuiteDescription: suiteDescription,
+
+ NumberOfSpecsBeforeParallelization: 30,
+ NumberOfTotalSpecs: 17,
+ NumberOfSpecsThatWillBeRun: 15,
+ NumberOfPendingSpecs: 1,
+ NumberOfSkippedSpecs: 1,
+ }
+
+ suiteSummary2 = &types.SuiteSummary{
+ SuiteDescription: suiteDescription,
+
+ NumberOfSpecsBeforeParallelization: 30,
+ NumberOfTotalSpecs: 13,
+ NumberOfSpecsThatWillBeRun: 8,
+ NumberOfPendingSpecs: 2,
+ NumberOfSkippedSpecs: 3,
+ }
+
+ beforeSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "BeforeSuiteOutput",
+ }
+
+ afterSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "AfterSuiteOutput",
+ }
+
+ specSummary = &types.SpecSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "SpecOutput",
+ }
+ })
+
+ call := func(method string, args ...interface{}) st.FakeStenographerCall {
+ return st.NewFakeStenographerCall(method, args...)
+ }
+
+ beginSuite := func() {
+ stenographer.Reset()
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
+ Eventually(func() interface{} {
+ return len(stenographer.Calls())
+ }).Should(BeNumerically(">=", 3))
+ }
+
+ Describe("Announcing the beginning of the suite", func() {
+ Context("When one of the parallel-suites starts", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ })
+
+ It("should be silent", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+ })
+
+ Context("once all of the parallel-suites have started", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(3))
+ })
+
+ It("should announce the beginning of the suite", func() {
+ Ω(stenographer.Calls()).Should(HaveLen(3))
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", suiteDescription, ginkgoConfig1.RandomSeed, true, false)))
+ Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceTotalNumberOfSpecs", 30, false)))
+ Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceAggregatedParallelRun", 2, false)))
+ })
+ })
+ })
+
+ Describe("Announcing specs and before suites", func() {
+ Context("when the parallel-suites have not all started", func() {
+ BeforeEach(func() {
+ aggregator.BeforeSuiteDidRun(beforeSummary)
+ aggregator.AfterSuiteDidRun(afterSummary)
+ aggregator.SpecDidComplete(specSummary)
+ })
+
+ It("should not announce any specs", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+
+ Context("when the parallel-suites subsequently start", func() {
+ BeforeEach(func() {
+ beginSuite()
+ })
+
+ It("should announce the specs, the before suites and the after suites", func() {
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(ContainElement(call("AnnounceSuccesfulSpec", specSummary)))
+
+ Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
+ Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
+ })
+ })
+ })
+
+ Context("When the parallel-suites have all started", func() {
+ BeforeEach(func() {
+ beginSuite()
+ stenographer.Reset()
+ })
+
+ Context("When a spec completes", func() {
+ BeforeEach(func() {
+ aggregator.BeforeSuiteDidRun(beforeSummary)
+ aggregator.SpecDidComplete(specSummary)
+ aggregator.AfterSuiteDidRun(afterSummary)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(5))
+ })
+
+ It("should announce the captured output of the BeforeSuite", func() {
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
+ })
+
+ It("should announce that the spec will run (when in verbose mode)", func() {
+ Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecWillRun", specSummary)))
+ })
+
+ It("should announce the captured stdout of the spec", func() {
+ Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceCapturedOutput", specSummary.CapturedOutput)))
+ })
+
+ It("should announce completion", func() {
+ Ω(stenographer.Calls()[3]).Should(Equal(call("AnnounceSuccesfulSpec", specSummary)))
+ })
+
+ It("should announce the captured output of the AfterSuite", func() {
+ Ω(stenographer.Calls()[4]).Should(Equal(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
+ })
+ })
+ })
+ })
+
+ Describe("Announcing the end of the suite", func() {
+ BeforeEach(func() {
+ beginSuite()
+ stenographer.Reset()
+ })
+
+ Context("When one of the parallel-suites ends", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ })
+
+ It("should be silent", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+
+ It("should not notify the channel", func() {
+ Ω(result).Should(BeEmpty())
+ })
+ })
+
+ Context("once all of the parallel-suites end", func() {
+ BeforeEach(func() {
+ time.Sleep(200 * time.Millisecond)
+
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary1.NumberOfPassedSpecs = 15
+ suiteSummary1.NumberOfFailedSpecs = 0
+ suiteSummary1.NumberOfFlakedSpecs = 3
+ suiteSummary2.SuiteSucceeded = false
+ suiteSummary2.NumberOfPassedSpecs = 5
+ suiteSummary2.NumberOfFailedSpecs = 3
+ suiteSummary2.NumberOfFlakedSpecs = 4
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should announce the end of the suite", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(compositeSummary.NumberOfSpecsThatWillBeRun).Should(Equal(23))
+ Ω(compositeSummary.NumberOfTotalSpecs).Should(Equal(30))
+ Ω(compositeSummary.NumberOfPassedSpecs).Should(Equal(20))
+ Ω(compositeSummary.NumberOfFailedSpecs).Should(Equal(3))
+ Ω(compositeSummary.NumberOfPendingSpecs).Should(Equal(3))
+ Ω(compositeSummary.NumberOfSkippedSpecs).Should(Equal(4))
+ Ω(compositeSummary.NumberOfFlakedSpecs).Should(Equal(7))
+ Ω(compositeSummary.RunTime.Seconds()).Should(BeNumerically(">", 0.2))
+ })
+ })
+
+ Context("when all the parallel-suites pass", func() {
+ BeforeEach(func() {
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary2.SuiteSucceeded = true
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should report success", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeTrue())
+ })
+
+ It("should notify the channel that it succeded", func(done Done) {
+ Ω(<-result).Should(BeTrue())
+ close(done)
+ })
+ })
+
+ Context("when one of the parallel-suites fails", func() {
+ BeforeEach(func() {
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary2.SuiteSucceeded = false
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should report failure", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
+ })
+
+ It("should notify the channel that it failed", func(done Done) {
+ Ω(<-result).Should(BeFalse())
+ close(done)
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
new file mode 100644
index 000000000..ef54862ea
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
@@ -0,0 +1,22 @@
+package remote_test
+
+import "os"
+
+type fakeOutputInterceptor struct {
+ DidStartInterceptingOutput bool
+ DidStopInterceptingOutput bool
+ InterceptedOutput string
+}
+
+func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error {
+ interceptor.DidStartInterceptingOutput = true
+ return nil
+}
+
+func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ interceptor.DidStopInterceptingOutput = true
+ return interceptor.InterceptedOutput, nil
+}
+
+func (interceptor *fakeOutputInterceptor) StreamTo(*os.File) {
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
new file mode 100644
index 000000000..3543c59c6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
@@ -0,0 +1,33 @@
+package remote_test
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+type post struct {
+ url string
+ bodyType string
+ bodyContent []byte
+}
+
+type fakePoster struct {
+ posts []post
+}
+
+func newFakePoster() *fakePoster {
+ return &fakePoster{
+ posts: make([]post, 0),
+ }
+}
+
+func (poster *fakePoster) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ bodyContent, _ := ioutil.ReadAll(body)
+ poster.posts = append(poster.posts, post{
+ url: url,
+ bodyType: bodyType,
+ bodyContent: bodyContent,
+ })
+ return nil, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
new file mode 100644
index 000000000..284bc62e5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
@@ -0,0 +1,147 @@
+package remote
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/reporters/stenographer"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/types"
+)
+
+//An interface to net/http's client to allow the injection of fakes in tests
+type Poster interface {
+ Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
+}
+
+/*
+The ForwardingReporter is a Ginkgo reporter that forwards information to
+a Ginkgo remote server.
+
+When streaming parallel test output, this reporter is automatically installed by Ginkgo.
+
+This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
+detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
+in place of Ginkgo's DefaultReporter.
+*/
+
+type ForwardingReporter struct {
+ serverHost string
+ poster Poster
+ outputInterceptor OutputInterceptor
+ debugMode bool
+ debugFile *os.File
+ nestedReporter *reporters.DefaultReporter
+}
+
+func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
+ reporter := &ForwardingReporter{
+ serverHost: serverHost,
+ poster: poster,
+ outputInterceptor: outputInterceptor,
+ }
+
+ if debugFile != "" {
+ var err error
+ reporter.debugMode = true
+ reporter.debugFile, err = os.Create(debugFile)
+ if err != nil {
+ fmt.Println(err.Error())
+ os.Exit(1)
+ }
+
+ if !config.Verbose {
+ //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
+ ginkgoWriter.AndRedirectTo(reporter.debugFile)
+ }
+ outputInterceptor.StreamTo(reporter.debugFile) //This is not working
+
+ stenographer := stenographer.New(false, true, reporter.debugFile)
+ config.Succinct = false
+ config.Verbose = true
+ config.FullTrace = true
+ reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
+ }
+
+ return reporter
+}
+
+func (reporter *ForwardingReporter) post(path string, data interface{}) {
+ encoded, _ := json.Marshal(data)
+ buffer := bytes.NewBuffer(encoded)
+ reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
+ data := struct {
+ Config config.GinkgoConfigType `json:"config"`
+ Summary *types.SuiteSummary `json:"suite-summary"`
+ }{
+ conf,
+ summary,
+ }
+
+ reporter.outputInterceptor.StartInterceptingOutput()
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecSuiteWillBegin", data)
+}
+
+func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ setupSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/BeforeSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecWillRun(specSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecWillRun", specSummary)
+}
+
+func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ specSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecDidComplete(specSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecDidComplete", specSummary)
+}
+
+func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ setupSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/AfterSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+ reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecSuiteDidEnd(summary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecSuiteDidEnd", summary)
+}
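For orientation, a minimal sketch of wiring up a ForwardingReporter by hand; this is not part of the vendored source, the server address is a placeholder, and in real runs the Ginkgo runner derives the host from the GINKGO_REMOTE_REPORTING_SERVER environment variable. Note that an *http.Client satisfies the Poster interface directly.

package main

import (
	"net/http"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/remote"
)

func main() {
	// An *http.Client is a valid Poster: it already has the required
	// Post(url, bodyType string, body io.Reader) method.
	reporter := remote.NewForwardingReporter(
		config.DefaultReporterConfigType{}, // reporter configuration
		"http://127.0.0.1:7788",            // placeholder server host
		&http.Client{},                     // Poster implementation
		remote.NewOutputInterceptor(),      // captures stdout/stderr
		nil,                                // ginkgoWriter: only consulted in debug mode
		"",                                 // empty debug file, so debugMode stays off
	)
	_ = reporter // hand this to Ginkgo in place of the DefaultReporter
}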
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
new file mode 100644
index 000000000..0d7e4769c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
@@ -0,0 +1,181 @@
+package remote_test
+
+import (
+ "encoding/json"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/config"
+ . "github.com/onsi/ginkgo/internal/remote"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ForwardingReporter", func() {
+ var (
+ reporter *ForwardingReporter
+ interceptor *fakeOutputInterceptor
+ poster *fakePoster
+ suiteSummary *types.SuiteSummary
+ specSummary *types.SpecSummary
+ setupSummary *types.SetupSummary
+ serverHost string
+ )
+
+ BeforeEach(func() {
+ serverHost = "http://127.0.0.1:7788"
+
+ poster = newFakePoster()
+
+ interceptor = &fakeOutputInterceptor{
+ InterceptedOutput: "The intercepted output!",
+ }
+
+ reporter = NewForwardingReporter(config.DefaultReporterConfigType{}, serverHost, poster, interceptor, nil, "")
+
+ suiteSummary = &types.SuiteSummary{
+ SuiteDescription: "My Test Suite",
+ }
+
+ setupSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ }
+
+ specSummary = &types.SpecSummary{
+ ComponentTexts: []string{"My", "Spec"},
+ State: types.SpecStatePassed,
+ }
+ })
+
+ Context("When a suite begins", func() {
+ BeforeEach(func() {
+ reporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
+ })
+
+ It("should start intercepting output", func() {
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SuiteSummary and Ginkgo Config to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteWillBegin"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var sentData struct {
+ SentConfig config.GinkgoConfigType `json:"config"`
+ SentSuiteSummary *types.SuiteSummary `json:"suite-summary"`
+ }
+
+ err := json.Unmarshal(poster.posts[0].bodyContent, &sentData)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ Ω(sentData.SentConfig).Should(Equal(config.GinkgoConfig))
+ Ω(sentData.SentSuiteSummary).Should(Equal(suiteSummary))
+ })
+ })
+
+ Context("when a BeforeSuite completes", func() {
+ BeforeEach(func() {
+ reporter.BeforeSuiteDidRun(setupSummary)
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SetupSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/BeforeSuiteDidRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SetupSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ setupSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(setupSummary))
+ })
+ })
+
+ Context("when an AfterSuite completes", func() {
+ BeforeEach(func() {
+ reporter.AfterSuiteDidRun(setupSummary)
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SetupSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/AfterSuiteDidRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SetupSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ setupSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(setupSummary))
+ })
+ })
+
+ Context("When a spec will run", func() {
+ BeforeEach(func() {
+ reporter.SpecWillRun(specSummary)
+ })
+
+ It("should POST the SpecSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecWillRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SpecSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(summary).Should(Equal(specSummary))
+ })
+
+ Context("When a spec completes", func() {
+ BeforeEach(func() {
+ specSummary.State = types.SpecStatePanicked
+ reporter.SpecDidComplete(specSummary)
+ })
+
+ It("should POST the SpecSummary to the Ginkgo server and include any intercepted output", func() {
+ Ω(poster.posts).Should(HaveLen(2))
+ Ω(poster.posts[1].url).Should(Equal("http://127.0.0.1:7788/SpecDidComplete"))
+ Ω(poster.posts[1].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SpecSummary
+ err := json.Unmarshal(poster.posts[1].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ specSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(specSummary))
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+ })
+ })
+
+ Context("When a suite ends", func() {
+ BeforeEach(func() {
+ reporter.SpecSuiteDidEnd(suiteSummary)
+ })
+
+ It("should POST the SuiteSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteDidEnd"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SuiteSummary
+
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ Ω(summary).Should(Equal(suiteSummary))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
new file mode 100644
index 000000000..5154abe87
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
@@ -0,0 +1,13 @@
+package remote
+
+import "os"
+
+/*
+The OutputInterceptor is used by the ForwardingReporter to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput() error
+ StopInterceptingAndReturnOutput() (string, error)
+ StreamTo(*os.File)
+}
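A sketch of the intended lifecycle, assuming a platform where interception actually works (see the unix implementation in the next file; on Windows both calls are no-ops):

package main

import (
	"fmt"
	"log"

	"github.com/onsi/ginkgo/internal/remote"
)

func main() {
	interceptor := remote.NewOutputInterceptor()

	// From here on, writes to file descriptors 1 and 2 land in a temp file.
	if err := interceptor.StartInterceptingOutput(); err != nil {
		log.Fatal(err) // e.g. "Already intercepting output!"
	}

	fmt.Println("captured, not printed")

	// Returns everything captured since Start. The descriptors stay
	// redirected, which is why the ForwardingReporter immediately calls
	// Start again after every Stop.
	output, err := interceptor.StopInterceptingAndReturnOutput()
	if err == nil {
		_ = output // "captured, not printed\n"
	}
}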
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
new file mode 100644
index 000000000..ab6622a29
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
@@ -0,0 +1,83 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package remote
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+
+ "github.com/hpcloud/tail"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+ redirectFile *os.File
+ streamTarget *os.File
+ intercepting bool
+ tailer *tail.Tail
+ doneTailing chan bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+ if interceptor.intercepting {
+ return errors.New("Already intercepting output!")
+ }
+ interceptor.intercepting = true
+
+ var err error
+
+ interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
+ if err != nil {
+ return err
+ }
+
+ // Call a function in ./syscall_dup_*.go
+ // If building for everything other than linux_arm64,
+ // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
+ // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
+ syscallDup(int(interceptor.redirectFile.Fd()), 1)
+ syscallDup(int(interceptor.redirectFile.Fd()), 2)
+
+ if interceptor.streamTarget != nil {
+ interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
+ interceptor.doneTailing = make(chan bool)
+
+ go func() {
+ for line := range interceptor.tailer.Lines {
+ interceptor.streamTarget.Write([]byte(line.Text + "\n"))
+ }
+ close(interceptor.doneTailing)
+ }()
+ }
+
+ return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ if !interceptor.intercepting {
+ return "", errors.New("Not intercepting output!")
+ }
+
+ interceptor.redirectFile.Close()
+ output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
+ os.Remove(interceptor.redirectFile.Name())
+
+ interceptor.intercepting = false
+
+ if interceptor.streamTarget != nil {
+ interceptor.tailer.Stop()
+ interceptor.tailer.Cleanup()
+ <-interceptor.doneTailing
+ interceptor.streamTarget.Sync()
+ }
+
+ return string(output), err
+}
+
+func (interceptor *outputInterceptor) StreamTo(out *os.File) {
+ interceptor.streamTarget = out
+}
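The core mechanism above is file-descriptor duplication: dup2(tmpfd, 1) makes fd 1 point at the temp file, so output written by cgo or child processes is captured too, not just writes that go through Go's os.Stdout. A standalone sketch of the trick, assuming a platform that has syscall.Dup2 (linux/arm64 needs Dup3 instead, as the syscall_dup files below arrange):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

func main() {
	f, err := ioutil.TempFile("", "redirect")
	if err != nil {
		panic(err)
	}

	// After Dup2, fd 1 (stdout) is an alias for the temp file.
	if err := syscall.Dup2(int(f.Fd()), 1); err != nil {
		panic(err)
	}

	fmt.Println("this line ends up in the temp file")

	f.Close()
	data, _ := ioutil.ReadFile(f.Name())
	os.Remove(f.Name())

	// fd 2 (stderr) was left alone, so this is still visible.
	fmt.Fprintf(os.Stderr, "captured: %s", data)
}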
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
new file mode 100644
index 000000000..40c790336
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package remote
+
+import (
+ "errors"
+ "os"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+ intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+ if interceptor.intercepting {
+ return errors.New("Already intercepting output!")
+ }
+ interceptor.intercepting = true
+
+ // not working on windows...
+
+ return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ // not working on windows...
+ interceptor.intercepting = false
+
+ return "", nil
+}
+
+func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
new file mode 100644
index 000000000..e6b4e9f32
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
@@ -0,0 +1,13 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestRemote(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Remote Spec Forwarding Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
new file mode 100644
index 000000000..367c54daf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
@@ -0,0 +1,224 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used primarily to enable streaming parallel test output, but it has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package remote
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sync"
+
+ "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+)
+
+/*
+Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type Server struct {
+ listener net.Listener
+ reporters []reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteData types.RemoteBeforeSuiteData
+ parallelTotal int
+ counter int
+}
+
+//Create a new server, automatically selecting a port
+func NewServer(parallelTotal int) (*Server, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &Server{
+ listener: listener,
+ lock: &sync.Mutex{},
+ alives: make([]func() bool, parallelTotal),
+ beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
+ parallelTotal: parallelTotal,
+ }, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *Server) Start() {
+ httpServer := &http.Server{}
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+
+ //streaming endpoints
+ mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
+ mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
+ mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
+ mux.HandleFunc("/SpecWillRun", server.specWillRun)
+ mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
+ mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
+
+ //synchronization endpoints
+ mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
+ mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+ mux.HandleFunc("/counter", server.handleCounter)
+ mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
+
+ go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *Server) Close() {
+ server.listener.Close()
+}
+
+//The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *Server) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+//
+// Streaming Endpoints
+//
+
+func (server *Server) readAll(request *http.Request) []byte {
+ defer request.Body.Close()
+ body, _ := ioutil.ReadAll(request.Body)
+ return body
+}
+
+//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
+ server.reporters = reporters
+}
+
+func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+
+ var data struct {
+ Config config.GinkgoConfigType `json:"config"`
+ Summary *types.SuiteSummary `json:"suite-summary"`
+ }
+
+ json.Unmarshal(body, &data)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecSuiteWillBegin(data.Config, data.Summary)
+ }
+}
+
+func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var setupSummary *types.SetupSummary
+ json.Unmarshal(body, &setupSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.BeforeSuiteDidRun(setupSummary)
+ }
+}
+
+func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var setupSummary *types.SetupSummary
+ json.Unmarshal(body, &setupSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.AfterSuiteDidRun(setupSummary)
+ }
+}
+
+func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var specSummary *types.SpecSummary
+ json.Unmarshal(body, &specSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecWillRun(specSummary)
+ }
+}
+
+func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var specSummary *types.SpecSummary
+ json.Unmarshal(body, &specSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecDidComplete(specSummary)
+ }
+}
+
+func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var suiteSummary *types.SuiteSummary
+ json.Unmarshal(body, &suiteSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecSuiteDidEnd(suiteSummary)
+ }
+}
+
+//
+// Synchronization Endpoints
+//
+
+func (server *Server) RegisterAlive(node int, alive func() bool) {
+ server.lock.Lock()
+ defer server.lock.Unlock()
+ server.alives[node-1] = alive
+}
+
+func (server *Server) nodeIsAlive(node int) bool {
+ server.lock.Lock()
+ defer server.lock.Unlock()
+ alive := server.alives[node-1]
+ if alive == nil {
+ return true
+ }
+ return alive()
+}
+
+func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ if request.Method == "POST" {
+ dec := json.NewDecoder(request.Body)
+ dec.Decode(&(server.beforeSuiteData))
+ } else {
+ beforeSuiteData := server.beforeSuiteData
+ if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
+ beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
+ }
+ enc := json.NewEncoder(writer)
+ enc.Encode(beforeSuiteData)
+ }
+}
+
+func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
+ afterSuiteData := types.RemoteAfterSuiteData{
+ CanRun: true,
+ }
+ for i := 2; i <= server.parallelTotal; i++ {
+ afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
+ }
+
+ enc := json.NewEncoder(writer)
+ enc.Encode(afterSuiteData)
+}
+
+func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ c := spec_iterator.Counter{}
+ server.lock.Lock()
+ c.Index = server.counter
+ server.counter = server.counter + 1
+ server.lock.Unlock()
+
+ json.NewEncoder(writer).Encode(c)
+}
+
+func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
+ writer.Write([]byte(""))
+}
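A sketch of how the pieces fit together in the runner process; it mirrors the test file that follows, and the FakeReporter is just a stand-in for whatever reporters should receive the forwarded events:

package main

import (
	"net/http"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/remote"
	"github.com/onsi/ginkgo/reporters"
)

func main() {
	// One aggregating server for a 3-node parallel run.
	server, err := remote.NewServer(3)
	if err != nil {
		panic(err)
	}
	defer server.Close()

	server.RegisterReporters(reporters.NewFakeReporter())
	server.Start() // non-blocking; serves on server.Address()

	// Each parallel node then reports back over plain HTTP:
	forwarder := remote.NewForwardingReporter(
		config.DefaultReporterConfigType{}, server.Address(),
		&http.Client{}, remote.NewOutputInterceptor(), nil, "")
	_ = forwarder
}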
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
new file mode 100644
index 000000000..36bd00355
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
@@ -0,0 +1,269 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/remote"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+
+ "bytes"
+ "encoding/json"
+ "net/http"
+)
+
+var _ = Describe("Server", func() {
+ var (
+ server *Server
+ )
+
+ BeforeEach(func() {
+ var err error
+ server, err = NewServer(3)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ server.Start()
+ })
+
+ AfterEach(func() {
+ server.Close()
+ })
+
+ Describe("Streaming endpoints", func() {
+ var (
+ reporterA, reporterB *reporters.FakeReporter
+ forwardingReporter *ForwardingReporter
+
+ suiteSummary *types.SuiteSummary
+ setupSummary *types.SetupSummary
+ specSummary *types.SpecSummary
+ )
+
+ BeforeEach(func() {
+ reporterA = reporters.NewFakeReporter()
+ reporterB = reporters.NewFakeReporter()
+
+ server.RegisterReporters(reporterA, reporterB)
+
+ forwardingReporter = NewForwardingReporter(config.DefaultReporterConfigType{}, server.Address(), &http.Client{}, &fakeOutputInterceptor{}, nil, "")
+
+ suiteSummary = &types.SuiteSummary{
+ SuiteDescription: "My Test Suite",
+ }
+
+ setupSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ }
+
+ specSummary = &types.SpecSummary{
+ ComponentTexts: []string{"My", "Spec"},
+ State: types.SpecStatePassed,
+ }
+ })
+
+ It("should make its address available", func() {
+ Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`))
+ })
+
+ Describe("/SpecSuiteWillBegin", func() {
+ It("should decode and forward the Ginkgo config and suite summary", func(done Done) {
+ forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
+ Ω(reporterA.Config).Should(Equal(config.GinkgoConfig))
+ Ω(reporterB.Config).Should(Equal(config.GinkgoConfig))
+ Ω(reporterA.BeginSummary).Should(Equal(suiteSummary))
+ Ω(reporterB.BeginSummary).Should(Equal(suiteSummary))
+ close(done)
+ })
+ })
+
+ Describe("/BeforeSuiteDidRun", func() {
+ It("should decode and forward the setup summary", func() {
+ forwardingReporter.BeforeSuiteDidRun(setupSummary)
+ Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary))
+ Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary))
+ })
+ })
+
+ Describe("/AfterSuiteDidRun", func() {
+ It("should decode and forward the setup summary", func() {
+ forwardingReporter.AfterSuiteDidRun(setupSummary)
+ Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary))
+ Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary))
+ })
+ })
+
+ Describe("/SpecWillRun", func() {
+ It("should decode and forward the spec summary", func(done Done) {
+ forwardingReporter.SpecWillRun(specSummary)
+ Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary))
+ Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary))
+ close(done)
+ })
+ })
+
+ Describe("/SpecDidComplete", func() {
+ It("should decode and forward the spec summary", func(done Done) {
+ forwardingReporter.SpecDidComplete(specSummary)
+ Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary))
+ Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary))
+ close(done)
+ })
+ })
+
+ Describe("/SpecSuiteDidEnd", func() {
+ It("should decode and forward the suite summary", func(done Done) {
+ forwardingReporter.SpecSuiteDidEnd(suiteSummary)
+ Ω(reporterA.EndSummary).Should(Equal(suiteSummary))
+ Ω(reporterB.EndSummary).Should(Equal(suiteSummary))
+ close(done)
+ })
+ })
+ })
+
+ Describe("Synchronization endpoints", func() {
+ Describe("GETting and POSTing BeforeSuiteState", func() {
+ getBeforeSuite := func() types.RemoteBeforeSuiteData {
+ resp, err := http.Get(server.Address() + "/BeforeSuiteState")
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+
+ r := types.RemoteBeforeSuiteData{}
+ decoder := json.NewDecoder(resp.Body)
+ err = decoder.Decode(&r)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ return r
+ }
+
+ postBeforeSuite := func(r types.RemoteBeforeSuiteData) {
+ resp, err := http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON()))
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+ }
+
+ Context("when the first node's Alive has not been registered yet", func() {
+ It("should return pending", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+ })
+ })
+
+ Context("when the first node is Alive but has not responded yet", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return true
+ })
+ })
+
+ It("should return pending", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+ })
+ })
+
+ Context("when the first node has responded", func() {
+ var state types.RemoteBeforeSuiteData
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return false
+ })
+
+ state = types.RemoteBeforeSuiteData{
+ Data: []byte("my data"),
+ State: types.RemoteBeforeSuiteStatePassed,
+ }
+ postBeforeSuite(state)
+ })
+
+ It("should return the passed in state", func() {
+ returnedState := getBeforeSuite()
+ Ω(returnedState).Should(Equal(state))
+ })
+ })
+
+ Context("when the first node is no longer Alive and has not responded yet", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return false
+ })
+ })
+
+ It("should return disappeared", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
+ })
+ })
+ })
+
+ Describe("GETting RemoteAfterSuiteData", func() {
+ getRemoteAfterSuiteData := func() bool {
+ resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData")
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+
+ a := types.RemoteAfterSuiteData{}
+ decoder := json.NewDecoder(resp.Body)
+ err = decoder.Decode(&a)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ return a.CanRun
+ }
+
+ Context("when there are unregistered nodes", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return false
+ })
+ })
+
+ It("should return false", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeFalse())
+ })
+ })
+
+ Context("when all none-node-1 nodes are still running", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return true
+ })
+
+ server.RegisterAlive(3, func() bool {
+ return false
+ })
+ })
+
+ It("should return false", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeFalse())
+ })
+ })
+
+ Context("when all none-1 nodes are done", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return false
+ })
+
+ server.RegisterAlive(3, func() bool {
+ return false
+ })
+ })
+
+ It("should return true", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeTrue())
+ })
+
+ })
+ })
+ })
+})
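For context on the synchronization endpoints exercised above: nodes other than node 1 poll GET /BeforeSuiteState until node 1 posts a terminal state or disappears. A hedged sketch of such a polling client; the retry interval is arbitrary and the address is a placeholder:

package main

import (
	"encoding/json"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/types"
)

func waitForBeforeSuite(serverAddr string) types.RemoteBeforeSuiteData {
	for {
		resp, err := http.Get(serverAddr + "/BeforeSuiteState")
		if err == nil {
			var state types.RemoteBeforeSuiteData
			json.NewDecoder(resp.Body).Decode(&state)
			resp.Body.Close()
			if state.State != types.RemoteBeforeSuiteStatePending {
				return state // passed, failed, or disappeared
			}
		}
		time.Sleep(50 * time.Millisecond) // arbitrary retry interval
	}
}

func main() {
	_ = waitForBeforeSuite("http://127.0.0.1:7788") // placeholder address
}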
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
new file mode 100644
index 000000000..9550d37b3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
@@ -0,0 +1,11 @@
+// +build linux,arm64
+
+package remote
+
+import "syscall"
+
+// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
+// use the nearly identical syscall.Dup3 instead
+func syscallDup(oldfd int, newfd int) (err error) {
+ return syscall.Dup3(oldfd, newfd, 0)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
new file mode 100644
index 000000000..75ef7fb78
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
@@ -0,0 +1,9 @@
+// +build solaris
+
+package remote
+
+import "golang.org/x/sys/unix"
+
+func syscallDup(oldfd int, newfd int) (err error) {
+ return unix.Dup2(oldfd, newfd)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
new file mode 100644
index 000000000..ef6255960
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
@@ -0,0 +1,11 @@
+// +build !linux !arm64
+// +build !windows
+// +build !solaris
+
+package remote
+
+import "syscall"
+
+func syscallDup(oldfd int, newfd int) (err error) {
+ return syscall.Dup2(oldfd, newfd)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
new file mode 100644
index 000000000..7fd68ee8e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
@@ -0,0 +1,247 @@
+package spec
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "sync"
+
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/types"
+)
+
+type Spec struct {
+ subject leafnodes.SubjectNode
+ focused bool
+ announceProgress bool
+
+ containers []*containernode.ContainerNode
+
+ state types.SpecState
+ runTime time.Duration
+ startTime time.Time
+ failure types.SpecFailure
+ previousFailures bool
+
+ stateMutex *sync.Mutex
+}
+
+func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
+ spec := &Spec{
+ subject: subject,
+ containers: containers,
+ focused: subject.Flag() == types.FlagTypeFocused,
+ announceProgress: announceProgress,
+ stateMutex: &sync.Mutex{},
+ }
+
+ spec.processFlag(subject.Flag())
+ for i := len(containers) - 1; i >= 0; i-- {
+ spec.processFlag(containers[i].Flag())
+ }
+
+ return spec
+}
+
+func (spec *Spec) processFlag(flag types.FlagType) {
+ if flag == types.FlagTypeFocused {
+ spec.focused = true
+ } else if flag == types.FlagTypePending {
+ spec.setState(types.SpecStatePending)
+ }
+}
+
+func (spec *Spec) Skip() {
+ spec.setState(types.SpecStateSkipped)
+}
+
+func (spec *Spec) Failed() bool {
+ return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
+}
+
+func (spec *Spec) Passed() bool {
+ return spec.getState() == types.SpecStatePassed
+}
+
+func (spec *Spec) Flaked() bool {
+ return spec.getState() == types.SpecStatePassed && spec.previousFailures
+}
+
+func (spec *Spec) Pending() bool {
+ return spec.getState() == types.SpecStatePending
+}
+
+func (spec *Spec) Skipped() bool {
+ return spec.getState() == types.SpecStateSkipped
+}
+
+func (spec *Spec) Focused() bool {
+ return spec.focused
+}
+
+func (spec *Spec) IsMeasurement() bool {
+ return spec.subject.Type() == types.SpecComponentTypeMeasure
+}
+
+func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
+ componentTexts := make([]string, len(spec.containers)+1)
+ componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
+
+ for i, container := range spec.containers {
+ componentTexts[i] = container.Text()
+ componentCodeLocations[i] = container.CodeLocation()
+ }
+
+ componentTexts[len(spec.containers)] = spec.subject.Text()
+ componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
+
+ runTime := spec.runTime
+ if runTime == 0 && !spec.startTime.IsZero() {
+ runTime = time.Since(spec.startTime)
+ }
+
+ return &types.SpecSummary{
+ IsMeasurement: spec.IsMeasurement(),
+ NumberOfSamples: spec.subject.Samples(),
+ ComponentTexts: componentTexts,
+ ComponentCodeLocations: componentCodeLocations,
+ State: spec.getState(),
+ RunTime: runTime,
+ Failure: spec.failure,
+ Measurements: spec.measurementsReport(),
+ SuiteID: suiteID,
+ }
+}
+
+func (spec *Spec) ConcatenatedString() string {
+ s := ""
+ for _, container := range spec.containers {
+ s += container.Text() + " "
+ }
+
+ return s + spec.subject.Text()
+}
+
+func (spec *Spec) Run(writer io.Writer) {
+ if spec.getState() == types.SpecStateFailed {
+ spec.previousFailures = true
+ }
+
+ spec.startTime = time.Now()
+ defer func() {
+ spec.runTime = time.Since(spec.startTime)
+ }()
+
+ for sample := 0; sample < spec.subject.Samples(); sample++ {
+ spec.runSample(sample, writer)
+
+ if spec.getState() != types.SpecStatePassed {
+ return
+ }
+ }
+}
+
+func (spec *Spec) getState() types.SpecState {
+ spec.stateMutex.Lock()
+ defer spec.stateMutex.Unlock()
+ return spec.state
+}
+
+func (spec *Spec) setState(state types.SpecState) {
+ spec.stateMutex.Lock()
+ defer spec.stateMutex.Unlock()
+ spec.state = state
+}
+
+func (spec *Spec) runSample(sample int, writer io.Writer) {
+ spec.setState(types.SpecStatePassed)
+ spec.failure = types.SpecFailure{}
+ innerMostContainerIndexToUnwind := -1
+
+ defer func() {
+ for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
+ container := spec.containers[i]
+ for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
+ spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
+ justAfterEachState, justAfterEachFailure := justAfterEach.Run()
+ if justAfterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
+ spec.setState(justAfterEachState)
+ spec.failure = justAfterEachFailure
+ }
+ }
+ }
+
+ for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
+ container := spec.containers[i]
+ for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
+ spec.announceSetupNode(writer, "AfterEach", container, afterEach)
+ afterEachState, afterEachFailure := afterEach.Run()
+ if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
+ spec.setState(afterEachState)
+ spec.failure = afterEachFailure
+ }
+ }
+ }
+ }()
+
+ for i, container := range spec.containers {
+ innerMostContainerIndexToUnwind = i
+ for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
+ spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
+ s, f := beforeEach.Run()
+ spec.failure = f
+ spec.setState(s)
+ if spec.getState() != types.SpecStatePassed {
+ return
+ }
+ }
+ }
+
+ for _, container := range spec.containers {
+ for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
+ spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
+ s, f := justBeforeEach.Run()
+ spec.failure = f
+ spec.setState(s)
+ if spec.getState() != types.SpecStatePassed {
+ return
+ }
+ }
+ }
+
+ spec.announceSubject(writer, spec.subject)
+ s, f := spec.subject.Run()
+ spec.failure = f
+ spec.setState(s)
+}
+
+func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
+ if spec.announceProgress {
+ s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
+ writer.Write([]byte(s))
+ }
+}
+
+func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
+ if spec.announceProgress {
+ nodeType := ""
+ switch subject.Type() {
+ case types.SpecComponentTypeIt:
+ nodeType = "It"
+ case types.SpecComponentTypeMeasure:
+ nodeType = "Measure"
+ }
+ s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
+ writer.Write([]byte(s))
+ }
+}
+
+func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
+ if !spec.IsMeasurement() || spec.Failed() {
+ return map[string]*types.SpecMeasurement{}
+ }
+
+ return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go
new file mode 100644
index 000000000..8681a7206
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go
@@ -0,0 +1,13 @@
+package spec_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestSpec(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Spec Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go
new file mode 100644
index 000000000..b4a2c9c79
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go
@@ -0,0 +1,739 @@
+package spec_test
+
+import (
+ "time"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gbytes"
+
+ . "github.com/onsi/ginkgo/internal/spec"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/types"
+)
+
+var noneFlag = types.FlagTypeNone
+var focusedFlag = types.FlagTypeFocused
+var pendingFlag = types.FlagTypePending
+
+var _ = Describe("Spec", func() {
+ var (
+ failer *Failer.Failer
+ codeLocation types.CodeLocation
+ nodesThatRan []string
+ spec *Spec
+ buffer *gbytes.Buffer
+ )
+
+ newBody := func(text string, fail bool) func() {
+ return func() {
+ nodesThatRan = append(nodesThatRan, text)
+ if fail {
+ failer.Fail(text, codeLocation)
+ }
+ }
+ }
+
+ newIt := func(text string, flag types.FlagType, fail bool) *leafnodes.ItNode {
+ return leafnodes.NewItNode(text, newBody(text, fail), flag, codeLocation, 0, failer, 0)
+ }
+
+ newItWithBody := func(text string, body interface{}) *leafnodes.ItNode {
+ return leafnodes.NewItNode(text, body, noneFlag, codeLocation, 0, failer, 0)
+ }
+
+ newMeasure := func(text string, flag types.FlagType, fail bool, samples int) *leafnodes.MeasureNode {
+ return leafnodes.NewMeasureNode(text, func(Benchmarker) {
+ nodesThatRan = append(nodesThatRan, text)
+ if fail {
+ failer.Fail(text, codeLocation)
+ }
+ }, flag, codeLocation, samples, failer, 0)
+ }
+
+ newBef := func(text string, fail bool) leafnodes.BasicNode {
+ return leafnodes.NewBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
+ }
+
+ newAft := func(text string, fail bool) leafnodes.BasicNode {
+ return leafnodes.NewAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
+ }
+
+ newJusBef := func(text string, fail bool) leafnodes.BasicNode {
+ return leafnodes.NewJustBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
+ }
+
+ newJusAft := func(text string, fail bool) leafnodes.BasicNode {
+ return leafnodes.NewJustAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0)
+ }
+
+ newContainer := func(text string, flag types.FlagType, setupNodes ...leafnodes.BasicNode) *containernode.ContainerNode {
+ c := containernode.New(text, flag, codeLocation)
+ for _, node := range setupNodes {
+ c.PushSetupNode(node)
+ }
+ return c
+ }
+
+ containers := func(containers ...*containernode.ContainerNode) []*containernode.ContainerNode {
+ return containers
+ }
+
+ BeforeEach(func() {
+ buffer = gbytes.NewBuffer()
+ failer = Failer.New()
+ codeLocation = codelocation.New(0)
+ nodesThatRan = []string{}
+ })
+
+ Describe("marking specs focused and pending", func() {
+ It("should satisfy various caes", func() {
+ cases := []struct {
+ ContainerFlags []types.FlagType
+ SubjectFlag types.FlagType
+ Pending bool
+ Focused bool
+ }{
+ {[]types.FlagType{}, noneFlag, false, false},
+ {[]types.FlagType{}, focusedFlag, false, true},
+ {[]types.FlagType{}, pendingFlag, true, false},
+ {[]types.FlagType{noneFlag}, noneFlag, false, false},
+ {[]types.FlagType{focusedFlag}, noneFlag, false, true},
+ {[]types.FlagType{pendingFlag}, noneFlag, true, false},
+ {[]types.FlagType{noneFlag}, focusedFlag, false, true},
+ {[]types.FlagType{focusedFlag}, focusedFlag, false, true},
+ {[]types.FlagType{pendingFlag}, focusedFlag, true, true},
+ {[]types.FlagType{noneFlag}, pendingFlag, true, false},
+ {[]types.FlagType{focusedFlag}, pendingFlag, true, true},
+ {[]types.FlagType{pendingFlag}, pendingFlag, true, false},
+ {[]types.FlagType{focusedFlag, noneFlag}, noneFlag, false, true},
+ {[]types.FlagType{noneFlag, focusedFlag}, noneFlag, false, true},
+ {[]types.FlagType{pendingFlag, noneFlag}, noneFlag, true, false},
+ {[]types.FlagType{noneFlag, pendingFlag}, noneFlag, true, false},
+ {[]types.FlagType{focusedFlag, pendingFlag}, noneFlag, true, true},
+ }
+
+ for i, c := range cases {
+ subject := newIt("it node", c.SubjectFlag, false)
+ containers := []*containernode.ContainerNode{}
+ for _, flag := range c.ContainerFlags {
+ containers = append(containers, newContainer("container", flag))
+ }
+
+ spec := New(subject, containers, false)
+ Ω(spec.Pending()).Should(Equal(c.Pending), "Case %d: %#v", i, c)
+ Ω(spec.Focused()).Should(Equal(c.Focused), "Case %d: %#v", i, c)
+
+ if c.Pending {
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStatePending))
+ }
+ }
+ })
+ })
+
+ Describe("Skip", func() {
+ It("should be skipped", func() {
+ spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false)
+ Ω(spec.Skipped()).Should(BeFalse())
+ spec.Skip()
+ Ω(spec.Skipped()).Should(BeTrue())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStateSkipped))
+ })
+ })
+
+ Describe("IsMeasurement", func() {
+ It("should be true if the subject is a measurement node", func() {
+ spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false)
+ Ω(spec.IsMeasurement()).Should(BeFalse())
+ Ω(spec.Summary("").IsMeasurement).Should(BeFalse())
+ Ω(spec.Summary("").NumberOfSamples).Should(Equal(1))
+
+ spec = New(newMeasure("measure node", noneFlag, false, 10), containers(newContainer("container", noneFlag)), false)
+ Ω(spec.IsMeasurement()).Should(BeTrue())
+ Ω(spec.Summary("").IsMeasurement).Should(BeTrue())
+ Ω(spec.Summary("").NumberOfSamples).Should(Equal(10))
+ })
+ })
+
+ Describe("Passed", func() {
+ It("should pass when the subject passed", func() {
+ spec := New(newIt("it node", noneFlag, false), containers(), false)
+ spec.Run(buffer)
+
+ Ω(spec.Passed()).Should(BeTrue())
+ Ω(spec.Failed()).Should(BeFalse())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed))
+ Ω(spec.Summary("").Failure).Should(BeZero())
+ })
+ })
+
+ Describe("Flaked", func() {
+ It("should work if Run is called twice and gets different results", func() {
+ i := 0
+ spec := New(newItWithBody("flaky it", func() {
+ i++
+ if i == 1 {
+ failer.Fail("oops", codeLocation)
+ }
+ }), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(spec.Flaked()).Should(BeFalse())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("oops"))
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeTrue())
+ Ω(spec.Failed()).Should(BeFalse())
+ Ω(spec.Flaked()).Should(BeTrue())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed))
+ })
+ })
+
+ Describe("Failed", func() {
+ It("should be failed if the failure was panic", func() {
+ spec := New(newItWithBody("panicky it", func() {
+ panic("bam")
+ }), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStatePanicked))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("Test Panicked"))
+ Ω(spec.Summary("").Failure.ForwardedPanic).Should(Equal("bam"))
+ })
+
+ It("should be failed if the failure was a timeout", func() {
+ spec := New(newItWithBody("sleepy it", func(done Done) {}), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStateTimedOut))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("Timed out"))
+ })
+
+ It("should be failed if the failure was... a failure", func() {
+ spec := New(newItWithBody("failing it", func() {
+ failer.Fail("bam", codeLocation)
+ }), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("bam"))
+ })
+ })
+
+ Describe("Concatenated string", func() {
+ It("should concatenate the texts of the containers and the subject", func() {
+ spec := New(
+ newIt("it node", noneFlag, false),
+ containers(
+ newContainer("outer container", noneFlag),
+ newContainer("inner container", noneFlag),
+ ),
+ false,
+ )
+
+ Ω(spec.ConcatenatedString()).Should(Equal("outer container inner container it node"))
+ })
+ })
+
+ Describe("running it specs", func() {
+ Context("with just an it", func() {
+ Context("that succeeds", func() {
+ It("should run the it and report on its success", func() {
+ spec := New(newIt("it node", noneFlag, false), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeTrue())
+ Ω(spec.Failed()).Should(BeFalse())
+ Ω(nodesThatRan).Should(Equal([]string{"it node"}))
+ })
+ })
+
+ Context("that fails", func() {
+ It("should run the it and report on its success", func() {
+ spec := New(newIt("it node", noneFlag, true), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
+ Ω(nodesThatRan).Should(Equal([]string{"it node"}))
+ })
+ })
+ })
+
+ Context("with a full set of setup nodes", func() {
+ var failingNodes map[string]bool
+
+ BeforeEach(func() {
+ failingNodes = map[string]bool{}
+ })
+
+ JustBeforeEach(func() {
+ spec = New(
+ newIt("it node", noneFlag, failingNodes["it node"]),
+ containers(
+ newContainer("outer container", noneFlag,
+ newBef("outer bef A", failingNodes["outer bef A"]),
+ newBef("outer bef B", failingNodes["outer bef B"]),
+ newJusBef("outer jusbef A", failingNodes["outer jusbef A"]),
+ newJusBef("outer jusbef B", failingNodes["outer jusbef B"]),
+ newJusAft("outer jusaft A", failingNodes["outer jusaft A"]),
+ newJusAft("outer jusaft B", failingNodes["outer jusaft B"]),
+ newAft("outer aft A", failingNodes["outer aft A"]),
+ newAft("outer aft B", failingNodes["outer aft B"]),
+ ),
+ newContainer("inner container", noneFlag,
+ newBef("inner bef A", failingNodes["inner bef A"]),
+ newBef("inner bef B", failingNodes["inner bef B"]),
+ newJusBef("inner jusbef A", failingNodes["inner jusbef A"]),
+ newJusBef("inner jusbef B", failingNodes["inner jusbef B"]),
+ newJusAft("inner jusaft A", failingNodes["inner jusaft A"]),
+ newJusAft("inner jusaft B", failingNodes["inner jusaft B"]),
+ newAft("inner aft A", failingNodes["inner aft A"]),
+ newAft("inner aft B", failingNodes["inner aft B"]),
+ ),
+ ),
+ false,
+ )
+ spec.Run(buffer)
+ })
+
+ Context("that all pass", func() {
+ It("should walk through the nodes in the correct order", func() {
+ Ω(spec.Passed()).Should(BeTrue())
+ Ω(spec.Failed()).Should(BeFalse())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusbef A",
+ "inner jusbef B",
+ "it node",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ })
+ })
+
+ Context("when the subject fails", func() {
+ BeforeEach(func() {
+ failingNodes["it node"] = true
+ })
+
+ It("should run the afters", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusbef A",
+ "inner jusbef B",
+ "it node",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
+ })
+ })
+
+ Context("when an inner before fails", func() {
+ BeforeEach(func() {
+ failingNodes["inner bef A"] = true
+ })
+
+ It("should not run any other befores, but it should run the subsequent afters", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("inner bef A"))
+ })
+ })
+
+ Context("when an outer before fails", func() {
+ BeforeEach(func() {
+ failingNodes["outer bef B"] = true
+ })
+
+ It("should not run any other befores, but it should run the subsequent afters", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("outer bef B"))
+ })
+ })
+
+ Context("when an after fails", func() {
+ BeforeEach(func() {
+ failingNodes["inner aft B"] = true
+ })
+
+ It("should run all other afters, but mark the test as failed", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusbef A",
+ "inner jusbef B",
+ "it node",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("inner aft B"))
+ })
+ })
+
+ Context("when a just before each fails", func() {
+ BeforeEach(func() {
+ failingNodes["outer jusbef B"] = true
+ })
+
+ It("should run the afters, but not the subject", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusbef B"))
+ })
+ })
+
+ Context("when a just after each fails", func() {
+ BeforeEach(func() {
+ failingNodes["outer jusaft A"] = true
+ })
+
+ It("should run all other afters, but mark the test as failed", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusbef A",
+ "inner jusbef B",
+ "it node",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusaft A"))
+ })
+ })
+
+ Context("when an after fails after an earlier node has failed", func() {
+ BeforeEach(func() {
+ failingNodes["it node"] = true
+ failingNodes["inner aft B"] = true
+ })
+
+ It("should record the earlier failure", func() {
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "outer bef A",
+ "outer bef B",
+ "inner bef A",
+ "inner bef B",
+ "outer jusbef A",
+ "outer jusbef B",
+ "inner jusbef A",
+ "inner jusbef B",
+ "it node",
+ "inner jusaft A",
+ "inner jusaft B",
+ "outer jusaft A",
+ "outer jusaft B",
+ "inner aft A",
+ "inner aft B",
+ "outer aft A",
+ "outer aft B",
+ }))
+ Ω(spec.Summary("").Failure.Message).Should(Equal("it node"))
+ })
+ })
+ })
+ })
+
+ Describe("running measurement specs", func() {
+ Context("when the measurement succeeds", func() {
+ It("should run N samples", func() {
+ spec = New(
+ newMeasure("measure node", noneFlag, false, 3),
+ containers(
+ newContainer("container", noneFlag,
+ newBef("bef A", false),
+ newJusBef("jusbef A", false),
+ newJusAft("jusaft A", false),
+ newAft("aft A", false),
+ ),
+ ),
+ false,
+ )
+ spec.Run(buffer)
+
+ Ω(spec.Passed()).Should(BeTrue())
+ Ω(spec.Failed()).Should(BeFalse())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "bef A",
+ "jusbef A",
+ "measure node",
+ "jusaft A",
+ "aft A",
+ "bef A",
+ "jusbef A",
+ "measure node",
+ "jusaft A",
+ "aft A",
+ "bef A",
+ "jusbef A",
+ "measure node",
+ "jusaft A",
+ "aft A",
+ }))
+ })
+ })
+
+ Context("when the measurement fails", func() {
+ It("should bail after the failure occurs", func() {
+ spec = New(
+ newMeasure("measure node", noneFlag, true, 3),
+ containers(
+ newContainer("container", noneFlag,
+ newBef("bef A", false),
+ newJusBef("jusbef A", false),
+ newJusAft("jusaft A", false),
+ newAft("aft A", false),
+ ),
+ ),
+ false,
+ )
+ spec.Run(buffer)
+
+ Ω(spec.Passed()).Should(BeFalse())
+ Ω(spec.Failed()).Should(BeTrue())
+ Ω(nodesThatRan).Should(Equal([]string{
+ "bef A",
+ "jusbef A",
+ "measure node",
+ "jusaft A",
+ "aft A",
+ }))
+ })
+ })
+ })
+
+ Describe("Summary", func() {
+ var (
+ subjectCodeLocation types.CodeLocation
+ outerContainerCodeLocation types.CodeLocation
+ innerContainerCodeLocation types.CodeLocation
+ summary *types.SpecSummary
+ )
+
+ BeforeEach(func() {
+ subjectCodeLocation = codelocation.New(0)
+ outerContainerCodeLocation = codelocation.New(0)
+ innerContainerCodeLocation = codelocation.New(0)
+
+ spec = New(
+ leafnodes.NewItNode("it node", func() {
+ time.Sleep(10 * time.Millisecond)
+ }, noneFlag, subjectCodeLocation, 0, failer, 0),
+ containers(
+ containernode.New("outer container", noneFlag, outerContainerCodeLocation),
+ containernode.New("inner container", noneFlag, innerContainerCodeLocation),
+ ),
+ false,
+ )
+
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeTrue())
+ summary = spec.Summary("suite id")
+ })
+
+ It("should have the suite id", func() {
+ Ω(summary.SuiteID).Should(Equal("suite id"))
+ })
+
+ It("should have the component texts and code locations", func() {
+ Ω(summary.ComponentTexts).Should(Equal([]string{"outer container", "inner container", "it node"}))
+ Ω(summary.ComponentCodeLocations).Should(Equal([]types.CodeLocation{outerContainerCodeLocation, innerContainerCodeLocation, subjectCodeLocation}))
+ })
+
+ It("should have a runtime", func() {
+ Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond))
+ })
+
+ It("should have a runtime which remains consistent after spec run", func() {
+ totalRunTime := summary.RunTime
+ Ω(totalRunTime).Should(BeNumerically(">=", 10*time.Millisecond))
+
+ Consistently(func() time.Duration { return spec.Summary("suite id").RunTime }).Should(Equal(totalRunTime))
+ })
+
+ It("should not be a measurement, or have a measurement summary", func() {
+ Ω(summary.IsMeasurement).Should(BeFalse())
+ Ω(summary.Measurements).Should(BeEmpty())
+ })
+ })
+
+ Describe("Summaries for measurements", func() {
+ var summary *types.SpecSummary
+
+ BeforeEach(func() {
+ spec = New(leafnodes.NewMeasureNode("measure node", func(b Benchmarker) {
+ b.RecordValue("a value", 7, "some info")
+ b.RecordValueWithPrecision("another value", 8, "ns", 5, "more info")
+ }, noneFlag, codeLocation, 4, failer, 0), containers(), false)
+ spec.Run(buffer)
+ Ω(spec.Passed()).Should(BeTrue())
+ summary = spec.Summary("suite id")
+ })
+
+ It("should include the number of samples", func() {
+ Ω(summary.NumberOfSamples).Should(Equal(4))
+ })
+
+ It("should be a measurement", func() {
+ Ω(summary.IsMeasurement).Should(BeTrue())
+ })
+
+ It("should have the measurements report", func() {
+ Ω(summary.Measurements).Should(HaveKey("a value"))
+ report := summary.Measurements["a value"]
+ Ω(report.Name).Should(Equal("a value"))
+ Ω(report.Info).Should(Equal("some info"))
+ Ω(report.Results).Should(Equal([]float64{7, 7, 7, 7}))
+
+ Ω(summary.Measurements).Should(HaveKey("another value"))
+ report = summary.Measurements["another value"]
+ Ω(report.Name).Should(Equal("another value"))
+ Ω(report.Info).Should(Equal("more info"))
+ Ω(report.Results).Should(Equal([]float64{8, 8, 8, 8}))
+ Ω(report.Units).Should(Equal("ns"))
+ Ω(report.Precision).Should(Equal(5))
+ })
+ })
+
+ Describe("When told to emit progress", func() {
+ It("should emit progress to the writer as it runs Befores, JustBefores, Afters, and Its", func() {
+ spec = New(
+ newIt("it node", noneFlag, false),
+ containers(
+ newContainer("outer container", noneFlag,
+ newBef("outer bef A", false),
+ newJusBef("outer jusbef A", false),
+ newJusAft("outer jusaft A", false),
+ newAft("outer aft A", false),
+ ),
+ newContainer("inner container", noneFlag,
+ newBef("inner bef A", false),
+ newJusBef("inner jusbef A", false),
+ newJusAft("inner jusaft A", false),
+ newAft("inner aft A", false),
+ ),
+ ),
+ true,
+ )
+ spec.Run(buffer)
+
+ Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] outer container`))
+ Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] inner container`))
+ Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] outer container`))
+ Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] inner container`))
+ Ω(buffer).Should(gbytes.Say(`\[It\] it node`))
+ Ω(buffer).Should(gbytes.Say(`\[JustAfterEach\] inner container`))
+ Ω(buffer).Should(gbytes.Say(`\[JustAfterEach\] outer container`))
+ Ω(buffer).Should(gbytes.Say(`\[AfterEach\] inner container`))
+ Ω(buffer).Should(gbytes.Say(`\[AfterEach\] outer container`))
+ })
+
+ It("should emit progress to the writer as it runs Befores, JustBefores, JustAfters, Afters, and Measures", func() {
+ spec = New(
+ newMeasure("measure node", noneFlag, false, 2),
+ containers(),
+ true,
+ )
+ spec.Run(buffer)
+
+ Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`))
+ Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
new file mode 100644
index 000000000..006185ab5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
@@ -0,0 +1,123 @@
+package spec
+
+import (
+ "math/rand"
+ "regexp"
+ "sort"
+)
+
+type Specs struct {
+ specs []*Spec
+ hasProgrammaticFocus bool
+ RegexScansFilePath bool
+}
+
+func NewSpecs(specs []*Spec) *Specs {
+ return &Specs{
+ specs: specs,
+ }
+}
+
+func (e *Specs) Specs() []*Spec {
+ return e.specs
+}
+
+func (e *Specs) HasProgrammaticFocus() bool {
+ return e.hasProgrammaticFocus
+}
+
+func (e *Specs) Shuffle(r *rand.Rand) {
+ sort.Sort(e)
+ permutation := r.Perm(len(e.specs))
+ shuffledSpecs := make([]*Spec, len(e.specs))
+ for i, j := range permutation {
+ shuffledSpecs[i] = e.specs[j]
+ }
+ e.specs = shuffledSpecs
+}
+
+func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
+ if focusString == "" && skipString == "" {
+ e.applyProgrammaticFocus()
+ } else {
+ e.applyRegExpFocusAndSkip(description, focusString, skipString)
+ }
+}
+
+func (e *Specs) applyProgrammaticFocus() {
+ e.hasProgrammaticFocus = false
+ for _, spec := range e.specs {
+ if spec.Focused() && !spec.Pending() {
+ e.hasProgrammaticFocus = true
+ break
+ }
+ }
+
+ if e.hasProgrammaticFocus {
+ for _, spec := range e.specs {
+ if !spec.Focused() {
+ spec.Skip()
+ }
+ }
+ }
+}
+
+// toMatch returns a []byte for the regexp matchers to run against. New matching
+// behaviour should be appended here.
+func (e *Specs) toMatch(description string, spec *Spec) []byte {
+ if e.RegexScansFilePath {
+ return []byte(
+ description + " " +
+ spec.ConcatenatedString() + " " +
+ spec.subject.CodeLocation().FileName)
+ } else {
+ return []byte(
+ description + " " +
+ spec.ConcatenatedString())
+ }
+}
+
+func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
+ for _, spec := range e.specs {
+ matchesFocus := true
+ matchesSkip := false
+
+ toMatch := e.toMatch(description, spec)
+
+ if focusString != "" {
+ focusFilter := regexp.MustCompile(focusString)
+ matchesFocus = focusFilter.Match(toMatch)
+ }
+
+ if skipString != "" {
+ skipFilter := regexp.MustCompile(skipString)
+ matchesSkip = skipFilter.Match(toMatch)
+ }
+
+ if !matchesFocus || matchesSkip {
+ spec.Skip()
+ }
+ }
+}
+
+func (e *Specs) SkipMeasurements() {
+ for _, spec := range e.specs {
+ if spec.IsMeasurement() {
+ spec.Skip()
+ }
+ }
+}
+
+// sort.Interface
+
+func (e *Specs) Len() int {
+ return len(e.specs)
+}
+
+func (e *Specs) Less(i, j int) bool {
+ return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
+}
+
+func (e *Specs) Swap(i, j int) {
+ e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
+}
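For orientation, here is a minimal sketch of how a caller is expected to drive this type. The spec construction mirrors the test helpers in the file below; the focused flag constant is assumed to be types.FlagTypeFocused, and the internal imports only compile from inside the ginkgo tree.

package main

import (
	"fmt"
	"math/rand"

	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/types"
)

func main() {
	newSpec := func(text string, flag types.FlagType) *spec.Spec {
		subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
		return spec.New(subject, []*containernode.ContainerNode{}, false)
	}

	specs := spec.NewSpecs([]*spec.Spec{
		newSpec("A", types.FlagTypeFocused),
		newSpec("B", types.FlagTypeNone),
	})

	// Deterministic shuffle, then programmatic focus: "B" gets skipped
	// because "A" is focused and no focus/skip regexps were supplied.
	specs.Shuffle(rand.New(rand.NewSource(17)))
	specs.ApplyFocus("suite description", "", "")
	fmt.Println(specs.HasProgrammaticFocus()) // true
}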
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go
new file mode 100644
index 000000000..066fbbb3a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go
@@ -0,0 +1,287 @@
+package spec_test
+
+import (
+ "math/rand"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/spec"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Specs", func() {
+ var specs *Specs
+
+ newSpec := func(text string, flag types.FlagType) *Spec {
+ subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
+ return New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ newMeasureSpec := func(text string, flag types.FlagType) *Spec {
+ subject := leafnodes.NewMeasureNode(text, func(Benchmarker) {}, flag, codelocation.New(0), 0, nil, 0)
+ return New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ newSpecs := func(args ...interface{}) *Specs {
+ specs := []*Spec{}
+ for index := 0; index < len(args)-1; index += 2 {
+ specs = append(specs, newSpec(args[index].(string), args[index+1].(types.FlagType)))
+ }
+ return NewSpecs(specs)
+ }
+
+ specTexts := func(specs *Specs) []string {
+ texts := []string{}
+ for _, spec := range specs.Specs() {
+ texts = append(texts, spec.ConcatenatedString())
+ }
+ return texts
+ }
+
+ willRunTexts := func(specs *Specs) []string {
+ texts := []string{}
+ for _, spec := range specs.Specs() {
+ if !(spec.Skipped() || spec.Pending()) {
+ texts = append(texts, spec.ConcatenatedString())
+ }
+ }
+ return texts
+ }
+
+ skippedTexts := func(specs *Specs) []string {
+ texts := []string{}
+ for _, spec := range specs.Specs() {
+ if spec.Skipped() {
+ texts = append(texts, spec.ConcatenatedString())
+ }
+ }
+ return texts
+ }
+
+ pendingTexts := func(specs *Specs) []string {
+ texts := []string{}
+ for _, spec := range specs.Specs() {
+ if spec.Pending() {
+ texts = append(texts, spec.ConcatenatedString())
+ }
+ }
+ return texts
+ }
+
+ Describe("Shuffling specs", func() {
+ It("should shuffle the specs using the passed in randomizer", func() {
+ specs17 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
+ specs17.Shuffle(rand.New(rand.NewSource(17)))
+ texts17 := specTexts(specs17)
+
+ specs17Again := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
+ specs17Again.Shuffle(rand.New(rand.NewSource(17)))
+ texts17Again := specTexts(specs17Again)
+
+ specs15 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
+ specs15.Shuffle(rand.New(rand.NewSource(15)))
+ texts15 := specTexts(specs15)
+
+ specsUnshuffled := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag)
+ textsUnshuffled := specTexts(specsUnshuffled)
+
+ Ω(textsUnshuffled).Should(Equal([]string{"C", "A", "B"}))
+
+ Ω(texts17).Should(Equal(texts17Again))
+ Ω(texts17).ShouldNot(Equal(texts15))
+ Ω(texts17).ShouldNot(Equal(textsUnshuffled))
+ Ω(texts15).ShouldNot(Equal(textsUnshuffled))
+
+ Ω(texts17).Should(HaveLen(3))
+ Ω(texts17).Should(ContainElement("A"))
+ Ω(texts17).Should(ContainElement("B"))
+ Ω(texts17).Should(ContainElement("C"))
+
+ Ω(texts15).Should(HaveLen(3))
+ Ω(texts15).Should(ContainElement("A"))
+ Ω(texts15).Should(ContainElement("B"))
+ Ω(texts15).Should(ContainElement("C"))
+ })
+ })
+
+ Describe("with no programmatic focus", func() {
+ BeforeEach(func() {
+ specs = newSpecs("A1", noneFlag, "A2", noneFlag, "B1", noneFlag, "B2", pendingFlag)
+ specs.ApplyFocus("", "", "")
+ })
+
+ It("should not report as having programmatic specs", func() {
+ Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
+ })
+ })
+
+ Describe("Applying focus/skip", func() {
+ var description, focusString, skipString string
+
+ BeforeEach(func() {
+ description, focusString, skipString = "", "", ""
+ })
+
+ JustBeforeEach(func() {
+ specs = newSpecs("A1", focusedFlag, "A2", noneFlag, "B1", focusedFlag, "B2", pendingFlag)
+ specs.ApplyFocus(description, focusString, skipString)
+ })
+
+ Context("with neither a focus string nor a skip string", func() {
+ It("should apply the programmatic focus", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "B1"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B2"}))
+ Ω(pendingTexts(specs)).Should(BeEmpty())
+ })
+
+ It("should report as having programmatic specs", func() {
+ Ω(specs.HasProgrammaticFocus()).Should(BeTrue())
+ })
+ })
+
+ Context("with a focus regexp", func() {
+ BeforeEach(func() {
+ focusString = "A"
+ })
+
+ It("should override the programmatic focus", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"B1", "B2"}))
+ Ω(pendingTexts(specs)).Should(BeEmpty())
+ })
+
+ It("should not report as having programmatic specs", func() {
+ Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
+ })
+ })
+
+ Context("with a focus regexp", func() {
+ BeforeEach(func() {
+ focusString = "B"
+ })
+
+ It("should not override any pendings", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"B1"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"}))
+ Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
+ })
+ })
+
+ Context("with a description", func() {
+ BeforeEach(func() {
+ description = "C"
+ focusString = "C"
+ })
+
+ It("should include the description in the focus determination", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2", "B1"}))
+ Ω(skippedTexts(specs)).Should(BeEmpty())
+ Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
+ })
+ })
+
+ Context("with a description", func() {
+ BeforeEach(func() {
+ description = "C"
+ skipString = "C"
+ })
+
+ It("should include the description in the focus determination", func() {
+ Ω(willRunTexts(specs)).Should(BeEmpty())
+ Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2", "B1", "B2"}))
+ Ω(pendingTexts(specs)).Should(BeEmpty())
+ })
+ })
+
+ Context("with a skip regexp", func() {
+ BeforeEach(func() {
+ skipString = "A"
+ })
+
+ It("should override the programmatic focus", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"B1"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"}))
+ Ω(pendingTexts(specs)).Should(Equal([]string{"B2"}))
+ })
+
+ It("should not report as having programmatic specs", func() {
+ Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
+ })
+ })
+
+ Context("with both a focus and a skip regexp", func() {
+ BeforeEach(func() {
+ focusString = "1"
+ skipString = "B"
+ })
+
+ It("should AND the two", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A1"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B1", "B2"}))
+ Ω(pendingTexts(specs)).Should(BeEmpty())
+ })
+
+ It("should not report as having programmatic specs", func() {
+ Ω(specs.HasProgrammaticFocus()).Should(BeFalse())
+ })
+ })
+ })
+
+ Describe("With a focused spec within a pending context and a pending spec within a focused context", func() {
+ BeforeEach(func() {
+ pendingInFocused := New(
+ leafnodes.NewItNode("PendingInFocused", func() {}, pendingFlag, codelocation.New(0), 0, nil, 0),
+ []*containernode.ContainerNode{
+ containernode.New("", focusedFlag, codelocation.New(0)),
+ }, false)
+
+ focusedInPending := New(
+ leafnodes.NewItNode("FocusedInPending", func() {}, focusedFlag, codelocation.New(0), 0, nil, 0),
+ []*containernode.ContainerNode{
+ containernode.New("", pendingFlag, codelocation.New(0)),
+ }, false)
+
+ specs = NewSpecs([]*Spec{
+ newSpec("A", noneFlag),
+ newSpec("B", noneFlag),
+ pendingInFocused,
+ focusedInPending,
+ })
+ specs.ApplyFocus("", "", "")
+ })
+
+ It("should not have a programmatic focus and should run all tests", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"}))
+ Ω(skippedTexts(specs)).Should(BeEmpty())
+ Ω(pendingTexts(specs)).Should(ConsistOf(ContainSubstring("PendingInFocused"), ContainSubstring("FocusedInPending")))
+ })
+ })
+
+ Describe("skipping measurements", func() {
+ BeforeEach(func() {
+ specs = NewSpecs([]*Spec{
+ newSpec("A", noneFlag),
+ newSpec("B", noneFlag),
+ newSpec("C", pendingFlag),
+ newMeasureSpec("measurementA", noneFlag),
+ newMeasureSpec("measurementB", pendingFlag),
+ })
+ })
+
+ It("should skip measurements", func() {
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B", "measurementA"}))
+ Ω(skippedTexts(specs)).Should(BeEmpty())
+ Ω(pendingTexts(specs)).Should(Equal([]string{"C", "measurementB"}))
+
+ specs.SkipMeasurements()
+
+ Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"}))
+ Ω(skippedTexts(specs)).Should(Equal([]string{"measurementA", "measurementB"}))
+ Ω(pendingTexts(specs)).Should(Equal([]string{"C"}))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
new file mode 100644
index 000000000..82272554a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
@@ -0,0 +1,55 @@
+package spec_iterator
+
+func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
+ if length == 0 {
+ return 0, 0
+ }
+
+ // We have at least as many nodes as tests. Trivial case: each node runs at most one test.
+ if parallelTotal >= length {
+ if parallelNode > length {
+ return 0, 0
+ } else {
+ return parallelNode - 1, 1
+ }
+ }
+
+ // This is the minimum number of tests that a node will be required to run
+ minTestsPerNode := length / parallelTotal
+
+ // This is the maximum number of tests that a node may be required to run.
+ // The algorithm guarantees that this equals either the minimum number
+ // or the minimum plus one.
+ maxTestsPerNode := minTestsPerNode
+ if length%parallelTotal != 0 {
+ maxTestsPerNode++
+ }
+
+ // Number of nodes that will have to run the maximum number of tests per node
+ numMaxLoadNodes := length % parallelTotal
+
+ // Number of nodes that precede the current node and will have to run the maximum number of tests per node
+ var numPrecedingMaxLoadNodes int
+ if parallelNode > numMaxLoadNodes {
+ numPrecedingMaxLoadNodes = numMaxLoadNodes
+ } else {
+ numPrecedingMaxLoadNodes = parallelNode - 1
+ }
+
+ // Number of nodes that precede the current node and will have to run the minimum number of tests per node
+ var numPrecedingMinLoadNodes int
+ if parallelNode <= numMaxLoadNodes {
+ numPrecedingMinLoadNodes = 0
+ } else {
+ numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
+ }
+
+ // Evaluate the test start index and number of tests to run
+ startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
+ if parallelNode > numMaxLoadNodes {
+ count = minTestsPerNode
+ } else {
+ count = maxTestsPerNode
+ }
+ return
+}
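To make the arithmetic concrete: for 11 tests on 7 nodes, minTestsPerNode = 11/7 = 1, maxTestsPerNode = 2, and numMaxLoadNodes = 11%7 = 4, so nodes 1 through 4 run two tests each and nodes 5 through 7 run one, exactly as the test file below asserts. A small sketch (internal import caveat aside) that prints every node's range:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/spec_iterator"
)

func main() {
	// 11 tests on 7 nodes yields ranges [0,2) [2,4) [4,6) [6,8) [8,9) [9,10) [10,11).
	for node := 1; node <= 7; node++ {
		start, count := spec_iterator.ParallelizedIndexRange(11, 7, node)
		fmt.Printf("node %d runs specs [%d, %d)\n", node, start, start+count)
	}
}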
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
new file mode 100644
index 000000000..65da9837c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go
@@ -0,0 +1,149 @@
+package spec_iterator_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/spec_iterator"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ParallelizedIndexRange", func() {
+ var startIndex, count int
+
+ It("should return the correct index range for 4 tests on 2 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(4, 2, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(4, 2, 2)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(2))
+ })
+
+ It("should return the correct index range for 5 tests on 2 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 2, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(3))
+
+ startIndex, count = ParallelizedIndexRange(5, 2, 2)
+ Ω(startIndex).Should(Equal(3))
+ Ω(count).Should(Equal(2))
+ })
+
+ It("should return the correct index range for 5 tests on 3 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 3, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(5, 3, 2)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(5, 3, 3)
+ Ω(startIndex).Should(Equal(4))
+ Ω(count).Should(Equal(1))
+ })
+
+ It("should return the correct index range for 5 tests on 4 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 4, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(5, 4, 2)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 4, 3)
+ Ω(startIndex).Should(Equal(3))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 4, 4)
+ Ω(startIndex).Should(Equal(4))
+ Ω(count).Should(Equal(1))
+ })
+
+ It("should return the correct index range for 5 tests on 5 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 5, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 5, 2)
+ Ω(startIndex).Should(Equal(1))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 5, 3)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 5, 4)
+ Ω(startIndex).Should(Equal(3))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 5, 5)
+ Ω(startIndex).Should(Equal(4))
+ Ω(count).Should(Equal(1))
+ })
+
+ It("should return the correct index range for 5 tests on 6 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 6, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 6, 2)
+ Ω(startIndex).Should(Equal(1))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 6, 3)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 6, 4)
+ Ω(startIndex).Should(Equal(3))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 6, 5)
+ Ω(startIndex).Should(Equal(4))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(5, 6, 6)
+ Ω(count).Should(Equal(0))
+ })
+
+ It("should return the correct index range for 5 tests on 7 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(5, 7, 6)
+ Ω(count).Should(Equal(0))
+
+ startIndex, count = ParallelizedIndexRange(5, 7, 7)
+ Ω(count).Should(Equal(0))
+ })
+
+ It("should return the correct index range for 11 tests on 7 nodes", func() {
+ startIndex, count = ParallelizedIndexRange(11, 7, 1)
+ Ω(startIndex).Should(Equal(0))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 2)
+ Ω(startIndex).Should(Equal(2))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 3)
+ Ω(startIndex).Should(Equal(4))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 4)
+ Ω(startIndex).Should(Equal(6))
+ Ω(count).Should(Equal(2))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 5)
+ Ω(startIndex).Should(Equal(8))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 6)
+ Ω(startIndex).Should(Equal(9))
+ Ω(count).Should(Equal(1))
+
+ startIndex, count = ParallelizedIndexRange(11, 7, 7)
+ Ω(startIndex).Should(Equal(10))
+ Ω(count).Should(Equal(1))
+ })
+
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
new file mode 100644
index 000000000..99f548bca
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
@@ -0,0 +1,59 @@
+package spec_iterator
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/onsi/ginkgo/internal/spec"
+)
+
+type ParallelIterator struct {
+ specs []*spec.Spec
+ host string
+ client *http.Client
+}
+
+func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
+ return &ParallelIterator{
+ specs: specs,
+ host: host,
+ client: &http.Client{},
+ }
+}
+
+func (s *ParallelIterator) Next() (*spec.Spec, error) {
+ resp, err := s.client.Get(s.host + "/counter")
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
+ }
+
+ var counter Counter
+ err = json.NewDecoder(resp.Body).Decode(&counter)
+ if err != nil {
+ return nil, err
+ }
+
+ if counter.Index >= len(s.specs) {
+ return nil, ErrClosed
+ }
+
+ return s.specs[counter.Index], nil
+}
+
+func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
+ return len(s.specs)
+}
+
+func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+ return -1, false
+}
+
+func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+ return -1, false
+}
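The iterator only assumes an HTTP endpoint at <host>/counter that hands out monotonically increasing indices as JSON; in ginkgo that endpoint is served by the parallel spec server elsewhere in the tree. As a hypothetical stand-in for illustration, a compatible counter could be as small as the following (the local counter struct mirrors the Counter type defined later in this package):

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"sync/atomic"
)

// counter mirrors spec_iterator.Counter's wire format.
type counter struct {
	Index int `json:"index"`
}

func main() {
	var next int64 // the first poll receives {"index": 0}
	http.HandleFunc("/counter", func(w http.ResponseWriter, r *http.Request) {
		// Hand the next spec index to whichever node polls first; once the
		// index passes len(specs), every node's Next() returns ErrClosed.
		index := atomic.AddInt64(&next, 1) - 1
		json.NewEncoder(w).Encode(counter{Index: int(index)})
	})
	log.Fatal(http.ListenAndServe(":8765", nil))
}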
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
new file mode 100644
index 000000000..c5a762fd5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
@@ -0,0 +1,112 @@
+package spec_iterator_test
+
+import (
+ "net/http"
+
+ . "github.com/onsi/ginkgo/internal/spec_iterator"
+ "github.com/onsi/gomega/ghttp"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ "github.com/onsi/ginkgo/types"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ParallelSpecIterator", func() {
+ var specs []*spec.Spec
+ var iterator *ParallelIterator
+ var server *ghttp.Server
+
+ newSpec := func(text string, flag types.FlagType) *spec.Spec {
+ subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ BeforeEach(func() {
+ specs = []*spec.Spec{
+ newSpec("A", types.FlagTypePending),
+ newSpec("B", types.FlagTypeNone),
+ newSpec("C", types.FlagTypeNone),
+ newSpec("D", types.FlagTypeNone),
+ }
+ specs[3].Skip()
+
+ server = ghttp.NewServer()
+
+ iterator = NewParallelIterator(specs, "http://"+server.Addr())
+ })
+
+ AfterEach(func() {
+ server.Close()
+ })
+
+ It("should report the total number of specs", func() {
+ Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
+ })
+
+ It("should not report the number to be processed", func() {
+ n, known := iterator.NumberOfSpecsToProcessIfKnown()
+ Ω(n).Should(Equal(-1))
+ Ω(known).Should(BeFalse())
+ })
+
+ It("should not report the number that will be run", func() {
+ n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
+ Ω(n).Should(Equal(-1))
+ Ω(known).Should(BeFalse())
+ })
+
+ Describe("iterating", func() {
+ Describe("when the server returns well-formed responses", func() {
+ BeforeEach(func() {
+ server.AppendHandlers(
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 0}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 1}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 3}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 4}),
+ )
+ })
+
+ It("should return the specs in question", func() {
+ Ω(iterator.Next()).Should(Equal(specs[0]))
+ Ω(iterator.Next()).Should(Equal(specs[1]))
+ Ω(iterator.Next()).Should(Equal(specs[3]))
+ spec, err := iterator.Next()
+ Ω(spec).Should(BeNil())
+ Ω(err).Should(MatchError(ErrClosed))
+ })
+ })
+
+ Describe("when the server 404s", func() {
+ BeforeEach(func() {
+ server.AppendHandlers(
+ ghttp.RespondWith(http.StatusNotFound, ""),
+ )
+ })
+
+ It("should return an error", func() {
+ spec, err := iterator.Next()
+ Ω(spec).Should(BeNil())
+ Ω(err).Should(MatchError("unexpected status code 404"))
+ })
+ })
+
+ Describe("when the server returns gibberish", func() {
+ BeforeEach(func() {
+ server.AppendHandlers(
+ ghttp.RespondWith(http.StatusOK, "ß"),
+ )
+ })
+
+ It("should error", func() {
+ spec, err := iterator.Next()
+ Ω(spec).Should(BeNil())
+ Ω(err).ShouldNot(BeNil())
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
new file mode 100644
index 000000000..a51c93b8b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
@@ -0,0 +1,45 @@
+package spec_iterator
+
+import (
+ "github.com/onsi/ginkgo/internal/spec"
+)
+
+type SerialIterator struct {
+ specs []*spec.Spec
+ index int
+}
+
+func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
+ return &SerialIterator{
+ specs: specs,
+ index: 0,
+ }
+}
+
+func (s *SerialIterator) Next() (*spec.Spec, error) {
+ if s.index >= len(s.specs) {
+ return nil, ErrClosed
+ }
+
+ spec := s.specs[s.index]
+ s.index += 1
+ return spec, nil
+}
+
+func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
+ return len(s.specs)
+}
+
+func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+ return len(s.specs), true
+}
+
+func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+ count := 0
+ for _, s := range s.specs {
+ if !s.Skipped() && !s.Pending() {
+ count += 1
+ }
+ }
+ return count, true
+}
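Both iterators share the same consumption contract: keep calling Next until it returns ErrClosed. The real consuming loop lives in specrunner further down this diff; a hypothetical self-contained version, assuming the internal imports, looks like:

package iterdemo

import (
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/spec_iterator"
)

// drain shows the Next/ErrClosed contract shared by every iterator in this package.
func drain(it spec_iterator.SpecIterator) ([]*spec.Spec, error) {
	var collected []*spec.Spec
	for {
		s, err := it.Next()
		if err == spec_iterator.ErrClosed {
			return collected, nil // normal termination: the iterator is exhausted
		}
		if err != nil {
			return collected, err // e.g. an HTTP failure from the parallel iterator
		}
		collected = append(collected, s)
	}
}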
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
new file mode 100644
index 000000000..dde4a344e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go
@@ -0,0 +1,64 @@
+package spec_iterator_test
+
+import (
+ . "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ "github.com/onsi/ginkgo/types"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("SerialSpecIterator", func() {
+ var specs []*spec.Spec
+ var iterator *SerialIterator
+
+ newSpec := func(text string, flag types.FlagType) *spec.Spec {
+ subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ BeforeEach(func() {
+ specs = []*spec.Spec{
+ newSpec("A", types.FlagTypePending),
+ newSpec("B", types.FlagTypeNone),
+ newSpec("C", types.FlagTypeNone),
+ newSpec("D", types.FlagTypeNone),
+ }
+ specs[3].Skip()
+
+ iterator = NewSerialIterator(specs)
+ })
+
+ It("should report the total number of specs", func() {
+ Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
+ })
+
+ It("should report the number to be processed", func() {
+ n, known := iterator.NumberOfSpecsToProcessIfKnown()
+ Ω(n).Should(Equal(4))
+ Ω(known).Should(BeTrue())
+ })
+
+ It("should report the number that will be run", func() {
+ n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
+ Ω(n).Should(Equal(2))
+ Ω(known).Should(BeTrue())
+ })
+
+ Describe("iterating", func() {
+ It("should return the specs in order", func() {
+ Ω(iterator.Next()).Should(Equal(specs[0]))
+ Ω(iterator.Next()).Should(Equal(specs[1]))
+ Ω(iterator.Next()).Should(Equal(specs[2]))
+ Ω(iterator.Next()).Should(Equal(specs[3]))
+ spec, err := iterator.Next()
+ Ω(spec).Should(BeNil())
+ Ω(err).Should(MatchError(ErrClosed))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
new file mode 100644
index 000000000..ad4a3ea3c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
@@ -0,0 +1,47 @@
+package spec_iterator
+
+import "github.com/onsi/ginkgo/internal/spec"
+
+type ShardedParallelIterator struct {
+ specs []*spec.Spec
+ index int
+ maxIndex int
+}
+
+func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
+ startIndex, count := ParallelizedIndexRange(len(specs), total, node)
+
+ return &ShardedParallelIterator{
+ specs: specs,
+ index: startIndex,
+ maxIndex: startIndex + count,
+ }
+}
+
+func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
+ if s.index >= s.maxIndex {
+ return nil, ErrClosed
+ }
+
+ spec := s.specs[s.index]
+ s.index += 1
+ return spec, nil
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
+ return len(s.specs)
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+ return s.maxIndex - s.index, true
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+ count := 0
+ for i := s.index; i < s.maxIndex; i += 1 {
+ if !s.specs[i].Skipped() && !s.specs[i].Pending() {
+ count += 1
+ }
+ }
+ return count, true
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
new file mode 100644
index 000000000..c3786e03a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go
@@ -0,0 +1,62 @@
+package spec_iterator_test
+
+import (
+ . "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ "github.com/onsi/ginkgo/types"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ShardedParallelSpecIterator", func() {
+ var specs []*spec.Spec
+ var iterator *ShardedParallelIterator
+
+ newSpec := func(text string, flag types.FlagType) *spec.Spec {
+ subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0)
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ BeforeEach(func() {
+ specs = []*spec.Spec{
+ newSpec("A", types.FlagTypePending),
+ newSpec("B", types.FlagTypeNone),
+ newSpec("C", types.FlagTypeNone),
+ newSpec("D", types.FlagTypeNone),
+ }
+ specs[3].Skip()
+
+ iterator = NewShardedParallelIterator(specs, 2, 1)
+ })
+
+ It("should report the total number of specs", func() {
+ Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4))
+ })
+
+ It("should report the number to be processed", func() {
+ n, known := iterator.NumberOfSpecsToProcessIfKnown()
+ Ω(n).Should(Equal(2))
+ Ω(known).Should(BeTrue())
+ })
+
+ It("should report the number that will be run", func() {
+ n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown()
+ Ω(n).Should(Equal(1))
+ Ω(known).Should(BeTrue())
+ })
+
+ Describe("iterating", func() {
+ It("should return the specs in order", func() {
+ Ω(iterator.Next()).Should(Equal(specs[0]))
+ Ω(iterator.Next()).Should(Equal(specs[1]))
+ spec, err := iterator.Next()
+ Ω(spec).Should(BeNil())
+ Ω(err).Should(MatchError(ErrClosed))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
new file mode 100644
index 000000000..74bffad64
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
@@ -0,0 +1,20 @@
+package spec_iterator
+
+import (
+ "errors"
+
+ "github.com/onsi/ginkgo/internal/spec"
+)
+
+var ErrClosed = errors.New("no more specs to run")
+
+type SpecIterator interface {
+ Next() (*spec.Spec, error)
+ NumberOfSpecsPriorToIteration() int
+ NumberOfSpecsToProcessIfKnown() (int, bool)
+ NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
+}
+
+type Counter struct {
+ Index int `json:"index"`
+}
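All three iterators defined above satisfy this interface. The vendored source carries no compile-time check for that; if one were added, it would be the usual Go idiom:

package spec_iterator

// Compile-time assertions (illustrative only, not part of the vendored file)
// that each concrete iterator implements SpecIterator.
var (
	_ SpecIterator = (*SerialIterator)(nil)
	_ SpecIterator = (*ParallelIterator)(nil)
	_ SpecIterator = (*ShardedParallelIterator)(nil)
)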
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
new file mode 100644
index 000000000..5c08a77e3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go
@@ -0,0 +1,13 @@
+package spec_iterator_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestSpecIterator(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "SpecIterator Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
new file mode 100644
index 000000000..a0b8b62d5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
@@ -0,0 +1,15 @@
+package specrunner
+
+import (
+ "crypto/rand"
+ "fmt"
+)
+
+func randomID() string {
+ b := make([]byte, 8)
+ _, err := rand.Read(b)
+ if err != nil {
+ return ""
+ }
+ return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
+}
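randomID therefore yields four dash-separated groups of four lowercase hex digits, or an empty string if the entropy source fails; that is the exact shape the spec runner tests below match with a regexp. A quick hypothetical check:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	id := "1f3a-9c02-77de-41b5" // example of randomID()'s output shape
	pattern := regexp.MustCompile(`^[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}$`)
	fmt.Println(pattern.MatchString(id)) // true
}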
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
new file mode 100644
index 000000000..2c683cb8b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
@@ -0,0 +1,411 @@
+package specrunner
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+
+ "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ Writer "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+
+ "time"
+)
+
+type SpecRunner struct {
+ description string
+ beforeSuiteNode leafnodes.SuiteNode
+ iterator spec_iterator.SpecIterator
+ afterSuiteNode leafnodes.SuiteNode
+ reporters []reporters.Reporter
+ startTime time.Time
+ suiteID string
+ runningSpec *spec.Spec
+ writer Writer.WriterInterface
+ config config.GinkgoConfigType
+ interrupted bool
+ processedSpecs []*spec.Spec
+ lock *sync.Mutex
+}
+
+func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
+ return &SpecRunner{
+ description: description,
+ beforeSuiteNode: beforeSuiteNode,
+ iterator: iterator,
+ afterSuiteNode: afterSuiteNode,
+ reporters: reporters,
+ writer: writer,
+ config: config,
+ suiteID: randomID(),
+ lock: &sync.Mutex{},
+ }
+}
+
+func (runner *SpecRunner) Run() bool {
+ if runner.config.DryRun {
+ runner.performDryRun()
+ return true
+ }
+
+ runner.reportSuiteWillBegin()
+ signalRegistered := make(chan struct{})
+ go runner.registerForInterrupts(signalRegistered)
+ <-signalRegistered
+
+ suitePassed := runner.runBeforeSuite()
+
+ if suitePassed {
+ suitePassed = runner.runSpecs()
+ }
+
+ runner.blockForeverIfInterrupted()
+
+ suitePassed = runner.runAfterSuite() && suitePassed
+
+ runner.reportSuiteDidEnd(suitePassed)
+
+ return suitePassed
+}
+
+func (runner *SpecRunner) performDryRun() {
+ runner.reportSuiteWillBegin()
+
+ if runner.beforeSuiteNode != nil {
+ summary := runner.beforeSuiteNode.Summary()
+ summary.State = types.SpecStatePassed
+ runner.reportBeforeSuite(summary)
+ }
+
+ for {
+ spec, err := runner.iterator.Next()
+ if err == spec_iterator.ErrClosed {
+ break
+ }
+ if err != nil {
+ fmt.Println("failed to iterate over tests:\n" + err.Error())
+ break
+ }
+
+ runner.processedSpecs = append(runner.processedSpecs, spec)
+
+ summary := spec.Summary(runner.suiteID)
+ runner.reportSpecWillRun(summary)
+ if summary.State == types.SpecStateInvalid {
+ summary.State = types.SpecStatePassed
+ }
+ runner.reportSpecDidComplete(summary, false)
+ }
+
+ if runner.afterSuiteNode != nil {
+ summary := runner.afterSuiteNode.Summary()
+ summary.State = types.SpecStatePassed
+ runner.reportAfterSuite(summary)
+ }
+
+ runner.reportSuiteDidEnd(true)
+}
+
+func (runner *SpecRunner) runBeforeSuite() bool {
+ if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
+ return true
+ }
+
+ runner.writer.Truncate()
+ conf := runner.config
+ passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+ if !passed {
+ runner.writer.DumpOut()
+ }
+ runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
+ return passed
+}
+
+func (runner *SpecRunner) runAfterSuite() bool {
+ if runner.afterSuiteNode == nil {
+ return true
+ }
+
+ runner.writer.Truncate()
+ conf := runner.config
+ passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
+ if !passed {
+ runner.writer.DumpOut()
+ }
+ runner.reportAfterSuite(runner.afterSuiteNode.Summary())
+ return passed
+}
+
+func (runner *SpecRunner) runSpecs() bool {
+ suiteFailed := false
+ skipRemainingSpecs := false
+ for {
+ spec, err := runner.iterator.Next()
+ if err == spec_iterator.ErrClosed {
+ break
+ }
+ if err != nil {
+ fmt.Println("failed to iterate over tests:\n" + err.Error())
+ suiteFailed = true
+ break
+ }
+
+ runner.processedSpecs = append(runner.processedSpecs, spec)
+
+ if runner.wasInterrupted() {
+ break
+ }
+ if skipRemainingSpecs {
+ spec.Skip()
+ }
+
+ if !spec.Skipped() && !spec.Pending() {
+ if passed := runner.runSpec(spec); !passed {
+ suiteFailed = true
+ }
+ } else if spec.Pending() && runner.config.FailOnPending {
+ runner.reportSpecWillRun(spec.Summary(runner.suiteID))
+ suiteFailed = true
+ runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
+ } else {
+ runner.reportSpecWillRun(spec.Summary(runner.suiteID))
+ runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
+ }
+
+ if spec.Failed() && runner.config.FailFast {
+ skipRemainingSpecs = true
+ }
+ }
+
+ return !suiteFailed
+}
+
+func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
+ maxAttempts := 1
+ if runner.config.FlakeAttempts > 0 {
+ // a FlakeAttempts of 0 (an uninitialized config) counts as a single attempt
+ maxAttempts = runner.config.FlakeAttempts
+ }
+
+ for i := 0; i < maxAttempts; i++ {
+ runner.reportSpecWillRun(spec.Summary(runner.suiteID))
+ runner.runningSpec = spec
+ spec.Run(runner.writer)
+ runner.runningSpec = nil
+ runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
+ if !spec.Failed() {
+ return true
+ }
+ }
+ return false
+}
+
+func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
+ if runner.runningSpec == nil {
+ return nil, false
+ }
+
+ return runner.runningSpec.Summary(runner.suiteID), true
+}
+
+func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+ close(signalRegistered)
+
+ <-c
+ signal.Stop(c)
+ runner.markInterrupted()
+ go runner.registerForHardInterrupts()
+ runner.writer.DumpOutWithHeader(`
+Received interrupt. Emitting contents of GinkgoWriter...
+---------------------------------------------------------
+`)
+ if runner.afterSuiteNode != nil {
+ fmt.Fprint(os.Stderr, `
+---------------------------------------------------------
+Received interrupt. Running AfterSuite...
+^C again to terminate immediately
+`)
+ runner.runAfterSuite()
+ }
+ runner.reportSuiteDidEnd(false)
+ os.Exit(1)
+}
+
+func (runner *SpecRunner) registerForHardInterrupts() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+
+ <-c
+ fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
+ os.Exit(1)
+}
+
+func (runner *SpecRunner) blockForeverIfInterrupted() {
+ runner.lock.Lock()
+ interrupted := runner.interrupted
+ runner.lock.Unlock()
+
+ if interrupted {
+ select {}
+ }
+}
+
+func (runner *SpecRunner) markInterrupted() {
+ runner.lock.Lock()
+ defer runner.lock.Unlock()
+ runner.interrupted = true
+}
+
+func (runner *SpecRunner) wasInterrupted() bool {
+ runner.lock.Lock()
+ defer runner.lock.Unlock()
+ return runner.interrupted
+}
+
+func (runner *SpecRunner) reportSuiteWillBegin() {
+ runner.startTime = time.Now()
+ summary := runner.suiteWillBeginSummary()
+ for _, reporter := range runner.reporters {
+ reporter.SpecSuiteWillBegin(runner.config, summary)
+ }
+}
+
+func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
+ for _, reporter := range runner.reporters {
+ reporter.BeforeSuiteDidRun(summary)
+ }
+}
+
+func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
+ for _, reporter := range runner.reporters {
+ reporter.AfterSuiteDidRun(summary)
+ }
+}
+
+func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
+ runner.writer.Truncate()
+
+ for _, reporter := range runner.reporters {
+ reporter.SpecWillRun(summary)
+ }
+}
+
+func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
+ if failed && len(summary.CapturedOutput) == 0 {
+ summary.CapturedOutput = string(runner.writer.Bytes())
+ }
+ for i := len(runner.reporters) - 1; i >= 1; i-- { // every reporter except the first, in reverse order
+ runner.reporters[i].SpecDidComplete(summary)
+ }
+
+ if failed {
+ runner.writer.DumpOut()
+ }
+
+ runner.reporters[0].SpecDidComplete(summary) // the first reporter reports last, after any dumped output
+}
+
+func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
+ summary := runner.suiteDidEndSummary(success)
+ summary.RunTime = time.Since(runner.startTime)
+ for _, reporter := range runner.reporters {
+ reporter.SpecSuiteDidEnd(summary)
+ }
+}
+
+func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
+ count = 0
+
+ for _, spec := range runner.processedSpecs {
+ if filter(spec) {
+ count++
+ }
+ }
+
+ return count
+}
+
+func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
+ numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return !ex.Skipped() && !ex.Pending()
+ })
+
+ numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return ex.Pending()
+ })
+
+ numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return ex.Skipped()
+ })
+
+ numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return ex.Passed()
+ })
+
+ numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return ex.Flaked()
+ })
+
+ numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
+ return ex.Failed()
+ })
+
+ if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
+ var known bool
+ numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
+ if !known {
+ numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
+ }
+ numberOfFailedSpecs = numberOfSpecsThatWillBeRun
+ }
+
+ return &types.SuiteSummary{
+ SuiteDescription: runner.description,
+ SuiteSucceeded: success,
+ SuiteID: runner.suiteID,
+
+ NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
+ NumberOfTotalSpecs: len(runner.processedSpecs),
+ NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
+ NumberOfPendingSpecs: numberOfPendingSpecs,
+ NumberOfSkippedSpecs: numberOfSkippedSpecs,
+ NumberOfPassedSpecs: numberOfPassedSpecs,
+ NumberOfFailedSpecs: numberOfFailedSpecs,
+ NumberOfFlakedSpecs: numberOfFlakedSpecs,
+ }
+}
+
+func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
+ numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
+ if !known {
+ numTotal = -1
+ }
+
+ numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
+ if !known {
+ numToRun = -1
+ }
+
+ return &types.SuiteSummary{
+ SuiteDescription: runner.description,
+ SuiteID: runner.suiteID,
+
+ NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
+ NumberOfTotalSpecs: numTotal,
+ NumberOfSpecsThatWillBeRun: numToRun,
+ NumberOfPendingSpecs: -1,
+ NumberOfSkippedSpecs: -1,
+ NumberOfPassedSpecs: -1,
+ NumberOfFailedSpecs: -1,
+ NumberOfFlakedSpecs: -1,
+ }
+}
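Putting the pieces together: a runner is assembled from an iterator, optional suite nodes, at least one reporter, and a writer. The sketch below mirrors the newRunner helper in the test file that follows, using the fake reporter and fake writer as stand-ins; a real caller would pass the default console reporter and the GinkgoWriter, and the internal imports only compile inside the ginkgo tree.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/spec_iterator"
	"github.com/onsi/ginkgo/internal/specrunner"
	Writer "github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
)

func run(specs []*spec.Spec) bool {
	runner := specrunner.New(
		"my suite", // description
		nil,        // no BeforeSuite node
		spec_iterator.NewSerialIterator(specs),
		nil, // no AfterSuite node
		[]reporters.Reporter{reporters.NewFakeReporter()},
		Writer.NewFake(),
		config.GinkgoConfigType{},
	)
	return runner.Run() // true iff the whole suite passed
}

func main() {
	fmt.Println(run(nil)) // an empty suite trivially passes
}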
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go
new file mode 100644
index 000000000..c8388fb6f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go
@@ -0,0 +1,13 @@
+package specrunner_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestSpecRunner(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Spec Runner Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
new file mode 100644
index 000000000..a41437922
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
@@ -0,0 +1,785 @@
+package specrunner_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/internal/spec_iterator"
+ . "github.com/onsi/ginkgo/internal/specrunner"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/internal/codelocation"
+ "github.com/onsi/ginkgo/internal/containernode"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ Writer "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+)
+
+var noneFlag = types.FlagTypeNone
+var pendingFlag = types.FlagTypePending
+
+var _ = Describe("Spec Runner", func() {
+ var (
+ reporter1 *reporters.FakeReporter
+ reporter2 *reporters.FakeReporter
+ failer *Failer.Failer
+ writer *Writer.FakeGinkgoWriter
+
+ thingsThatRan []string
+
+ runner *SpecRunner
+ )
+
+ newBefSuite := func(text string, fail bool) leafnodes.SuiteNode {
+ return leafnodes.NewBeforeSuiteNode(func() {
+ writer.AddEvent(text)
+ thingsThatRan = append(thingsThatRan, text)
+ if fail {
+ failer.Fail(text, codelocation.New(0))
+ }
+ }, codelocation.New(0), 0, failer)
+ }
+
+ newAftSuite := func(text string, fail bool) leafnodes.SuiteNode {
+ return leafnodes.NewAfterSuiteNode(func() {
+ writer.AddEvent(text)
+ thingsThatRan = append(thingsThatRan, text)
+ if fail {
+ failer.Fail(text, codelocation.New(0))
+ }
+ }, codelocation.New(0), 0, failer)
+ }
+
+ newSpec := func(text string, flag types.FlagType, fail bool) *spec.Spec {
+ subject := leafnodes.NewItNode(text, func() {
+ writer.AddEvent(text)
+ thingsThatRan = append(thingsThatRan, text)
+ if fail {
+ failer.Fail(text, codelocation.New(0))
+ }
+ }, flag, codelocation.New(0), 0, failer, 0)
+
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ newFlakySpec := func(text string, flag types.FlagType, failures int) *spec.Spec {
+ runs := 0
+ subject := leafnodes.NewItNode(text, func() {
+ writer.AddEvent(text)
+ thingsThatRan = append(thingsThatRan, text)
+ runs++
+ if runs < failures {
+ failer.Fail(text, codelocation.New(0))
+ }
+ }, flag, codelocation.New(0), 0, failer, 0)
+
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ newSpecWithBody := func(text string, body interface{}) *spec.Spec {
+ subject := leafnodes.NewItNode(text, body, noneFlag, codelocation.New(0), 0, failer, 0)
+
+ return spec.New(subject, []*containernode.ContainerNode{}, false)
+ }
+
+ newRunner := func(config config.GinkgoConfigType, beforeSuiteNode leafnodes.SuiteNode, afterSuiteNode leafnodes.SuiteNode, specs ...*spec.Spec) *SpecRunner {
+ iterator := spec_iterator.NewSerialIterator(specs)
+ return New("description", beforeSuiteNode, iterator, afterSuiteNode, []reporters.Reporter{reporter1, reporter2}, writer, config)
+ }
+
+ BeforeEach(func() {
+ reporter1 = reporters.NewFakeReporter()
+ reporter2 = reporters.NewFakeReporter()
+ writer = Writer.NewFake()
+ failer = Failer.New()
+
+ thingsThatRan = []string{}
+ })
+
+ Describe("Running and Reporting", func() {
+ var specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec *spec.Spec
+ var willRunCalls, didCompleteCalls []string
+ var conf config.GinkgoConfigType
+
+ JustBeforeEach(func() {
+ willRunCalls = []string{}
+ didCompleteCalls = []string{}
+ specA = newSpec("spec A", noneFlag, false)
+ pendingSpec = newSpec("pending spec", pendingFlag, false)
+ anotherPendingSpec = newSpec("another pending spec", pendingFlag, false)
+ failedSpec = newSpec("failed spec", noneFlag, true)
+ specB = newSpec("spec B", noneFlag, false)
+ skippedSpec = newSpec("skipped spec", noneFlag, false)
+ skippedSpec.Skip()
+
+ reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ willRunCalls = append(willRunCalls, "Reporter1")
+ }
+ reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ willRunCalls = append(willRunCalls, "Reporter2")
+ }
+
+ reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ didCompleteCalls = append(didCompleteCalls, "Reporter1")
+ }
+ reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ didCompleteCalls = append(didCompleteCalls, "Reporter2")
+ }
+
+ runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec)
+ runner.Run()
+ })
+
+ BeforeEach(func() {
+ conf = config.GinkgoConfigType{RandomSeed: 17}
+ })
+
+ It("should skip skipped/pending tests", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "failed spec", "spec B", "AftSuite"}))
+ })
+
+ It("should report to any attached reporters", func() {
+ Ω(reporter1.Config).Should(Equal(reporter2.Config))
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
+ Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
+ Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
+ Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
+ Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
+ })
+
+ It("should report that a spec did end in reverse order", func() {
+ Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"}))
+ Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"}))
+ })
+
+ It("should report the passed in config", func() {
+ Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17))
+ })
+
+ It("should report the beginning of the suite", func() {
+ Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
+ Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1))
+ Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1))
+ })
+
+ It("should report the end of the suite", func() {
+ Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
+ Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2))
+ Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(2))
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
+ })
+
+ Context("when told to perform a dry run", func() {
+ BeforeEach(func() {
+ conf.DryRun = true
+ })
+
+ It("should report to the reporters", func() {
+ Ω(reporter1.Config).Should(Equal(reporter2.Config))
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
+ Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
+ Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
+ Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
+ Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
+ })
+
+ It("should not actually run anything", func() {
+ Ω(thingsThatRan).Should(BeEmpty())
+ })
+
+ It("report before and after suites as passed", func() {
+ Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed))
+ Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed))
+ })
+
+ It("should report specs as passed", func() {
+ summaries := reporter1.SpecSummaries
+ Ω(summaries).Should(HaveLen(6))
+ Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A"))
+ Ω(summaries[0].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec"))
+ Ω(summaries[1].State).Should(Equal(types.SpecStatePending))
+ Ω(summaries[2].ComponentTexts).Should(ContainElement("another pending spec"))
+ Ω(summaries[2].State).Should(Equal(types.SpecStatePending))
+ Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec"))
+ Ω(summaries[3].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B"))
+ Ω(summaries[4].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec"))
+ Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped))
+ })
+
+ It("should report the end of the suite", func() {
+ Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
+ Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3))
+ Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2))
+ Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0))
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
+ })
+
+ It("should not report a slow test", func() {
+ summaries := reporter1.SpecSummaries
+ for _, s := range summaries {
+ Expect(s.RunTime).To(BeZero())
+ }
+ })
+ })
+ })
+
+ Describe("reporting on specs", func() {
+ var proceed chan bool
+ var ready chan bool
+ var finished chan bool
+ BeforeEach(func() {
+ ready = make(chan bool)
+ proceed = make(chan bool)
+ finished = make(chan bool)
+ skippedSpec := newSpec("SKIP", noneFlag, false)
+ skippedSpec.Skip()
+
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ newBefSuite("BefSuite", false),
+ newAftSuite("AftSuite", false),
+ skippedSpec,
+ newSpec("PENDING", pendingFlag, false),
+ newSpecWithBody("RUN", func() {
+ close(ready)
+ <-proceed
+ }),
+ )
+ go func() {
+ runner.Run()
+ close(finished)
+ }()
+ })
+
+ It("should report about pending/skipped specs", func() {
+ <-ready
+ Ω(reporter1.SpecWillRunSummaries).Should(HaveLen(3))
+
+ Ω(reporter1.SpecWillRunSummaries[0].ComponentTexts[0]).Should(Equal("SKIP"))
+ Ω(reporter1.SpecWillRunSummaries[1].ComponentTexts[0]).Should(Equal("PENDING"))
+ Ω(reporter1.SpecWillRunSummaries[2].ComponentTexts[0]).Should(Equal("RUN"))
+
+ Ω(reporter1.SpecSummaries[0].ComponentTexts[0]).Should(Equal("SKIP"))
+ Ω(reporter1.SpecSummaries[1].ComponentTexts[0]).Should(Equal("PENDING"))
+ Ω(reporter1.SpecSummaries).Should(HaveLen(2))
+
+ close(proceed)
+ <-finished
+
+ Ω(reporter1.SpecSummaries).Should(HaveLen(3))
+ Ω(reporter1.SpecSummaries[2].ComponentTexts[0]).Should(Equal("RUN"))
+ })
+ })
+
+ Describe("Running and Reporting when there's flakes", func() {
+ var specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec *spec.Spec
+ var willRunCalls, didCompleteCalls []string
+ var conf config.GinkgoConfigType
+ var failedSpecFlag = noneFlag
+
+ JustBeforeEach(func() {
+ willRunCalls = []string{}
+ didCompleteCalls = []string{}
+ specA = newSpec("spec A", noneFlag, false)
+ pendingSpec = newSpec("pending spec", pendingFlag, false)
+ flakySpec = newFlakySpec("flaky spec", noneFlag, 3)
+ failedSpec = newSpec("failed spec", failedSpecFlag, true)
+ specB = newSpec("spec B", noneFlag, false)
+ skippedSpec = newSpec("skipped spec", noneFlag, false)
+ skippedSpec.Skip()
+
+ reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ willRunCalls = append(willRunCalls, "Reporter1")
+ }
+ reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ willRunCalls = append(willRunCalls, "Reporter2")
+ }
+
+ reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ didCompleteCalls = append(didCompleteCalls, "Reporter1")
+ }
+ reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ didCompleteCalls = append(didCompleteCalls, "Reporter2")
+ }
+
+ runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec)
+ runner.Run()
+ })
+
+ BeforeEach(func() {
+ failedSpecFlag = noneFlag
+ conf = config.GinkgoConfigType{
+ RandomSeed: 17,
+ FlakeAttempts: 5,
+ }
+ })
+
+ It("should skip skipped/pending tests", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "flaky spec", "flaky spec", "flaky spec", "failed spec", "failed spec", "failed spec", "failed spec", "failed spec", "spec B", "AftSuite"}))
+ })
+
+ It("should report to any attached reporters", func() {
+ Ω(reporter1.Config).Should(Equal(reporter2.Config))
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
+ Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
+ Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
+ Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
+ Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
+ })
+
+ It("should report that a spec did end in reverse order", func() {
+ Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"}))
+ Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"}))
+ })
+
+ It("should report the passed in config", func() {
+ Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17))
+ })
+
+ It("should report the beginning of the suite", func() {
+ Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
+ Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1))
+ Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1))
+ })
+
+ It("should report the end of the suite", func() {
+ Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
+ Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(3))
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1))
+ })
+
+ Context("when nothing fails", func() {
+ BeforeEach(func() {
+ failedSpecFlag = pendingFlag
+ })
+
+ It("the suite should pass even with flakes", func() {
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
+ Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1))
+ })
+ })
+
+ Context("when told to perform a dry run", func() {
+ BeforeEach(func() {
+ conf.DryRun = true
+ })
+
+ It("should report to the reporters", func() {
+ Ω(reporter1.Config).Should(Equal(reporter2.Config))
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary))
+ Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary))
+ Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries))
+ Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries))
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary))
+ Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary))
+ })
+
+ It("should not actually run anything", func() {
+ Ω(thingsThatRan).Should(BeEmpty())
+ })
+
+ It("report before and after suites as passed", func() {
+ Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed))
+ Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed))
+ })
+
+ It("should report specs as passed", func() {
+ summaries := reporter1.SpecSummaries
+ Ω(summaries).Should(HaveLen(6))
+ Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A"))
+ Ω(summaries[0].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec"))
+ Ω(summaries[1].State).Should(Equal(types.SpecStatePending))
+ Ω(summaries[2].ComponentTexts).Should(ContainElement("flaky spec"))
+ Ω(summaries[2].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec"))
+ Ω(summaries[3].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B"))
+ Ω(summaries[4].State).Should(Equal(types.SpecStatePassed))
+ Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec"))
+ Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped))
+ })
+
+ It("should report the end of the suite", func() {
+ Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description"))
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
+ Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"))
+ Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4))
+ Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0))
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
+ })
+ })
+ })
+
+ Describe("Running BeforeSuite & AfterSuite", func() {
+ var success bool
+ var befSuite leafnodes.SuiteNode
+ var aftSuite leafnodes.SuiteNode
+ Context("with a nil BeforeSuite & AfterSuite", func() {
+ BeforeEach(func() {
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ nil,
+ nil,
+ newSpec("A", noneFlag, false),
+ newSpec("B", noneFlag, false),
+ )
+ success = runner.Run()
+ })
+
+ It("should not report about the BeforeSuite", func() {
+ Ω(reporter1.BeforeSuiteSummary).Should(BeNil())
+ })
+
+ It("should not report about the AfterSuite", func() {
+ Ω(reporter1.AfterSuiteSummary).Should(BeNil())
+ })
+
+ It("should run the specs", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"A", "B"}))
+ })
+ })
+
+ Context("when the BeforeSuite & AfterSuite pass", func() {
+ BeforeEach(func() {
+ befSuite = newBefSuite("BefSuite", false)
+ aftSuite = newBefSuite("AftSuite", false)
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ befSuite,
+ aftSuite,
+ newSpec("A", noneFlag, false),
+ newSpec("B", noneFlag, false),
+ )
+ success = runner.Run()
+ })
+
+ It("should run the BeforeSuite, the AfterSuite and the specs", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"}))
+ })
+
+ It("should report about the BeforeSuite", func() {
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
+ })
+
+ It("should report about the AfterSuite", func() {
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
+ })
+
+ It("should report success", func() {
+ Ω(success).Should(BeTrue())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
+ })
+
+ It("should not dump the writer", func() {
+ Ω(writer.EventStream).ShouldNot(ContainElement("DUMP"))
+ })
+ })
+
+ Context("when the BeforeSuite fails", func() {
+ BeforeEach(func() {
+ befSuite = newBefSuite("BefSuite", true)
+ aftSuite = newBefSuite("AftSuite", false)
+
+ skipped := newSpec("Skipped", noneFlag, false)
+ skipped.Skip()
+
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ befSuite,
+ aftSuite,
+ newSpec("A", noneFlag, false),
+ newSpec("B", noneFlag, false),
+ newSpec("Pending", pendingFlag, false),
+ skipped,
+ )
+ success = runner.Run()
+ })
+
+ It("should not run the specs, but it should run the AfterSuite", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "AftSuite"}))
+ })
+
+ It("should report about the BeforeSuite", func() {
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
+ })
+
+ It("should report about the AfterSuite", func() {
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
+ })
+
+ It("should report failure", func() {
+ Ω(success).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(2))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(2))
+ })
+
+ It("should dump the writer", func() {
+ Ω(writer.EventStream).Should(ContainElement("DUMP"))
+ })
+ })
+
+ Context("when some other test fails", func() {
+ BeforeEach(func() {
+ aftSuite = newBefSuite("AftSuite", false)
+
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ nil,
+ aftSuite,
+ newSpec("A", noneFlag, true),
+ )
+ success = runner.Run()
+ })
+
+ It("should still run the AfterSuite", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"A", "AftSuite"}))
+ })
+
+ It("should report about the AfterSuite", func() {
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
+ })
+
+ It("should report failure", func() {
+ Ω(success).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1))
+ Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(1))
+ })
+ })
+
+ Context("when the AfterSuite fails", func() {
+ BeforeEach(func() {
+ befSuite = newBefSuite("BefSuite", false)
+ aftSuite = newBefSuite("AftSuite", true)
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ befSuite,
+ aftSuite,
+ newSpec("A", noneFlag, false),
+ newSpec("B", noneFlag, false),
+ )
+ success = runner.Run()
+ })
+
+ It("should run everything", func() {
+ Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"}))
+ })
+
+ It("should report about the BeforeSuite", func() {
+ Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary()))
+ })
+
+ It("should report about the AfterSuite", func() {
+ Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary()))
+ })
+
+ It("should report failure", func() {
+ Ω(success).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0))
+ })
+
+ It("should dump the writer", func() {
+ Ω(writer.EventStream).Should(ContainElement("DUMP"))
+ })
+ })
+ })
+
+ Describe("When instructed to fail fast", func() {
+ BeforeEach(func() {
+ conf := config.GinkgoConfigType{
+ FailFast: true,
+ }
+ runner = newRunner(conf, nil, newAftSuite("after-suite", false), newSpec("passing", noneFlag, false), newSpec("failing", noneFlag, true), newSpec("dont-see", noneFlag, true), newSpec("dont-see", noneFlag, true))
+ })
+
+ It("should return false, report failure, and not run anything past the failing test", func() {
+ Ω(runner.Run()).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(thingsThatRan).Should(Equal([]string{"passing", "failing", "after-suite"}))
+ })
+
+ It("should announce the subsequent specs as skipped", func() {
+ runner.Run()
+ Ω(reporter1.SpecSummaries).Should(HaveLen(4))
+ Ω(reporter1.SpecSummaries[2].State).Should(Equal(types.SpecStateSkipped))
+ Ω(reporter1.SpecSummaries[3].State).Should(Equal(types.SpecStateSkipped))
+ })
+
+ It("should mark all subsequent specs as skipped", func() {
+ runner.Run()
+ Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(2))
+ })
+ })
+
+ Describe("Marking failure and success", func() {
+ Context("when all tests pass", func() {
+ BeforeEach(func() {
+ runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false))
+ })
+
+ It("should return true and report success", func() {
+ Ω(runner.Run()).Should(BeTrue())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue())
+ })
+ })
+
+ Context("when a test fails", func() {
+ BeforeEach(func() {
+ runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("failing", noneFlag, true), newSpec("pending", pendingFlag, false))
+ })
+
+ It("should return false and report failure", func() {
+ Ω(runner.Run()).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ })
+ })
+
+ Context("when there is a pending test, but pendings count as failures", func() {
+ BeforeEach(func() {
+ runner = newRunner(config.GinkgoConfigType{FailOnPending: true}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false))
+ })
+
+ It("should return false and report failure", func() {
+ Ω(runner.Run()).Should(BeFalse())
+ Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse())
+ })
+ })
+ })
+
+ Describe("Managing the writer", func() {
+ BeforeEach(func() {
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ nil,
+ nil,
+ newSpec("A", noneFlag, false),
+ newSpec("B", noneFlag, true),
+ newSpec("C", noneFlag, false),
+ )
+ reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ writer.AddEvent("R1.WillRun")
+ }
+ reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) {
+ writer.AddEvent("R2.WillRun")
+ }
+ reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ writer.AddEvent("R1.DidComplete")
+ }
+ reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) {
+ writer.AddEvent("R2.DidComplete")
+ }
+ runner.Run()
+ })
+
+ It("should truncate between tests, but only dump if a test fails", func() {
+ Ω(writer.EventStream).Should(Equal([]string{
+ "TRUNCATE",
+ "R1.WillRun",
+ "R2.WillRun",
+ "A",
+ "R2.DidComplete",
+ "R1.DidComplete",
+ "TRUNCATE",
+ "R1.WillRun",
+ "R2.WillRun",
+ "B",
+ "BYTES",
+ "R2.DidComplete",
+ "DUMP",
+ "R1.DidComplete",
+ "TRUNCATE",
+ "R1.WillRun",
+ "R2.WillRun",
+ "C",
+ "R2.DidComplete",
+ "R1.DidComplete",
+ }))
+ })
+ })
+
+ Describe("CurrentSpecSummary", func() {
+ It("should return the spec summary for the currently running spec", func() {
+ var summary *types.SpecSummary
+ runner = newRunner(
+ config.GinkgoConfigType{},
+ nil,
+ nil,
+ newSpec("A", noneFlag, false),
+ newSpecWithBody("B", func() {
+ var ok bool
+ summary, ok = runner.CurrentSpecSummary()
+ Ω(ok).Should(BeTrue())
+ }),
+ newSpec("C", noneFlag, false),
+ )
+ runner.Run()
+
+ Ω(summary.ComponentTexts).Should(Equal([]string{"B"}))
+
+ summary, ok := runner.CurrentSpecSummary()
+ Ω(summary).Should(BeNil())
+ Ω(ok).Should(BeFalse())
+ })
+ })
+
+ Describe("generating a suite id", func() {
+ It("should generate an id randomly", func() {
+ runnerA := newRunner(config.GinkgoConfigType{}, nil, nil)
+ runnerA.Run()
+ IDA := reporter1.BeginSummary.SuiteID
+
+ runnerB := newRunner(config.GinkgoConfigType{}, nil, nil)
+ runnerB.Run()
+ IDB := reporter1.BeginSummary.SuiteID
+
+ IDRegexp := "[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}"
+ Ω(IDA).Should(MatchRegexp(IDRegexp))
+ Ω(IDB).Should(MatchRegexp(IDRegexp))
+
+ Ω(IDA).ShouldNot(Equal(IDB))
+ })
+ })
+})
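The file above drives SpecRunner entirely by hand. As a minimal sketch (not part of the vendored sources; the package name and helper are hypothetical), this is the same wiring the newRunner helper performs, using only constructors that appear in this diff:

package sketch // hypothetical

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/spec_iterator"
	"github.com/onsi/ginkgo/internal/specrunner"
	Writer "github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
)

// runSerially feeds the given specs to a SpecRunner through a serial
// iterator and observes them with a fake reporter, mirroring newRunner above.
func runSerially(specs []*spec.Spec) bool {
	reporter := reporters.NewFakeReporter()
	runner := specrunner.New(
		"sketch suite", // suite description reported in the Begin/End summaries
		nil,            // no BeforeSuite node
		spec_iterator.NewSerialIterator(specs),
		nil, // no AfterSuite node
		[]reporters.Reporter{reporter},
		Writer.NewFake(),
		config.GinkgoConfigType{},
	)
	return runner.Run() // true only if the whole suite succeeded
}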
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
new file mode 100644
index 000000000..3104bbc88
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
@@ -0,0 +1,190 @@
+package suite
+
+import (
+ "math/rand"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/internal/containernode"
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+ "github.com/onsi/ginkgo/internal/spec"
+ "github.com/onsi/ginkgo/internal/specrunner"
+ "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+)
+
+type ginkgoTestingT interface {
+ Fail()
+}
+
+type Suite struct {
+ topLevelContainer *containernode.ContainerNode
+ currentContainer *containernode.ContainerNode
+ containerIndex int
+ beforeSuiteNode leafnodes.SuiteNode
+ afterSuiteNode leafnodes.SuiteNode
+ runner *specrunner.SpecRunner
+ failer *failer.Failer
+ running bool
+}
+
+func New(failer *failer.Failer) *Suite {
+ topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
+
+ return &Suite{
+ topLevelContainer: topLevelContainer,
+ currentContainer: topLevelContainer,
+ failer: failer,
+ containerIndex: 1,
+ }
+}
+
+func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
+ if config.ParallelTotal < 1 {
+ panic("ginkgo.parallel.total must be >= 1")
+ }
+
+ if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
+ panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
+ }
+
+ r := rand.New(rand.NewSource(config.RandomSeed))
+ suite.topLevelContainer.Shuffle(r)
+ iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
+ suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
+
+ suite.running = true
+ success := suite.runner.Run()
+ if !success {
+ t.Fail()
+ }
+ return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
+ specsSlice := []*spec.Spec{}
+ suite.topLevelContainer.BackPropagateProgrammaticFocus()
+ for _, collatedNodes := range suite.topLevelContainer.Collate() {
+ specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
+ }
+
+ specs := spec.NewSpecs(specsSlice)
+ specs.RegexScansFilePath = config.RegexScansFilePath
+
+ if config.RandomizeAllSpecs {
+ specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
+ }
+
+ specs.ApplyFocus(description, config.FocusString, config.SkipString)
+
+ if config.SkipMeasurements {
+ specs.SkipMeasurements()
+ }
+
+ var iterator spec_iterator.SpecIterator
+
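+ // Prefer the sync server's shared counter so parallel nodes pull specs
+ // dynamically; if the server does not expose /has-counter, fall back to
+ // static sharding below.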
+ if config.ParallelTotal > 1 {
+ iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
+ resp, err := http.Get(config.SyncHost + "/has-counter")
+ if err != nil || resp.StatusCode != http.StatusOK {
+ iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
+ }
+ if resp != nil {
+ resp.Body.Close() // close the probe response so the connection is not leaked
+ }
+ } else {
+ iterator = spec_iterator.NewSerialIterator(specs.Specs())
+ }
+
+ return iterator, specs.HasProgrammaticFocus()
+}
+
+func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
+ return suite.runner.CurrentSpecSummary()
+}
+
+func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.beforeSuiteNode != nil {
+ panic("You may only call BeforeSuite once!")
+ }
+ suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.afterSuiteNode != nil {
+ panic("You may only call AfterSuite once!")
+ }
+ suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.beforeSuiteNode != nil {
+ panic("You may only call BeforeSuite once!")
+ }
+ suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.afterSuiteNode != nil {
+ panic("You may only call AfterSuite once!")
+ }
+ suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
+}
+
+func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
+ container := containernode.New(text, flag, codeLocation)
+ suite.currentContainer.PushContainerNode(container)
+
+ previousContainer := suite.currentContainer
+ suite.currentContainer = container
+ suite.containerIndex++
+
+ body()
+
+ suite.containerIndex--
+ suite.currentContainer = previousContainer
+}
+
+func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.running {
+ suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
+ }
+ suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
+ if suite.running {
+ suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
+ }
+ suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.running {
+ suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
+ }
+ suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.running {
+ suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
+ }
+ suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.running {
+ suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
+ }
+ suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
+
+func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
+ if suite.running {
+ suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
+ }
+ suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
+}
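The suite tests further down exercise this API directly; as a compact, hypothetical sketch of the same flow (one BeforeSuite, one It, then Run on a single node), assuming only the constructors visible in this diff:

package sketch // hypothetical

import (
	"os"
	"testing"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/suite"
	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

// runSketch drives a Suite by hand; t only needs the one-method
// ginkgoTestingT interface, so a *testing.T works.
func runSketch(t *testing.T) (passed, hasProgrammaticFocus bool) {
	f := failer.New()
	s := suite.New(f)
	s.SetBeforeSuiteNode(func() { /* one-time setup */ }, codelocation.New(0), 0)
	s.PushItNode("does something", func() { /* assertions */ }, types.FlagTypeNone, codelocation.New(0), 0)
	// ParallelTotal must be >= 1 and ParallelNode within [1, ParallelTotal],
	// otherwise Run panics (see the checks at the top of Run).
	return s.Run(t, "sketch suite",
		[]reporters.Reporter{reporters.NewFakeReporter()},
		writer.New(os.Stdout),
		config.GinkgoConfigType{RandomSeed: 1, ParallelNode: 1, ParallelTotal: 1})
}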
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go
new file mode 100644
index 000000000..06fe1d12a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go
@@ -0,0 +1,35 @@
+package suite_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func Test(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Suite")
+}
+
+var numBeforeSuiteRuns = 0
+var numAfterSuiteRuns = 0
+
+var _ = BeforeSuite(func() {
+ numBeforeSuiteRuns++
+})
+
+var _ = AfterSuite(func() {
+ numAfterSuiteRuns++
+ Ω(numBeforeSuiteRuns).Should(Equal(1))
+ Ω(numAfterSuiteRuns).Should(Equal(1))
+})
+
+// Fakes
+type fakeTestingT struct {
+ didFail bool
+}
+
+func (fakeT *fakeTestingT) Fail() {
+ fakeT.didFail = true
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go
new file mode 100644
index 000000000..fd2d11dc3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go
@@ -0,0 +1,385 @@
+package suite_test
+
+import (
+ "bytes"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/suite"
+ . "github.com/onsi/gomega"
+
+ "math/rand"
+ "time"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/internal/codelocation"
+ Failer "github.com/onsi/ginkgo/internal/failer"
+ Writer "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Suite", func() {
+ var (
+ specSuite *Suite
+ fakeT *fakeTestingT
+ fakeR *reporters.FakeReporter
+ writer *Writer.FakeGinkgoWriter
+ failer *Failer.Failer
+ )
+
+ BeforeEach(func() {
+ writer = Writer.NewFake()
+ fakeT = &fakeTestingT{}
+ fakeR = reporters.NewFakeReporter()
+ failer = Failer.New()
+ specSuite = New(failer)
+ })
+
+ Describe("running a suite", func() {
+ var (
+ runOrder []string
+ randomizeAllSpecs bool
+ randomSeed int64
+ focusString string
+ parallelNode int
+ parallelTotal int
+ runResult bool
+ hasProgrammaticFocus bool
+ )
+
+ var f = func(runText string) func() {
+ return func() {
+ runOrder = append(runOrder, runText)
+ }
+ }
+
+ BeforeEach(func() {
+ randomizeAllSpecs = false
+ randomSeed = 11
+ parallelNode = 1
+ parallelTotal = 1
+ focusString = ""
+
+ runOrder = make([]string, 0)
+ specSuite.SetBeforeSuiteNode(f("BeforeSuite"), codelocation.New(0), 0)
+ specSuite.PushBeforeEachNode(f("top BE"), codelocation.New(0), 0)
+ specSuite.PushJustBeforeEachNode(f("top JBE"), codelocation.New(0), 0)
+ specSuite.PushAfterEachNode(f("top AE"), codelocation.New(0), 0)
+
+ specSuite.PushContainerNode("container", func() {
+ specSuite.PushBeforeEachNode(f("BE"), codelocation.New(0), 0)
+ specSuite.PushJustBeforeEachNode(f("JBE"), codelocation.New(0), 0)
+ specSuite.PushAfterEachNode(f("AE"), codelocation.New(0), 0)
+ specSuite.PushItNode("it", f("IT"), types.FlagTypeNone, codelocation.New(0), 0)
+
+ specSuite.PushContainerNode("inner container", func() {
+ specSuite.PushItNode("inner it", f("inner IT"), types.FlagTypeNone, codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0))
+ }, types.FlagTypeNone, codelocation.New(0))
+
+ specSuite.PushContainerNode("container 2", func() {
+ specSuite.PushBeforeEachNode(f("BE 2"), codelocation.New(0), 0)
+ specSuite.PushItNode("it 2", f("IT 2"), types.FlagTypeNone, codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0))
+
+ specSuite.PushItNode("top level it", f("top IT"), types.FlagTypeNone, codelocation.New(0), 0)
+
+ specSuite.SetAfterSuiteNode(f("AfterSuite"), codelocation.New(0), 0)
+ })
+
+ JustBeforeEach(func() {
+ runResult, hasProgrammaticFocus = specSuite.Run(fakeT, "suite description", []reporters.Reporter{fakeR}, writer, config.GinkgoConfigType{
+ RandomSeed: randomSeed,
+ RandomizeAllSpecs: randomizeAllSpecs,
+ FocusString: focusString,
+ ParallelNode: parallelNode,
+ ParallelTotal: parallelTotal,
+ })
+ })
+
+ It("provides the config and suite description to the reporter", func() {
+ Ω(fakeR.Config.RandomSeed).Should(Equal(int64(randomSeed)))
+ Ω(fakeR.Config.RandomizeAllSpecs).Should(Equal(randomizeAllSpecs))
+ Ω(fakeR.BeginSummary.SuiteDescription).Should(Equal("suite description"))
+ })
+
+ It("reports that the BeforeSuite node ran", func() {
+ Ω(fakeR.BeforeSuiteSummary).ShouldNot(BeNil())
+ })
+
+ It("reports that the AfterSuite node ran", func() {
+ Ω(fakeR.AfterSuiteSummary).ShouldNot(BeNil())
+ })
+
+ It("provides information about the current test", func() {
+ description := CurrentGinkgoTestDescription()
+ Ω(description.ComponentTexts).Should(Equal([]string{"Suite", "running a suite", "provides information about the current test"}))
+ Ω(description.FullTestText).Should(Equal("Suite running a suite provides information about the current test"))
+ Ω(description.TestText).Should(Equal("provides information about the current test"))
+ Ω(description.IsMeasurement).Should(BeFalse())
+ Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
+ Ω(description.LineNumber).Should(BeNumerically(">", 50))
+ Ω(description.LineNumber).Should(BeNumerically("<", 150))
+ Ω(description.Failed).Should(BeFalse())
+ Ω(description.Duration).Should(BeNumerically(">", 0))
+ })
+
+ Measure("should run measurements", func(b Benchmarker) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ runtime := b.Time("sleeping", func() {
+ sleepTime := time.Duration(r.Float64() * 0.01 * float64(time.Second))
+ time.Sleep(sleepTime)
+ })
+ Ω(runtime.Seconds()).Should(BeNumerically("<=", 1))
+ Ω(runtime.Seconds()).Should(BeNumerically(">=", 0))
+
+ randomValue := r.Float64() * 10.0
+ b.RecordValue("random value", randomValue)
+ Ω(randomValue).Should(BeNumerically("<=", 10.0))
+ Ω(randomValue).Should(BeNumerically(">=", 0.0))
+
+ b.RecordValueWithPrecision("specific value", 123.4567, "ms", 2)
+ b.RecordValueWithPrecision("specific value", 234.5678, "ms", 2)
+ }, 10)
+
+ It("creates a node hierarchy, converts it to a spec collection, and runs it", func() {
+ Ω(runOrder).Should(Equal([]string{
+ "BeforeSuite",
+ "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE",
+ "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
+ "top BE", "BE 2", "top JBE", "IT 2", "top AE",
+ "top BE", "top JBE", "top IT", "top AE",
+ "AfterSuite",
+ }))
+ })
+ Context("when in an AfterEach block", func() {
+ AfterEach(func() {
+ description := CurrentGinkgoTestDescription()
+ Ω(description.IsMeasurement).Should(BeFalse())
+ Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
+ Ω(description.Failed).Should(BeFalse())
+ Ω(description.Duration).Should(BeNumerically(">", 0))
+ })
+
+ It("still provides information about the current test", func() {
+ Ω(true).Should(BeTrue())
+ })
+ })
+
+ Context("when told to randomize all specs", func() {
+ BeforeEach(func() {
+ randomizeAllSpecs = true
+ })
+
+ It("does", func() {
+ Ω(runOrder).Should(Equal([]string{
+ "BeforeSuite",
+ "top BE", "top JBE", "top IT", "top AE",
+ "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
+ "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE",
+ "top BE", "BE 2", "top JBE", "IT 2", "top AE",
+ "AfterSuite",
+ }))
+ })
+ })
+
+ Context("when provided with a filter", func() {
+ BeforeEach(func() {
+ focusString = `inner|\d`
+ })
+
+ It("converts the filter to a regular expression and uses it to filter the running specs", func() {
+ Ω(runOrder).Should(Equal([]string{
+ "BeforeSuite",
+ "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE",
+ "top BE", "BE 2", "top JBE", "IT 2", "top AE",
+ "AfterSuite",
+ }))
+ })
+
+ It("should not report a programmatic focus", func() {
+ Ω(hasProgrammaticFocus).Should(BeFalse())
+ })
+ })
+
+ Context("with a programatically focused spec", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("focused it", f("focused it"), types.FlagTypeFocused, codelocation.New(0), 0)
+
+ specSuite.PushContainerNode("focused container", func() {
+ specSuite.PushItNode("inner focused it", f("inner focused it"), types.FlagTypeFocused, codelocation.New(0), 0)
+ specSuite.PushItNode("inner unfocused it", f("inner unfocused it"), types.FlagTypeNone, codelocation.New(0), 0)
+ }, types.FlagTypeFocused, codelocation.New(0))
+ })
+
+ It("should only run the focused test, applying backpropagation to favor most deeply focused leaf nodes", func() {
+ Ω(runOrder).Should(Equal([]string{
+ "BeforeSuite",
+ "top BE", "top JBE", "focused it", "top AE",
+ "top BE", "top JBE", "inner focused it", "top AE",
+ "AfterSuite",
+ }))
+ })
+
+ It("should report a programmatic focus", func() {
+ Ω(hasProgrammaticFocus).Should(BeTrue())
+ })
+ })
+
+ Context("when the specs pass", func() {
+ It("doesn't report a failure", func() {
+ Ω(fakeT.didFail).Should(BeFalse())
+ })
+
+ It("should return true", func() {
+ Ω(runResult).Should(BeTrue())
+ })
+ })
+
+ Context("when a spec fails", func() {
+ var location types.CodeLocation
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ location = codelocation.New(0)
+ failer.Fail("oops!", location)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should return false", func() {
+ Ω(runResult).Should(BeFalse())
+ })
+
+ It("reports a failure", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+
+ It("generates the correct failure data", func() {
+ Ω(fakeR.SpecSummaries[0].Failure.Message).Should(Equal("oops!"))
+ Ω(fakeR.SpecSummaries[0].Failure.Location).Should(Equal(location))
+ })
+ })
+
+ Context("when runnable nodes are nested within other runnable nodes", func() {
+ Context("when an It is nested", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ specSuite.PushItNode("nested it", f("oops"), types.FlagTypeNone, codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should fail", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+ })
+
+ Context("when a Measure is nested", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ specSuite.PushMeasureNode("nested measure", func(Benchmarker) {}, types.FlagTypeNone, codelocation.New(0), 10)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should fail", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+ })
+
+ Context("when a BeforeEach is nested", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ specSuite.PushBeforeEachNode(f("nested bef"), codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should fail", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+ })
+
+ Context("when a JustBeforeEach is nested", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ specSuite.PushJustBeforeEachNode(f("nested jbef"), codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should fail", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+ })
+
+ Context("when a AfterEach is nested", func() {
+ BeforeEach(func() {
+ specSuite.PushItNode("top level it", func() {
+ specSuite.PushAfterEachNode(f("nested aft"), codelocation.New(0), 0)
+ }, types.FlagTypeNone, codelocation.New(0), 0)
+ })
+
+ It("should fail", func() {
+ Ω(fakeT.didFail).Should(BeTrue())
+ })
+ })
+ })
+ })
+
+ Describe("BeforeSuite", func() {
+ Context("when setting BeforeSuite more than once", func() {
+ It("should panic", func() {
+ specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0)
+
+ Ω(func() {
+ specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0)
+ }).Should(Panic())
+ })
+ })
+ })
+
+ Describe("AfterSuite", func() {
+ Context("when setting AfterSuite more than once", func() {
+ It("should panic", func() {
+ specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0)
+
+ Ω(func() {
+ specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0)
+ }).Should(Panic())
+ })
+ })
+ })
+
+ Describe("By", func() {
+ It("writes to the GinkgoWriter", func() {
+ originalGinkgoWriter := GinkgoWriter
+ buffer := &bytes.Buffer{}
+
+ GinkgoWriter = buffer
+ By("Saying Hello GinkgoWriter")
+ GinkgoWriter = originalGinkgoWriter
+
+ Ω(buffer.String()).Should(ContainSubstring("STEP"))
+ Ω(buffer.String()).Should(ContainSubstring(": Saying Hello GinkgoWriter\n"))
+ })
+
+ It("calls the passed-in callback if present", func() {
+ a := 0
+ By("calling the callback", func() {
+ a = 1
+ })
+ Ω(a).Should(Equal(1))
+ })
+
+ It("panics if there is more than one callback", func() {
+ Ω(func() {
+ By("registering more than one callback", func() {}, func() {})
+ }).Should(Panic())
+ })
+ })
+
+ Describe("GinkgoRandomSeed", func() {
+ It("returns the current config's random seed", func() {
+ Ω(GinkgoRandomSeed()).Should(Equal(config.GinkgoConfig.RandomSeed))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 000000000..090445d08
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,76 @@
+package testingtproxy
+
+import (
+ "fmt"
+ "io"
+)
+
+type failFunc func(message string, callerSkip ...int)
+
+func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
+ return &ginkgoTestingTProxy{
+ fail: fail,
+ offset: offset,
+ writer: writer,
+ }
+}
+
+type ginkgoTestingTProxy struct {
+ fail failFunc
+ offset int
+ writer io.Writer
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+ fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+ t.Log(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+ return false
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+ fmt.Println(args...)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+ t.Skip(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+ return false
+}
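This proxy lets code that expects a *testing.T-shaped value report failures through ginkgo instead. A hypothetical usage sketch; note that ginkgo.Fail already has the failFunc signature (message string, callerSkip ...int):

package sketch // hypothetical

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/internal/testingtproxy"
)

func proxySketch() {
	t := testingtproxy.New(ginkgo.GinkgoWriter, ginkgo.Fail, 0)
	t.Logf("checking %s", "something") // written to the GinkgoWriter
	t.Errorf("got %d, want %d", 2, 1)  // forwarded to ginkgo.Fail with the configured offset
}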
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
new file mode 100644
index 000000000..6739c3f60
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
@@ -0,0 +1,36 @@
+package writer
+
+type FakeGinkgoWriter struct {
+ EventStream []string
+}
+
+func NewFake() *FakeGinkgoWriter {
+ return &FakeGinkgoWriter{
+ EventStream: []string{},
+ }
+}
+
+func (writer *FakeGinkgoWriter) AddEvent(event string) {
+ writer.EventStream = append(writer.EventStream, event)
+}
+
+func (writer *FakeGinkgoWriter) Truncate() {
+ writer.EventStream = append(writer.EventStream, "TRUNCATE")
+}
+
+func (writer *FakeGinkgoWriter) DumpOut() {
+ writer.EventStream = append(writer.EventStream, "DUMP")
+}
+
+func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
+ writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
+}
+
+func (writer *FakeGinkgoWriter) Bytes() []byte {
+ writer.EventStream = append(writer.EventStream, "BYTES")
+ return nil
+}
+
+func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
+ return 0, nil
+}
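The fake records which writer methods ran as string markers; the EventStream assertions in spec_runner_test.go above (TRUNCATE, DUMP, BYTES) rely on exactly this. A small hypothetical illustration, with the expected stream noted in the comments:

package sketch // hypothetical

import "github.com/onsi/ginkgo/internal/writer"

func fakeWriterSketch() []string {
	fake := writer.NewFake()
	fake.AddEvent("spec ran") // stands in for output a spec body would produce
	fake.Truncate()           // recorded as the marker "TRUNCATE"
	fake.DumpOut()            // recorded as the marker "DUMP"
	return fake.EventStream   // []string{"spec ran", "TRUNCATE", "DUMP"}
}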
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
new file mode 100644
index 000000000..98eca3bdd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
@@ -0,0 +1,89 @@
+package writer
+
+import (
+ "bytes"
+ "io"
+ "sync"
+)
+
+type WriterInterface interface {
+ io.Writer
+
+ Truncate()
+ DumpOut()
+ DumpOutWithHeader(header string)
+ Bytes() []byte
+}
+
+type Writer struct {
+ buffer *bytes.Buffer
+ outWriter io.Writer
+ lock *sync.Mutex
+ stream bool
+ redirector io.Writer
+}
+
+func New(outWriter io.Writer) *Writer {
+ return &Writer{
+ buffer: &bytes.Buffer{},
+ lock: &sync.Mutex{},
+ outWriter: outWriter,
+ stream: true,
+ }
+}
+
+func (w *Writer) AndRedirectTo(writer io.Writer) {
+ w.redirector = writer
+}
+
+func (w *Writer) SetStream(stream bool) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.stream = stream
+}
+
+func (w *Writer) Write(b []byte) (n int, err error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ n, err = w.buffer.Write(b)
+ if w.redirector != nil {
+ w.redirector.Write(b)
+ }
+ if w.stream {
+ return w.outWriter.Write(b)
+ }
+ return n, err
+}
+
+func (w *Writer) Truncate() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.buffer.Reset()
+}
+
+func (w *Writer) DumpOut() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if !w.stream {
+ w.buffer.WriteTo(w.outWriter)
+ }
+}
+
+func (w *Writer) Bytes() []byte {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ b := w.buffer.Bytes()
+ copied := make([]byte, len(b))
+ copy(copied, b)
+ return copied
+}
+
+func (w *Writer) DumpOutWithHeader(header string) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if !w.stream && w.buffer.Len() > 0 {
+ w.outWriter.Write([]byte(header))
+ w.buffer.WriteTo(w.outWriter)
+ }
+}
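Between specs the real Writer is used roughly as follows (a hedged sketch): with streaming disabled, output accumulates in the buffer, Truncate drops it at spec boundaries, and DumpOutWithHeader emits the header plus buffer only when the spec left output behind:

package sketch // hypothetical

import (
	"fmt"
	"os"

	"github.com/onsi/ginkgo/internal/writer"
)

func writerSketch() {
	w := writer.New(os.Stdout)
	w.SetStream(false) // buffer output instead of writing through immediately

	fmt.Fprintln(w, "output from a passing spec")
	w.Truncate() // the spec passed, so its buffered output is discarded

	fmt.Fprintln(w, "output from a failing spec")
	// The buffer is non-empty and streaming is off, so this prints the
	// header followed by the captured output.
	w.DumpOutWithHeader("--- captured GinkgoWriter output ---\n")
}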
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go
new file mode 100644
index 000000000..e20657791
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go
@@ -0,0 +1,13 @@
+package writer_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestWriter(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Writer Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go
new file mode 100644
index 000000000..3e1d17c6d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go
@@ -0,0 +1,75 @@
+package writer_test
+
+import (
+ "github.com/onsi/gomega/gbytes"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/writer"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Writer", func() {
+ var writer *Writer
+ var out *gbytes.Buffer
+
+ BeforeEach(func() {
+ out = gbytes.NewBuffer()
+ writer = New(out)
+ })
+
+ It("should stream directly to the outbuffer by default", func() {
+ writer.Write([]byte("foo"))
+ Ω(out).Should(gbytes.Say("foo"))
+ })
+
+ It("should not emit the header when asked to DumpOutWitHeader", func() {
+ writer.Write([]byte("foo"))
+ writer.DumpOutWithHeader("my header")
+ Ω(out).ShouldNot(gbytes.Say("my header"))
+ Ω(out).Should(gbytes.Say("foo"))
+ })
+
+ Context("when told not to stream", func() {
+ BeforeEach(func() {
+ writer.SetStream(false)
+ })
+
+ It("should only write to the buffer when told to DumpOut", func() {
+ writer.Write([]byte("foo"))
+ Ω(out).ShouldNot(gbytes.Say("foo"))
+ writer.DumpOut()
+ Ω(out).Should(gbytes.Say("foo"))
+ })
+
+ It("should truncate the internal buffer when told to truncate", func() {
+ writer.Write([]byte("foo"))
+ writer.Truncate()
+ writer.DumpOut()
+ Ω(out).ShouldNot(gbytes.Say("foo"))
+
+ writer.Write([]byte("bar"))
+ writer.DumpOut()
+ Ω(out).Should(gbytes.Say("bar"))
+ })
+
+ Describe("emitting a header", func() {
+ Context("when the buffer has content", func() {
+ It("should emit the header followed by the content", func() {
+ writer.Write([]byte("foo"))
+ writer.DumpOutWithHeader("my header")
+
+ Ω(out).Should(gbytes.Say("my header"))
+ Ω(out).Should(gbytes.Say("foo"))
+ })
+ })
+
+ Context("when the buffer has no content", func() {
+ It("should not emit the header", func() {
+ writer.DumpOutWithHeader("my header")
+
+ Ω(out).ShouldNot(gbytes.Say("my header"))
+ })
+ })
+ })
+ })
+})