diff options
author | Valentin Rothberg <rothberg@redhat.com> | 2019-02-05 11:51:41 +0100 |
---|---|---|
committer | Valentin Rothberg <rothberg@redhat.com> | 2019-02-06 11:14:06 +0100 |
commit | 9ac0ebb0791851aea81ecc847802db5a39bfb6e7 (patch) | |
tree | 30ad98bcc2c2dd1136f46a48cbc44d422adfa184 /vendor/github.com/onsi/ginkgo/internal/spec_iterator | |
parent | 51714d5da7aaa19014fd67b48b79dfbd5f69c1f0 (diff) | |
download | podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.tar.gz podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.tar.bz2 podman-9ac0ebb0791851aea81ecc847802db5a39bfb6e7.zip |
Cirrus: add vendor_check_task
* Make sure that all vendored dependencies are in sync with the code and
the vendor.conf by running `make vendor` with a follow-up status check
of the git tree.
* Vendor ginkgo and gomega to include the test dependencies.
Signed-off-by: Chris Evich <cevich@redhat.com>
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Diffstat (limited to 'vendor/github.com/onsi/ginkgo/internal/spec_iterator')
10 files changed, 626 insertions, 0 deletions
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go new file mode 100644 index 000000000..82272554a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go @@ -0,0 +1,55 @@ +package spec_iterator + +func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { + if length == 0 { + return 0, 0 + } + + // We have more nodes than tests. Trivial case. + if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = 
minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go new file mode 100644 index 000000000..65da9837c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go @@ -0,0 +1,149 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/spec_iterator" + . "github.com/onsi/gomega" +) + +var _ = Describe("ParallelizedIndexRange", func() { + var startIndex, count int + + It("should return the correct index range for 4 tests on 2 nodes", func() { + startIndex, count = ParallelizedIndexRange(4, 2, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(4, 2, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + }) + + It("should return the correct index range for 5 tests on 2 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 2, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(3)) + + startIndex, count = ParallelizedIndexRange(5, 2, 2) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(2)) + }) + + It("should return the correct index range for 5 tests on 3 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 3, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 3, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 3, 3) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 4 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 4, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 4, 2) + Ω(startIndex).Should(Equal(2)) + 
Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 4, 3) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 4, 4) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 5 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 5, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 2) + Ω(startIndex).Should(Equal(1)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 3) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 4) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 5) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 6 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 6, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 2) + Ω(startIndex).Should(Equal(1)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 3) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 4) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 5) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 6) + Ω(count).Should(Equal(0)) + }) + + It("should return the correct index range for 5 tests on 7 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 7, 6) + Ω(count).Should(Equal(0)) + + startIndex, count = ParallelizedIndexRange(5, 7, 7) + Ω(count).Should(Equal(0)) + }) + + It("should return the correct index range for 11 tests on 7 
nodes", func() { + startIndex, count = ParallelizedIndexRange(11, 7, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 3) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 4) + Ω(startIndex).Should(Equal(6)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 5) + Ω(startIndex).Should(Equal(8)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(11, 7, 6) + Ω(startIndex).Should(Equal(9)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(11, 7, 7) + Ω(startIndex).Should(Equal(10)) + Ω(count).Should(Equal(1)) + }) + +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 000000000..99f548bca --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,59 @@ +package spec_iterator + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if 
counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go new file mode 100644 index 000000000..c5a762fd5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go @@ -0,0 +1,112 @@ +package spec_iterator_test + +import ( + "net/http" + + . "github.com/onsi/ginkgo/internal/spec_iterator" + "github.com/onsi/gomega/ghttp" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ParallelSpecIterator", func() { + var specs []*spec.Spec + var iterator *ParallelIterator + var server *ghttp.Server + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + server = ghttp.NewServer() + + iterator = NewParallelIterator(specs, "http://"+server.Addr()) + }) + + AfterEach(func() { + server.Close() + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should not report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(-1)) + Ω(known).Should(BeFalse()) + }) + + It("should not report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(-1)) + Ω(known).Should(BeFalse()) + }) + + Describe("iterating", func() { + Describe("when the server returns well-formed responses", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 0}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 1}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 3}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 4}), + ) + }) + + It("should return the specs in question", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + Ω(iterator.Next()).Should(Equal(specs[3])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) + + Describe("when the server 
404s", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusNotFound, ""), + ) + }) + + It("should return an error", func() { + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError("unexpected status code 404")) + }) + }) + + Describe("when the server returns gibberish", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusOK, "ß"), + ) + }) + + It("should error", func() { + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).ShouldNot(BeNil()) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 000000000..a51c93b8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go new file mode 100644 index 000000000..dde4a344e --- 
/dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go @@ -0,0 +1,64 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("SerialSpecIterator", func() { + var specs []*spec.Spec + var iterator *SerialIterator + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + iterator = NewSerialIterator(specs) + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(4)) + Ω(known).Should(BeTrue()) + }) + + It("should report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(2)) + Ω(known).Should(BeTrue()) + }) + + Describe("iterating", func() { + It("should return the specs in order", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + Ω(iterator.Next()).Should(Equal(specs[2])) + Ω(iterator.Next()).Should(Equal(specs[3])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) +}) diff --git 
a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 000000000..ad4a3ea3c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go new file mode 100644 index 000000000..c3786e03a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go @@ -0,0 +1,62 @@ +package spec_iterator_test + +import ( + . 
"github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("ShardedParallelSpecIterator", func() { + var specs []*spec.Spec + var iterator *ShardedParallelIterator + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + iterator = NewShardedParallelIterator(specs, 2, 1) + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(2)) + Ω(known).Should(BeTrue()) + }) + + It("should report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(1)) + Ω(known).Should(BeTrue()) + }) + + Describe("iterating", func() { + It("should return the specs in order", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 000000000..74bffad64 --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go new file mode 100644 index 000000000..5c08a77e3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go @@ -0,0 +1,13 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestSpecIterator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "SpecIterator Suite") +} |