Diffstat (limited to 'vendor/github.com/onsi/ginkgo/internal/remote')
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go                   | 249
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go              | 315
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go |  22
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go             |  33
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go          | 147
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go     | 181
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go           |  13
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go      |  83
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go       |  36
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go            |  13
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/server.go                       | 224
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/server_test.go                  | 269
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go      |  11
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go          |   9
-rw-r--r--  vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go             |  11
15 files changed, 1616 insertions, 0 deletions
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
new file mode 100644
index 000000000..6b54afe01
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -0,0 +1,249 @@
+/*
+
+Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
+coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
+
+ ginkgo -nodes=N
+
+where N is the number of nodes you desire.
+*/
+package remote
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters/stenographer"
+ "github.com/onsi/ginkgo/types"
+)
+
+type configAndSuite struct {
+ config config.GinkgoConfigType
+ summary *types.SuiteSummary
+}
+
+type Aggregator struct {
+ nodeCount int
+ config config.DefaultReporterConfigType
+ stenographer stenographer.Stenographer
+ result chan bool
+
+ suiteBeginnings chan configAndSuite
+ aggregatedSuiteBeginnings []configAndSuite
+
+ beforeSuites chan *types.SetupSummary
+ aggregatedBeforeSuites []*types.SetupSummary
+
+ afterSuites chan *types.SetupSummary
+ aggregatedAfterSuites []*types.SetupSummary
+
+ specCompletions chan *types.SpecSummary
+ completedSpecs []*types.SpecSummary
+
+ suiteEndings chan *types.SuiteSummary
+ aggregatedSuiteEndings []*types.SuiteSummary
+ specs []*types.SpecSummary
+
+ startTime time.Time
+}
+
+func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
+ aggregator := &Aggregator{
+ nodeCount: nodeCount,
+ result: result,
+ config: config,
+ stenographer: stenographer,
+
+ suiteBeginnings: make(chan configAndSuite, 0),
+ beforeSuites: make(chan *types.SetupSummary, 0),
+ afterSuites: make(chan *types.SetupSummary, 0),
+ specCompletions: make(chan *types.SpecSummary, 0),
+ suiteEndings: make(chan *types.SuiteSummary, 0),
+ }
+
+ go aggregator.mux()
+
+ return aggregator
+}
+
+func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
+ aggregator.suiteBeginnings <- configAndSuite{config, summary}
+}
+
+func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+ aggregator.beforeSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+ aggregator.afterSuites <- setupSummary
+}
+
+func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
+ //noop
+}
+
+func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
+ aggregator.specCompletions <- specSummary
+}
+
+func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+ aggregator.suiteEndings <- summary
+}
+
+func (aggregator *Aggregator) mux() {
+loop:
+ for {
+ select {
+ case configAndSuite := <-aggregator.suiteBeginnings:
+ aggregator.registerSuiteBeginning(configAndSuite)
+ case setupSummary := <-aggregator.beforeSuites:
+ aggregator.registerBeforeSuite(setupSummary)
+ case setupSummary := <-aggregator.afterSuites:
+ aggregator.registerAfterSuite(setupSummary)
+ case specSummary := <-aggregator.specCompletions:
+ aggregator.registerSpecCompletion(specSummary)
+ case suite := <-aggregator.suiteEndings:
+ finished, passed := aggregator.registerSuiteEnding(suite)
+ if finished {
+ aggregator.result <- passed
+ break loop
+ }
+ }
+ }
+}
+
+func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
+ aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
+
+ if len(aggregator.aggregatedSuiteBeginnings) == 1 {
+ aggregator.startTime = time.Now()
+ }
+
+ if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+ return
+ }
+
+ aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
+
+ totalNumberOfSpecs := 0
+ if len(aggregator.aggregatedSuiteBeginnings) > 0 {
+ totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
+ }
+
+ aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
+ aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
+ aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
+ aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
+ aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
+ aggregator.specs = append(aggregator.specs, specSummary)
+ aggregator.flushCompletedSpecs()
+}
+
+func (aggregator *Aggregator) flushCompletedSpecs() {
+ if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
+ return
+ }
+
+ for _, setupSummary := range aggregator.aggregatedBeforeSuites {
+ aggregator.announceBeforeSuite(setupSummary)
+ }
+
+ for _, specSummary := range aggregator.completedSpecs {
+ aggregator.announceSpec(specSummary)
+ }
+
+ for _, setupSummary := range aggregator.aggregatedAfterSuites {
+ aggregator.announceAfterSuite(setupSummary)
+ }
+
+ aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
+ aggregator.completedSpecs = []*types.SpecSummary{}
+ aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
+}
+
+func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
+ aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+ if setupSummary.State != types.SpecStatePassed {
+ aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
+ aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
+ if setupSummary.State != types.SpecStatePassed {
+ aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
+ if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
+ aggregator.stenographer.AnnounceSpecWillRun(specSummary)
+ }
+
+ aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
+
+ switch specSummary.State {
+ case types.SpecStatePassed:
+ if specSummary.IsMeasurement {
+ aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
+ } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
+ aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
+ } else {
+ aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
+ }
+
+ case types.SpecStatePending:
+ aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
+ case types.SpecStateSkipped:
+ aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
+ case types.SpecStateTimedOut:
+ aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ case types.SpecStatePanicked:
+ aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ case types.SpecStateFailed:
+ aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ }
+}
+
+func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
+ aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
+ if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
+ return false, false
+ }
+
+ aggregatedSuiteSummary := &types.SuiteSummary{}
+ aggregatedSuiteSummary.SuiteSucceeded = true
+
+ for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
+ if suiteSummary.SuiteSucceeded == false {
+ aggregatedSuiteSummary.SuiteSucceeded = false
+ }
+
+ aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
+ aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
+ aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
+ aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
+ aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
+ aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
+ aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
+ }
+
+ aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
+
+ aggregator.stenographer.SummarizeFailures(aggregator.specs)
+ aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
+
+ return true, aggregatedSuiteSummary.SuiteSucceeded
+}
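
For orientation, a minimal sketch of how an Aggregator can be wired up and driven. The two-node loop, the literal summaries, and writing the stenographer to os.Stdout are illustrative assumptions, not code from this change; internal packages like this one are only importable from inside the ginkgo module itself, which is how the CLI uses them.

    package main

    import (
        "fmt"
        "os"

        "github.com/onsi/ginkgo/config"
        "github.com/onsi/ginkgo/internal/remote"
        "github.com/onsi/ginkgo/reporters/stenographer"
        "github.com/onsi/ginkgo/types"
    )

    func main() {
        result := make(chan bool, 1)
        sten := stenographer.New(false, true, os.Stdout) // no color, diffs enabled
        agg := remote.NewAggregator(2, result, config.DefaultReporterConfigType{}, sten)

        // The aggregator stays silent until every node has reported SpecSuiteWillBegin...
        for node := 1; node <= 2; node++ {
            agg.SpecSuiteWillBegin(
                config.GinkgoConfigType{ParallelNode: node, ParallelTotal: 2},
                &types.SuiteSummary{SuiteDescription: "demo", NumberOfSpecsBeforeParallelization: 4},
            )
        }

        // ...and it only pushes the combined verdict once every node has ended.
        for node := 1; node <= 2; node++ {
            agg.SpecSuiteDidEnd(&types.SuiteSummary{SuiteSucceeded: true})
        }

        fmt.Println("aggregated suite passed:", <-result)
    }
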
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
new file mode 100644
index 000000000..aedf93927
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go
@@ -0,0 +1,315 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "time"
+
+ "github.com/onsi/ginkgo/config"
+ . "github.com/onsi/ginkgo/internal/remote"
+ st "github.com/onsi/ginkgo/reporters/stenographer"
+ "github.com/onsi/ginkgo/types"
+)
+
+var _ = Describe("Aggregator", func() {
+ var (
+ aggregator *Aggregator
+ reporterConfig config.DefaultReporterConfigType
+ stenographer *st.FakeStenographer
+ result chan bool
+
+ ginkgoConfig1 config.GinkgoConfigType
+ ginkgoConfig2 config.GinkgoConfigType
+
+ suiteSummary1 *types.SuiteSummary
+ suiteSummary2 *types.SuiteSummary
+
+ beforeSummary *types.SetupSummary
+ afterSummary *types.SetupSummary
+ specSummary *types.SpecSummary
+
+ suiteDescription string
+ )
+
+ BeforeEach(func() {
+ reporterConfig = config.DefaultReporterConfigType{
+ NoColor: false,
+ SlowSpecThreshold: 0.1,
+ NoisyPendings: true,
+ Succinct: false,
+ Verbose: true,
+ }
+ stenographer = st.NewFakeStenographer()
+ result = make(chan bool, 1)
+ aggregator = NewAggregator(2, result, reporterConfig, stenographer)
+
+ //
+ // now set up some fixture data
+ //
+
+ ginkgoConfig1 = config.GinkgoConfigType{
+ RandomSeed: 1138,
+ RandomizeAllSpecs: true,
+ ParallelNode: 1,
+ ParallelTotal: 2,
+ }
+
+ ginkgoConfig2 = config.GinkgoConfigType{
+ RandomSeed: 1138,
+ RandomizeAllSpecs: true,
+ ParallelNode: 2,
+ ParallelTotal: 2,
+ }
+
+ suiteDescription = "My Parallel Suite"
+
+ suiteSummary1 = &types.SuiteSummary{
+ SuiteDescription: suiteDescription,
+
+ NumberOfSpecsBeforeParallelization: 30,
+ NumberOfTotalSpecs: 17,
+ NumberOfSpecsThatWillBeRun: 15,
+ NumberOfPendingSpecs: 1,
+ NumberOfSkippedSpecs: 1,
+ }
+
+ suiteSummary2 = &types.SuiteSummary{
+ SuiteDescription: suiteDescription,
+
+ NumberOfSpecsBeforeParallelization: 30,
+ NumberOfTotalSpecs: 13,
+ NumberOfSpecsThatWillBeRun: 8,
+ NumberOfPendingSpecs: 2,
+ NumberOfSkippedSpecs: 3,
+ }
+
+ beforeSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "BeforeSuiteOutput",
+ }
+
+ afterSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "AfterSuiteOutput",
+ }
+
+ specSummary = &types.SpecSummary{
+ State: types.SpecStatePassed,
+ CapturedOutput: "SpecOutput",
+ }
+ })
+
+ call := func(method string, args ...interface{}) st.FakeStenographerCall {
+ return st.NewFakeStenographerCall(method, args...)
+ }
+
+ beginSuite := func() {
+ stenographer.Reset()
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
+ Eventually(func() interface{} {
+ return len(stenographer.Calls())
+ }).Should(BeNumerically(">=", 3))
+ }
+
+ Describe("Announcing the beginning of the suite", func() {
+ Context("When one of the parallel-suites starts", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ })
+
+ It("should be silent", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+ })
+
+ Context("once all of the parallel-suites have started", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2)
+ aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(3))
+ })
+
+ It("should announce the beginning of the suite", func() {
+ Ω(stenographer.Calls()).Should(HaveLen(3))
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", suiteDescription, ginkgoConfig1.RandomSeed, true, false)))
+ Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceTotalNumberOfSpecs", 30, false)))
+ Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceAggregatedParallelRun", 2, false)))
+ })
+ })
+ })
+
+ Describe("Announcing specs and before suites", func() {
+ Context("when the parallel-suites have not all started", func() {
+ BeforeEach(func() {
+ aggregator.BeforeSuiteDidRun(beforeSummary)
+ aggregator.AfterSuiteDidRun(afterSummary)
+ aggregator.SpecDidComplete(specSummary)
+ })
+
+ It("should not announce any specs", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+
+ Context("when the parallel-suites subsequently start", func() {
+ BeforeEach(func() {
+ beginSuite()
+ })
+
+ It("should announce the specs, the before suites and the after suites", func() {
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(ContainElement(call("AnnounceSuccesfulSpec", specSummary)))
+
+ Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
+ Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
+ })
+ })
+ })
+
+ Context("When the parallel-suites have all started", func() {
+ BeforeEach(func() {
+ beginSuite()
+ stenographer.Reset()
+ })
+
+ Context("When a spec completes", func() {
+ BeforeEach(func() {
+ aggregator.BeforeSuiteDidRun(beforeSummary)
+ aggregator.SpecDidComplete(specSummary)
+ aggregator.AfterSuiteDidRun(afterSummary)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(5))
+ })
+
+ It("should announce the captured output of the BeforeSuite", func() {
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput)))
+ })
+
+ It("should announce that the spec will run (when in verbose mode)", func() {
+ Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecWillRun", specSummary)))
+ })
+
+ It("should announce the captured stdout of the spec", func() {
+ Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceCapturedOutput", specSummary.CapturedOutput)))
+ })
+
+ It("should announce completion", func() {
+ Ω(stenographer.Calls()[3]).Should(Equal(call("AnnounceSuccesfulSpec", specSummary)))
+ })
+
+ It("should announce the captured output of the AfterSuite", func() {
+ Ω(stenographer.Calls()[4]).Should(Equal(call("AnnounceCapturedOutput", afterSummary.CapturedOutput)))
+ })
+ })
+ })
+ })
+
+ Describe("Announcing the end of the suite", func() {
+ BeforeEach(func() {
+ beginSuite()
+ stenographer.Reset()
+ })
+
+ Context("When one of the parallel-suites ends", func() {
+ BeforeEach(func() {
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ })
+
+ It("should be silent", func() {
+ Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty())
+ })
+
+ It("should not notify the channel", func() {
+ Ω(result).Should(BeEmpty())
+ })
+ })
+
+ Context("once all of the parallel-suites end", func() {
+ BeforeEach(func() {
+ time.Sleep(200 * time.Millisecond)
+
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary1.NumberOfPassedSpecs = 15
+ suiteSummary1.NumberOfFailedSpecs = 0
+ suiteSummary1.NumberOfFlakedSpecs = 3
+ suiteSummary2.SuiteSucceeded = false
+ suiteSummary2.NumberOfPassedSpecs = 5
+ suiteSummary2.NumberOfFailedSpecs = 3
+ suiteSummary2.NumberOfFlakedSpecs = 4
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should announce the end of the suite", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
+ Ω(compositeSummary.NumberOfSpecsThatWillBeRun).Should(Equal(23))
+ Ω(compositeSummary.NumberOfTotalSpecs).Should(Equal(30))
+ Ω(compositeSummary.NumberOfPassedSpecs).Should(Equal(20))
+ Ω(compositeSummary.NumberOfFailedSpecs).Should(Equal(3))
+ Ω(compositeSummary.NumberOfPendingSpecs).Should(Equal(3))
+ Ω(compositeSummary.NumberOfSkippedSpecs).Should(Equal(4))
+ Ω(compositeSummary.NumberOfFlakedSpecs).Should(Equal(7))
+ Ω(compositeSummary.RunTime.Seconds()).Should(BeNumerically(">", 0.2))
+ })
+ })
+
+ Context("when all the parallel-suites pass", func() {
+ BeforeEach(func() {
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary2.SuiteSucceeded = true
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should report success", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeTrue())
+ })
+
+ It("should notify the channel that it succeded", func(done Done) {
+ Ω(<-result).Should(BeTrue())
+ close(done)
+ })
+ })
+
+ Context("when one of the parallel-suites fails", func() {
+ BeforeEach(func() {
+ suiteSummary1.SuiteSucceeded = true
+ suiteSummary2.SuiteSucceeded = false
+
+ aggregator.SpecSuiteDidEnd(suiteSummary2)
+ aggregator.SpecSuiteDidEnd(suiteSummary1)
+ Eventually(func() interface{} {
+ return stenographer.Calls()
+ }).Should(HaveLen(2))
+ })
+
+ It("should report failure", func() {
+ compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary)
+
+ Ω(compositeSummary.SuiteSucceeded).Should(BeFalse())
+ })
+
+ It("should notify the channel that it failed", func(done Done) {
+ Ω(<-result).Should(BeFalse())
+ close(done)
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
new file mode 100644
index 000000000..ef54862ea
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go
@@ -0,0 +1,22 @@
+package remote_test
+
+import "os"
+
+type fakeOutputInterceptor struct {
+ DidStartInterceptingOutput bool
+ DidStopInterceptingOutput bool
+ InterceptedOutput string
+}
+
+func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error {
+ interceptor.DidStartInterceptingOutput = true
+ return nil
+}
+
+func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ interceptor.DidStopInterceptingOutput = true
+ return interceptor.InterceptedOutput, nil
+}
+
+func (interceptor *fakeOutputInterceptor) StreamTo(*os.File) {
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
new file mode 100644
index 000000000..3543c59c6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go
@@ -0,0 +1,33 @@
+package remote_test
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+type post struct {
+ url string
+ bodyType string
+ bodyContent []byte
+}
+
+type fakePoster struct {
+ posts []post
+}
+
+func newFakePoster() *fakePoster {
+ return &fakePoster{
+ posts: make([]post, 0),
+ }
+}
+
+func (poster *fakePoster) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ bodyContent, _ := ioutil.ReadAll(body)
+ poster.posts = append(poster.posts, post{
+ url: url,
+ bodyType: bodyType,
+ bodyContent: bodyContent,
+ })
+ return nil, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
new file mode 100644
index 000000000..284bc62e5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
@@ -0,0 +1,147 @@
+package remote
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/onsi/ginkgo/internal/writer"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/reporters/stenographer"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/types"
+)
+
+//An interface to net/http's client to allow the injection of fakes under test
+type Poster interface {
+ Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
+}
+
+/*
+The ForwardingReporter is a Ginkgo reporter that forwards information to
+a Ginkgo remote server.
+
+When streaming parallel test output, this reporter is automatically installed by Ginkgo.
+
+This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`: the Ginkgo test runner
+detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
+in place of Ginkgo's DefaultReporter.
+*/
+
+type ForwardingReporter struct {
+ serverHost string
+ poster Poster
+ outputInterceptor OutputInterceptor
+ debugMode bool
+ debugFile *os.File
+ nestedReporter *reporters.DefaultReporter
+}
+
+func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
+ reporter := &ForwardingReporter{
+ serverHost: serverHost,
+ poster: poster,
+ outputInterceptor: outputInterceptor,
+ }
+
+ if debugFile != "" {
+ var err error
+ reporter.debugMode = true
+ reporter.debugFile, err = os.Create(debugFile)
+ if err != nil {
+ fmt.Println(err.Error())
+ os.Exit(1)
+ }
+
+ if !config.Verbose {
+ //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
+ ginkgoWriter.AndRedirectTo(reporter.debugFile)
+ }
+ outputInterceptor.StreamTo(reporter.debugFile) //This is not working
+
+ stenographer := stenographer.New(false, true, reporter.debugFile)
+ config.Succinct = false
+ config.Verbose = true
+ config.FullTrace = true
+ reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
+ }
+
+ return reporter
+}
+
+func (reporter *ForwardingReporter) post(path string, data interface{}) {
+ encoded, _ := json.Marshal(data)
+ buffer := bytes.NewBuffer(encoded)
+ reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
+ data := struct {
+ Config config.GinkgoConfigType `json:"config"`
+ Summary *types.SuiteSummary `json:"suite-summary"`
+ }{
+ conf,
+ summary,
+ }
+
+ reporter.outputInterceptor.StartInterceptingOutput()
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecSuiteWillBegin", data)
+}
+
+func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ setupSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/BeforeSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecWillRun(specSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecWillRun", specSummary)
+}
+
+func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ specSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecDidComplete(specSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecDidComplete", specSummary)
+}
+
+func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
+ output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ reporter.outputInterceptor.StartInterceptingOutput()
+ setupSummary.CapturedOutput = output
+ if reporter.debugMode {
+ reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/AfterSuiteDidRun", setupSummary)
+}
+
+func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+ reporter.outputInterceptor.StopInterceptingAndReturnOutput()
+ if reporter.debugMode {
+ reporter.nestedReporter.SpecSuiteDidEnd(summary)
+ reporter.debugFile.Sync()
+ }
+ reporter.post("/SpecSuiteDidEnd", summary)
+}
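
As a rough usage sketch (not taken from this change), a parallel test process could install the reporter as shown below; reading the host from GINKGO_REMOTE_REPORTING_SERVER and the single demo summary are assumptions for illustration only.

    package main

    import (
        "net/http"
        "os"

        "github.com/onsi/ginkgo/config"
        "github.com/onsi/ginkgo/internal/remote"
        "github.com/onsi/ginkgo/types"
    )

    func main() {
        host := os.Getenv("GINKGO_REMOTE_REPORTING_SERVER") // e.g. "http://127.0.0.1:12345"

        reporter := remote.NewForwardingReporter(
            config.DefaultReporterConfigType{},
            host,
            &http.Client{},                // net/http's client satisfies the Poster interface
            remote.NewOutputInterceptor(), // captures stdout/stderr between callbacks
            nil,                           // ginkgoWriter is only consulted when a debug file is set
            "",                            // empty debugFile keeps debug mode off
        )

        // Each reporter callback becomes a JSON POST against the remote server.
        reporter.SpecSuiteWillBegin(config.GinkgoConfig, &types.SuiteSummary{SuiteDescription: "demo"})
        reporter.SpecSuiteDidEnd(&types.SuiteSummary{SuiteDescription: "demo", SuiteSucceeded: true})
    }
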
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
new file mode 100644
index 000000000..0d7e4769c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
@@ -0,0 +1,181 @@
+package remote_test
+
+import (
+ "encoding/json"
+
+ . "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/config"
+ . "github.com/onsi/ginkgo/internal/remote"
+ "github.com/onsi/ginkgo/types"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("ForwardingReporter", func() {
+ var (
+ reporter *ForwardingReporter
+ interceptor *fakeOutputInterceptor
+ poster *fakePoster
+ suiteSummary *types.SuiteSummary
+ specSummary *types.SpecSummary
+ setupSummary *types.SetupSummary
+ serverHost string
+ )
+
+ BeforeEach(func() {
+ serverHost = "http://127.0.0.1:7788"
+
+ poster = newFakePoster()
+
+ interceptor = &fakeOutputInterceptor{
+ InterceptedOutput: "The intercepted output!",
+ }
+
+ reporter = NewForwardingReporter(config.DefaultReporterConfigType{}, serverHost, poster, interceptor, nil, "")
+
+ suiteSummary = &types.SuiteSummary{
+ SuiteDescription: "My Test Suite",
+ }
+
+ setupSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ }
+
+ specSummary = &types.SpecSummary{
+ ComponentTexts: []string{"My", "Spec"},
+ State: types.SpecStatePassed,
+ }
+ })
+
+ Context("When a suite begins", func() {
+ BeforeEach(func() {
+ reporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
+ })
+
+ It("should start intercepting output", func() {
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SuiteSummary and Ginkgo Config to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteWillBegin"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var sentData struct {
+ SentConfig config.GinkgoConfigType `json:"config"`
+ SentSuiteSummary *types.SuiteSummary `json:"suite-summary"`
+ }
+
+ err := json.Unmarshal(poster.posts[0].bodyContent, &sentData)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ Ω(sentData.SentConfig).Should(Equal(config.GinkgoConfig))
+ Ω(sentData.SentSuiteSummary).Should(Equal(suiteSummary))
+ })
+ })
+
+ Context("when a BeforeSuite completes", func() {
+ BeforeEach(func() {
+ reporter.BeforeSuiteDidRun(setupSummary)
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SetupSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/BeforeSuiteDidRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SetupSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ setupSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(setupSummary))
+ })
+ })
+
+ Context("when an AfterSuite completes", func() {
+ BeforeEach(func() {
+ reporter.AfterSuiteDidRun(setupSummary)
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+
+ It("should POST the SetupSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/AfterSuiteDidRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SetupSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ setupSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(setupSummary))
+ })
+ })
+
+ Context("When a spec will run", func() {
+ BeforeEach(func() {
+ reporter.SpecWillRun(specSummary)
+ })
+
+ It("should POST the SpecSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecWillRun"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SpecSummary
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(summary).Should(Equal(specSummary))
+ })
+
+ Context("When a spec completes", func() {
+ BeforeEach(func() {
+ specSummary.State = types.SpecStatePanicked
+ reporter.SpecDidComplete(specSummary)
+ })
+
+ It("should POST the SpecSummary to the Ginkgo server and include any intercepted output", func() {
+ Ω(poster.posts).Should(HaveLen(2))
+ Ω(poster.posts[1].url).Should(Equal("http://127.0.0.1:7788/SpecDidComplete"))
+ Ω(poster.posts[1].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SpecSummary
+ err := json.Unmarshal(poster.posts[1].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+ specSummary.CapturedOutput = interceptor.InterceptedOutput
+ Ω(summary).Should(Equal(specSummary))
+ })
+
+ It("should stop, then start intercepting output", func() {
+ Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue())
+ Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue())
+ })
+ })
+ })
+
+ Context("When a suite ends", func() {
+ BeforeEach(func() {
+ reporter.SpecSuiteDidEnd(suiteSummary)
+ })
+
+ It("should POST the SuiteSummary to the Ginkgo server", func() {
+ Ω(poster.posts).Should(HaveLen(1))
+ Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteDidEnd"))
+ Ω(poster.posts[0].bodyType).Should(Equal("application/json"))
+
+ var summary *types.SuiteSummary
+
+ err := json.Unmarshal(poster.posts[0].bodyContent, &summary)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ Ω(summary).Should(Equal(suiteSummary))
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
new file mode 100644
index 000000000..5154abe87
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
@@ -0,0 +1,13 @@
+package remote
+
+import "os"
+
+/*
+The OutputInterceptor is used by the ForwardingReporter to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput() error
+ StopInterceptingAndReturnOutput() (string, error)
+ StreamTo(*os.File)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
new file mode 100644
index 000000000..ab6622a29
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
@@ -0,0 +1,83 @@
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package remote
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+
+ "github.com/hpcloud/tail"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+ redirectFile *os.File
+ streamTarget *os.File
+ intercepting bool
+ tailer *tail.Tail
+ doneTailing chan bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+ if interceptor.intercepting {
+ return errors.New("Already intercepting output!")
+ }
+ interceptor.intercepting = true
+
+ var err error
+
+ interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
+ if err != nil {
+ return err
+ }
+
+ // Call a function in ./syscall_dup_*.go
+ // If building for everything other than linux_arm64,
+ // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
+ // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
+ syscallDup(int(interceptor.redirectFile.Fd()), 1)
+ syscallDup(int(interceptor.redirectFile.Fd()), 2)
+
+ if interceptor.streamTarget != nil {
+ interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
+ interceptor.doneTailing = make(chan bool)
+
+ go func() {
+ for line := range interceptor.tailer.Lines {
+ interceptor.streamTarget.Write([]byte(line.Text + "\n"))
+ }
+ close(interceptor.doneTailing)
+ }()
+ }
+
+ return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ if !interceptor.intercepting {
+ return "", errors.New("Not intercepting output!")
+ }
+
+ interceptor.redirectFile.Close()
+ output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
+ os.Remove(interceptor.redirectFile.Name())
+
+ interceptor.intercepting = false
+
+ if interceptor.streamTarget != nil {
+ interceptor.tailer.Stop()
+ interceptor.tailer.Cleanup()
+ <-interceptor.doneTailing
+ interceptor.streamTarget.Sync()
+ }
+
+ return string(output), err
+}
+
+func (interceptor *outputInterceptor) StreamTo(out *os.File) {
+ interceptor.streamTarget = out
+}
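
The core trick above is duplicating the redirect file's descriptor onto fds 1 and 2. A standalone sketch of that technique follows; it calls syscall.Dup2 directly, so it deliberately ignores the linux/arm64 Dup3 special case that syscall_dup_linux_arm64.go handles below.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "syscall"
    )

    func main() {
        tmp, err := ioutil.TempFile("", "capture")
        if err != nil {
            panic(err)
        }

        saved, _ := syscall.Dup(1)     // remember the real stdout so it can be restored
        syscall.Dup2(int(tmp.Fd()), 1) // fd 1 now points at the temp file

        fmt.Println("this line lands in the temp file, not the terminal")

        syscall.Dup2(saved, 1) // put the real stdout back
        syscall.Close(saved)

        tmp.Close()
        captured, _ := ioutil.ReadFile(tmp.Name())
        os.Remove(tmp.Name())
        fmt.Print("captured: ", string(captured))
    }
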
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
new file mode 100644
index 000000000..40c790336
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package remote
+
+import (
+ "errors"
+ "os"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &outputInterceptor{}
+}
+
+type outputInterceptor struct {
+ intercepting bool
+}
+
+func (interceptor *outputInterceptor) StartInterceptingOutput() error {
+ if interceptor.intercepting {
+ return errors.New("Already intercepting output!")
+ }
+ interceptor.intercepting = true
+
+ // not working on windows...
+
+ return nil
+}
+
+func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
+ // not working on windows...
+ interceptor.intercepting = false
+
+ return "", nil
+}
+
+func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
new file mode 100644
index 000000000..e6b4e9f32
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go
@@ -0,0 +1,13 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestRemote(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Remote Spec Forwarding Suite")
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
new file mode 100644
index 000000000..367c54daf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
@@ -0,0 +1,224 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used primarily to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package remote
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sync"
+
+ "github.com/onsi/ginkgo/internal/spec_iterator"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+)
+
+/*
+Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type Server struct {
+ listener net.Listener
+ reporters []reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteData types.RemoteBeforeSuiteData
+ parallelTotal int
+ counter int
+}
+
+//Create a new server, automatically selecting a port
+func NewServer(parallelTotal int) (*Server, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &Server{
+ listener: listener,
+ lock: &sync.Mutex{},
+ alives: make([]func() bool, parallelTotal),
+ beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
+ parallelTotal: parallelTotal,
+ }, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *Server) Start() {
+ httpServer := &http.Server{}
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+
+ //streaming endpoints
+ mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
+ mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
+ mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
+ mux.HandleFunc("/SpecWillRun", server.specWillRun)
+ mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
+ mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
+
+ //synchronization endpoints
+ mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
+ mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+ mux.HandleFunc("/counter", server.handleCounter)
+ mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
+
+ go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *Server) Close() {
+ server.listener.Close()
+}
+
+//The address at which the server can be reached. Pass this into the `ForwardingReporter`.
+func (server *Server) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+//
+// Streaming Endpoints
+//
+
+//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *Server) readAll(request *http.Request) []byte {
+ defer request.Body.Close()
+ body, _ := ioutil.ReadAll(request.Body)
+ return body
+}
+
+func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
+ server.reporters = reporters
+}
+
+func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+
+ var data struct {
+ Config config.GinkgoConfigType `json:"config"`
+ Summary *types.SuiteSummary `json:"suite-summary"`
+ }
+
+ json.Unmarshal(body, &data)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecSuiteWillBegin(data.Config, data.Summary)
+ }
+}
+
+func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var setupSummary *types.SetupSummary
+ json.Unmarshal(body, &setupSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.BeforeSuiteDidRun(setupSummary)
+ }
+}
+
+func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var setupSummary *types.SetupSummary
+ json.Unmarshal(body, &setupSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.AfterSuiteDidRun(setupSummary)
+ }
+}
+
+func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var specSummary *types.SpecSummary
+ json.Unmarshal(body, &specSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecWillRun(specSummary)
+ }
+}
+
+func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var specSummary *types.SpecSummary
+ json.Unmarshal(body, &specSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecDidComplete(specSummary)
+ }
+}
+
+func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ body := server.readAll(request)
+ var suiteSummary *types.SuiteSummary
+ json.Unmarshal(body, &suiteSummary)
+
+ for _, reporter := range server.reporters {
+ reporter.SpecSuiteDidEnd(suiteSummary)
+ }
+}
+
+//
+// Synchronization Endpoints
+//
+
+func (server *Server) RegisterAlive(node int, alive func() bool) {
+ server.lock.Lock()
+ defer server.lock.Unlock()
+ server.alives[node-1] = alive
+}
+
+func (server *Server) nodeIsAlive(node int) bool {
+ server.lock.Lock()
+ defer server.lock.Unlock()
+ alive := server.alives[node-1]
+ if alive == nil {
+ return true
+ }
+ return alive()
+}
+
+func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ if request.Method == "POST" {
+ dec := json.NewDecoder(request.Body)
+ dec.Decode(&(server.beforeSuiteData))
+ } else {
+ beforeSuiteData := server.beforeSuiteData
+ if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
+ beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
+ }
+ enc := json.NewEncoder(writer)
+ enc.Encode(beforeSuiteData)
+ }
+}
+
+func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
+ afterSuiteData := types.RemoteAfterSuiteData{
+ CanRun: true,
+ }
+ for i := 2; i <= server.parallelTotal; i++ {
+ afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
+ }
+
+ enc := json.NewEncoder(writer)
+ enc.Encode(afterSuiteData)
+}
+
+func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ c := spec_iterator.Counter{}
+ server.lock.Lock()
+ c.Index = server.counter
+ server.counter = server.counter + 1
+ server.lock.Unlock()
+
+ json.NewEncoder(writer).Encode(c)
+}
+
+func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
+ writer.Write([]byte(""))
+}
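
To make the synchronization side concrete, here is a small sketch (again, not part of the change itself) that starts a Server and polls the /counter endpoint the way parallel nodes do when claiming spec indices; the two-node total and the three-iteration loop are arbitrary.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"

        "github.com/onsi/ginkgo/internal/remote"
        "github.com/onsi/ginkgo/internal/spec_iterator"
    )

    func main() {
        server, err := remote.NewServer(2) // two parallel nodes
        if err != nil {
            panic(err)
        }
        server.Start()
        defer server.Close()

        // Each GET hands out the next global spec index under the server's lock.
        for i := 0; i < 3; i++ {
            resp, err := http.Get(server.Address() + "/counter")
            if err != nil {
                panic(err)
            }
            var c spec_iterator.Counter
            json.NewDecoder(resp.Body).Decode(&c)
            resp.Body.Close()
            fmt.Println("claimed spec index:", c.Index) // prints 0, 1, 2
        }
    }
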
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
new file mode 100644
index 000000000..36bd00355
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
@@ -0,0 +1,269 @@
+package remote_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/ginkgo/internal/remote"
+ . "github.com/onsi/gomega"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/reporters"
+ "github.com/onsi/ginkgo/types"
+
+ "bytes"
+ "encoding/json"
+ "net/http"
+)
+
+var _ = Describe("Server", func() {
+ var (
+ server *Server
+ )
+
+ BeforeEach(func() {
+ var err error
+ server, err = NewServer(3)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ server.Start()
+ })
+
+ AfterEach(func() {
+ server.Close()
+ })
+
+ Describe("Streaming endpoints", func() {
+ var (
+ reporterA, reporterB *reporters.FakeReporter
+ forwardingReporter *ForwardingReporter
+
+ suiteSummary *types.SuiteSummary
+ setupSummary *types.SetupSummary
+ specSummary *types.SpecSummary
+ )
+
+ BeforeEach(func() {
+ reporterA = reporters.NewFakeReporter()
+ reporterB = reporters.NewFakeReporter()
+
+ server.RegisterReporters(reporterA, reporterB)
+
+ forwardingReporter = NewForwardingReporter(config.DefaultReporterConfigType{}, server.Address(), &http.Client{}, &fakeOutputInterceptor{}, nil, "")
+
+ suiteSummary = &types.SuiteSummary{
+ SuiteDescription: "My Test Suite",
+ }
+
+ setupSummary = &types.SetupSummary{
+ State: types.SpecStatePassed,
+ }
+
+ specSummary = &types.SpecSummary{
+ ComponentTexts: []string{"My", "Spec"},
+ State: types.SpecStatePassed,
+ }
+ })
+
+ It("should make its address available", func() {
+ Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`))
+ })
+
+ Describe("/SpecSuiteWillBegin", func() {
+ It("should decode and forward the Ginkgo config and suite summary", func(done Done) {
+ forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary)
+ Ω(reporterA.Config).Should(Equal(config.GinkgoConfig))
+ Ω(reporterB.Config).Should(Equal(config.GinkgoConfig))
+ Ω(reporterA.BeginSummary).Should(Equal(suiteSummary))
+ Ω(reporterB.BeginSummary).Should(Equal(suiteSummary))
+ close(done)
+ })
+ })
+
+ Describe("/BeforeSuiteDidRun", func() {
+ It("should decode and forward the setup summary", func() {
+ forwardingReporter.BeforeSuiteDidRun(setupSummary)
+ Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary))
+ Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary))
+ })
+ })
+
+ Describe("/AfterSuiteDidRun", func() {
+ It("should decode and forward the setup summary", func() {
+ forwardingReporter.AfterSuiteDidRun(setupSummary)
+ Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary))
+ Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary))
+ })
+ })
+
+ Describe("/SpecWillRun", func() {
+ It("should decode and forward the spec summary", func(done Done) {
+ forwardingReporter.SpecWillRun(specSummary)
+ Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary))
+ Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary))
+ close(done)
+ })
+ })
+
+ Describe("/SpecDidComplete", func() {
+ It("should decode and forward the spec summary", func(done Done) {
+ forwardingReporter.SpecDidComplete(specSummary)
+ Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary))
+ Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary))
+ close(done)
+ })
+ })
+
+ Describe("/SpecSuiteDidEnd", func() {
+ It("should decode and forward the suite summary", func(done Done) {
+ forwardingReporter.SpecSuiteDidEnd(suiteSummary)
+ Ω(reporterA.EndSummary).Should(Equal(suiteSummary))
+ Ω(reporterB.EndSummary).Should(Equal(suiteSummary))
+ close(done)
+ })
+ })
+ })
+
+ Describe("Synchronization endpoints", func() {
+ Describe("GETting and POSTing BeforeSuiteState", func() {
+ getBeforeSuite := func() types.RemoteBeforeSuiteData {
+ resp, err := http.Get(server.Address() + "/BeforeSuiteState")
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+
+ r := types.RemoteBeforeSuiteData{}
+ decoder := json.NewDecoder(resp.Body)
+ err = decoder.Decode(&r)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ return r
+ }
+
+ postBeforeSuite := func(r types.RemoteBeforeSuiteData) {
+ resp, err := http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON()))
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+ }
+
+ Context("when the first node's Alive has not been registered yet", func() {
+ It("should return pending", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+ })
+ })
+
+ Context("when the first node is Alive but has not responded yet", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return true
+ })
+ })
+
+ It("should return pending", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
+ })
+ })
+
+ Context("when the first node has responded", func() {
+ var state types.RemoteBeforeSuiteData
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return false
+ })
+
+ state = types.RemoteBeforeSuiteData{
+ Data: []byte("my data"),
+ State: types.RemoteBeforeSuiteStatePassed,
+ }
+ postBeforeSuite(state)
+ })
+
+ It("should return the passed in state", func() {
+ returnedState := getBeforeSuite()
+ Ω(returnedState).Should(Equal(state))
+ })
+ })
+
+ Context("when the first node is no longer Alive and has not responded yet", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(1, func() bool {
+ return false
+ })
+ })
+
+ It("should return disappeared", func() {
+ state := getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
+
+ state = getBeforeSuite()
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
+ })
+ })
+ })
+
+ Describe("GETting RemoteAfterSuiteData", func() {
+ getRemoteAfterSuiteData := func() bool {
+ resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData")
+ Ω(err).ShouldNot(HaveOccurred())
+ Ω(resp.StatusCode).Should(Equal(http.StatusOK))
+
+ a := types.RemoteAfterSuiteData{}
+ decoder := json.NewDecoder(resp.Body)
+ err = decoder.Decode(&a)
+ Ω(err).ShouldNot(HaveOccurred())
+
+ return a.CanRun
+ }
+
+ Context("when there are unregistered nodes", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return false
+ })
+ })
+
+ It("should return false", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeFalse())
+ })
+ })
+
+ Context("when all none-node-1 nodes are still running", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return true
+ })
+
+ server.RegisterAlive(3, func() bool {
+ return false
+ })
+ })
+
+ It("should return false", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeFalse())
+ })
+ })
+
+ Context("when all none-1 nodes are done", func() {
+ BeforeEach(func() {
+ server.RegisterAlive(2, func() bool {
+ return false
+ })
+
+ server.RegisterAlive(3, func() bool {
+ return false
+ })
+ })
+
+ It("should return true", func() {
+ Ω(getRemoteAfterSuiteData()).Should(BeTrue())
+ })
+
+ })
+ })
+ })
+})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
new file mode 100644
index 000000000..9550d37b3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
@@ -0,0 +1,11 @@
+// +build linux,arm64
+
+package remote
+
+import "syscall"
+
+// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
+// use the nearly identical syscall.Dup3 instead
+func syscallDup(oldfd int, newfd int) (err error) {
+ return syscall.Dup3(oldfd, newfd, 0)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
new file mode 100644
index 000000000..75ef7fb78
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
@@ -0,0 +1,9 @@
+// +build solaris
+
+package remote
+
+import "golang.org/x/sys/unix"
+
+func syscallDup(oldfd int, newfd int) (err error) {
+ return unix.Dup2(oldfd, newfd)
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
new file mode 100644
index 000000000..ef6255960
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
@@ -0,0 +1,11 @@
+// +build !linux !arm64
+// +build !windows
+// +build !solaris
+
+package remote
+
+import "syscall"
+
+func syscallDup(oldfd int, newfd int) (err error) {
+ return syscall.Dup2(oldfd, newfd)
+}