grunt-parallel-spec-runner
Advanced tools
Comparing version 0.3.13 to 0.3.14
{ | ||
"name": "grunt-parallel-spec-runner", | ||
"version": "0.3.13", | ||
"version": "0.3.14", | ||
"description": "Plugin used to configure and launch multiple spec ", | ||
@@ -5,0 +5,0 @@ "homepage": "https://github.com/RallySoftware/grunt-parallel-spec-runner.git", |
# grunt-parallel-spec-runner | ||
> Plugin used to configure and launch multiple spec runners in parallel. | ||
> This task is currently experimental and may produce unexpected results; for any questions, email rmcqueen@rallydev.com | ||
## Getting Started | ||
@@ -78,2 +76,9 @@ Install the module with: `npm install grunt-parallel-spec-runner` | ||
#### options.burnin | ||
Type: `String` | ||
Specify the --burnin option to run a single spec file 100 times in parallel; the results of each run will be output to a table. This can be useful for finding flaky tests. | ||
NOTE: you must specify the full relative path to the spec file | ||
e.g. grunt test:faster:firefox --burnin=rui/test/javascripts/rui/ui/notify/NotificationContainerSpec.js | ||
### Output | ||
@@ -80,0 +85,0 @@ |
@@ -20,4 +20,7 @@ /* | ||
activeRunners = 0, | ||
runnersNeeded = 0, | ||
runnersCompleted = 0, | ||
specRunnerQueue = [], | ||
retries = 0, | ||
retriesRequired = false, | ||
specRunnersCompleted = false; | ||
@@ -34,3 +37,5 @@ | ||
message += grunt.util.linefeed; | ||
grunt.log.error(message); | ||
grunt.log.write(message); | ||
specRuner.summary.retry = true; | ||
retriesRequired = true; | ||
} | ||
@@ -65,2 +70,3 @@ }; | ||
specRunnerQueue = specRunners.slice(0); | ||
runnersNeeded = specRunnerQueue.length; | ||
}; | ||
@@ -79,3 +85,5 @@ | ||
message: 'Inconclusive'.yellow, | ||
failedSpecs: [] | ||
failedSpecs: [], | ||
retry: false, | ||
retries: 0 | ||
} | ||
@@ -123,7 +131,23 @@ }; | ||
} | ||
if(runnersCompleted >= runnersNeeded) | ||
{ | ||
var retriesNeeded = specRunners.filter(function (sr) { | ||
return sr.summary.retry; | ||
}); | ||
if (retries < 2 && retriesNeeded.length > 0 && specRunnersCompleted == false && !options.burnin) { | ||
runnersCompleted = 0; | ||
specRunnerQueue = retriesNeeded.slice(0); | ||
runnersNeeded = specRunnerQueue.length; | ||
retries++; | ||
retriesRequired = false; | ||
} | ||
else { | ||
specRunnersCompleted = true; | ||
} | ||
} | ||
if(specRunnersCompleted){ | ||
if(specRunnersCompleted) { | ||
clearInterval(i); | ||
} | ||
}, 2000); | ||
}, 4000); | ||
}; | ||
@@ -141,3 +165,3 @@ | ||
runnersCompleted++; | ||
if(runnersCompleted >= specRunners.length){ | ||
if(runnersCompleted >= runnersNeeded && !retriesRequired){ | ||
specRunnersCompleted = true; | ||
@@ -155,3 +179,7 @@ } | ||
opts.args.push('--symbolSummaryTimeout=' + options.symbolSummaryTimeout); | ||
if(specRunner.summary.retry){ | ||
specRunner.summary.retry = false; | ||
specRunner.summary.retries++; | ||
grunt.log.writeln(specRunner.file + ': Attempting retry ' + (specRunner.summary.retries) + ' of 2'); | ||
} | ||
var s = grunt.util.spawn(opts, df); | ||
@@ -185,3 +213,5 @@ | ||
specRunner.summary.message = msg.replace(':', '').red; | ||
specRunner.summary.retry = true; | ||
specRunner.summary.stop = new Date(); | ||
retriesRequired = true; | ||
} | ||
@@ -204,3 +234,5 @@ | ||
specRunner.summary.wins = +msg.match(/\d+/g)[0]; | ||
specRunner.summary.losses = 0; | ||
specRunner.summary.message = msg.replace('!', ''); | ||
specRunner.summary.failedSpecs = []; | ||
specRunner.summary.stop = new Date(); | ||
@@ -220,3 +252,3 @@ } | ||
msg = msg.match(pat)[0]; | ||
specRunner.summary.time = Math.ceil(+msg.match(/\d+\.?\d+/g)[0]); | ||
specRunner.summary.time += Math.ceil(+msg.match(/\d+\.?\d+/g)[0]); | ||
} | ||
@@ -263,6 +295,7 @@ | ||
totalSeconds = 0, | ||
totalRetries = 0, | ||
totalResult = '?'; | ||
grunt.log.writeln(ln); | ||
grunt.log.writeln('SpecRunner | Files | Tests | Seconds | Result'); | ||
grunt.log.writeln('SpecRunner | Files | Tests | Seconds | Retries | Result'); | ||
grunt.log.writeln(ln); | ||
@@ -285,5 +318,7 @@ | ||
totalTests = totalTests + specRunner.summary.tests; | ||
totalRetries = totalRetries + specRunner.summary.retries; | ||
totalSeconds = totalSeconds + specRunner.summary.time; | ||
if(totalLosses > 0){ | ||
this.hasFailures = true; | ||
totalResult = (totalLosses + ' of ' + totalWins + ' tests failed').red; | ||
@@ -299,2 +334,3 @@ } | ||
+ ' | ' + (' ' + specRunner.summary.time).slice(-6) + ' ' | ||
+ ' | ' + (' ' + specRunner.summary.retries).slice(-6) + ' ' | ||
+ ' | ' + specRunner.summary.message + ' '; | ||
@@ -305,2 +341,3 @@ grunt.log.writeln(row); | ||
this.Inconclusive = true; | ||
totalResult = 'Inconclusive'.yellow | ||
} | ||
@@ -330,2 +367,3 @@ | ||
+ ' | ' + (' ' + totalSeconds).slice(-6) + ' ' | ||
+ ' | ' + (' ' + totalRetries).slice(-6) + ' ' | ||
+ ' | ' + totalResult; | ||
@@ -418,4 +456,2 @@ | ||
//Use the user defined number of spec files per runner, or split the spec files among the available runners | ||
@@ -441,2 +477,5 @@ options.maxSpecFilesPerRunner = options.maxSpecFilesPerRunner || (+grunt.option('maxSpecFilesPerRunner') > 0 ? +grunt.option('maxSpecFilesPerRunner') | ||
} | ||
if(specRunner.hasFailures){ | ||
grunt.log.error('Task failed due to failed test, please try again.'.red); | ||
} | ||
var endTime = new Date(); | ||
@@ -443,0 +482,0 @@ grunt.log.writeln("Total Execution Time:" + Math.ceil((endTime - startTime) / 1000 / 60).toFixed(2) + ' Minutes'); |
218624
520
112