Comparing version 4.1.1 to 4.2.0
@@ -7,2 +7,10 @@ # Changelog
+## [4.2.0](https://github.com/doesdev/mvt/compare/4.1.1...4.2.0)
+#### 2021-07-10
+### Changed
+- Fix #10, caused by circular require
+- Update Travis to use node 16, 14, 12
+- Update ava
 ## [4.1.1](https://github.com/doesdev/mvt/compare/4.1.0...4.1.1)
@@ -9,0 +17,0 @@ #### 2020-07-16
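For context on the circular require bullet above: in CommonJS, when module A is still loading and requires module B, and B immediately requires A back, B receives A's exports object as it exists at that moment, so anything A has not yet assigned comes through as undefined. A minimal sketch of that failure mode, using hypothetical files a.js and b.js rather than the actual mvt modules:

// a.js (hypothetical) — starts loading, requires b.js before it assigns its own exports
const b = require('./b')
module.exports = { failing: (msg) => `expected failure: ${msg}`, check: (msg) => b.check(msg) }

// b.js (hypothetical) — requiring a.js back here returns a's exports as they are right now: {}
const { failing } = require('./a') // failing is undefined; a.js has not finished loading
module.exports = { check: (msg) => failing(msg) } // TypeError once check() is actually called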
@@ -0,0 +0,0 @@ #! /usr/bin/env node
@@ -6,84 +6,87 @@ 'use strict'
 const { handleError } = require('./errors')
-const { failing } = require('./tests')
 const { reporter, info } = require('./reporters')
 const { char, color, fmtMs } = require('./utility')

-const runner = async (t, noExit) => {
-  const { msg, fn, failing, benchOpts, fileName: currFile } = t
+const getRunner = (tests) => {
+  const runner = async (t, noExit) => {
+    const { msg, fn, failing, benchOpts, fileName: currFile } = t

-  if (currFile) {
-    if (currFile !== state.lastFileName) {
-      state.fileTestCount = 0
-      info(`\nRunning tests for ${currFile}\n\n`)
-    }
-    state.lastFileName = currFile
-  }
+    if (currFile) {
+      if (currFile !== state.lastFileName) {
+        state.fileTestCount = 0
+        info(`\nRunning tests for ${currFile}\n\n`)
+      }
+      state.lastFileName = currFile
+    }

-  if (benchOpts) return benchRunner(t)
+    if (benchOpts) return benchRunner(t)

-  const start = Date.now()
+    const start = Date.now()

-  try {
-    await fn(assert(msg, failing))
-  } catch (error) {
-    if (!failing) return handleError(msg, error, noExit)
-
-    const ms = ` (${fmtMs(Date.now() - start)})`
-    const out = `${char('okFail')} ${color.red}${msg}${ms}`
-    return reporter({ msg, out, error, pass: false, mod: 'failing' })
-  }
+    try {
+      await fn(assert(msg, failing))
+    } catch (error) {
+      if (!failing) return handleError(msg, error, noExit)
+
+      const ms = ` (${fmtMs(Date.now() - start)})`
+      const out = `${char('okFail')} ${color.red}${msg}${ms}`
+      return reporter({ msg, out, error, pass: false, mod: 'failing' })
+    }

-  const ms = Date.now() - start
+    const ms = Date.now() - start

-  if (failing) {
-    return handleError(msg, new Error('Passed test called with test.failing'))
-  }
+    if (failing) {
+      return handleError(msg, new Error('Passed test called with test.failing'))
+    }

-  if (!msg) return
+    if (!msg) return

-  const out = `${char('good')} ${msg} (${fmtMs(ms)})`
-  return reporter({ msg, out, pass: true })
-}
+    const out = `${char('good')} ${msg} (${fmtMs(ms)})`
+    return reporter({ msg, out, pass: true })
+  }

-const benchRunner = async ({ msg, fn, benchOpts }) => {
-  const { samples, max } = benchOpts
-  const start = process.hrtime()
-  let msAvg
+  const benchRunner = async ({ msg, fn, benchOpts }) => {
+    const { samples, max } = benchOpts
+    const start = process.hrtime()
+    let msAvg

-  try {
-    if (benchOpts.parallel) {
-      const wrapped = async () => {
-        const iStart = process.hrtime()
-        await fn(assert(msg, failing))
-        return process.hrtime(iStart)
-      }
-      const times = await Promise.all([...Array(samples)].map(wrapped))
-      msAvg = parseInt(times.reduce((accum, curval) => {
-        return accum + (curval[0] * 1e3 + curval[1] / 1e6)
-      }, 0) / samples, 10)
-    } else {
-      for (let i = 0; i < samples; i++) await fn(assert(msg, failing))
-    }
-  } catch (ex) {
-    return handleError(msg, ex)
-  }
+    try {
+      if (benchOpts.parallel) {
+        const wrapped = async () => {
+          const iStart = process.hrtime()
+          await fn(assert(msg, tests.failing))
+          return process.hrtime(iStart)
+        }
+        const times = await Promise.all([...Array(samples)].map(wrapped))
+        msAvg = parseInt(times.reduce((accum, curval) => {
+          return accum + (curval[0] * 1e3 + curval[1] / 1e6)
+        }, 0) / samples, 10)
+      } else {
+        for (let i = 0; i < samples; i++) await fn(assert(msg, tests.failing))
+      }
+    } catch (ex) {
+      return handleError(msg, ex)
+    }

-  const ranFor = process.hrtime(start)
-  const msTotal = ranFor[0] * 1e3 + ranFor[1] / 1e6
-  msAvg = msAvg || parseInt(msTotal / samples, 10)
+    const ranFor = process.hrtime(start)
+    const msTotal = ranFor[0] * 1e3 + ranFor[1] / 1e6
+    msAvg = msAvg || parseInt(msTotal / samples, 10)

-  if (typeof benchOpts.cb === 'function') {
-    benchOpts.cb({ msTotal, msAvg })
-  }
+    if (typeof benchOpts.cb === 'function') {
+      benchOpts.cb({ msTotal, msAvg })
+    }

-  if (msAvg > max) {
-    const maxErr = new Error(`Bench failed: (${fmtMs(msAvg)} > ${fmtMs(max)})`)
-    return handleError(msg, maxErr)
-  }
+    if (msAvg > max) {
+      const maxErr = new Error(`Bench failed: (${fmtMs(msAvg)} > ${fmtMs(max)})`)
+      return handleError(msg, maxErr)
+    }

-  const out = `${char('good')} ${msg} (${fmtMs(msAvg)} avg)`
-  return reporter({ msg, out, pass: true })
-}
+    const out = `${char('good')} ${msg} (${fmtMs(msAvg)} avg)`
+    return reporter({ msg, out, pass: true })
+  }
+
+  return { runner, benchRunner }
+}

-module.exports = { runner, benchRunner }
+module.exports = { getRunner }
@@ -5,3 +5,3 @@ 'use strict'
 const { finalizeError } = require('./errors')
-const { runner } = require('./runners')
+const { getRunner } = require('./runners')
 const { assert } = require('./assertions')
@@ -14,2 +14,3 @@ const { reporter, summary } = require('./reporters')
+const { runner } = getRunner(test)
 const curLen = state.queue.length
@@ -16,0 +17,0 @@ process.nextTick(async () => {
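Taken together, the two hunks above replace a load-time require with late injection: runners.js no longer requires './tests' for failing, it exports a getRunner factory instead, and index.js hands the tests module in after everything has finished loading. A reduced sketch of that pattern, with simplified names and bodies rather than the actual mvt source:

// runners.js (pattern sketch) — nothing from './tests' is required at load time
const getRunner = (tests) => {
  // `tests` is only dereferenced when runner() runs, after both modules are fully loaded
  const runner = async (t) => console.log(t.msg, Boolean(tests.failing))
  return { runner }
}
module.exports = { getRunner }

// index.js (pattern sketch) — builds the runner once the tests object exists
const { getRunner } = require('./runners')
const tests = { failing: true }
const { runner } = getRunner(tests)
runner({ msg: 'runs without a require cycle' })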
 {
   "name": "mvt",
-  "version": "4.1.1",
+  "version": "4.2.0",
   "description": "Minimum Viable Testing framework",
@@ -42,4 +42,4 @@ "engines": {
   "devDependencies": {
-    "ava": "^3.10.1"
+    "ava": "^3.15.0"
   }
 }