Comparing version 1.0.0-RC.2 to 1.0.0-RC.3
17
index.js
@@ -11,9 +11,14 @@ #!/usr/bin/env node | ||
debug: [ 'd', 'debug' ], | ||
failBadCount: [ 'fail-bad-count' ], | ||
} | ||
const options = { | ||
const defaultOptions = { | ||
color: true, | ||
help: false, | ||
pessimistic: false, | ||
failBadCount: false, | ||
verbose: false, | ||
debug: false, | ||
} | ||
const options = { | ||
...defaultOptions, | ||
...minimist(process.argv.slice(2), { alias }), | ||
@@ -31,3 +36,11 @@ } | ||
const { results } = parser._writable | ||
if (!results.ok) process.exit(1) | ||
if (!results.ok) { | ||
if ( | ||
results.badCount && | ||
results.failures.length === 0 && | ||
!options.failBadCount | ||
) process.exit(0) | ||
else process.exit(1) | ||
} | ||
if ( | ||
@@ -34,0 +47,0 @@ results.count === 0 && |
@@ -5,3 +5,3 @@ { | ||
"author": "tbeseda", | ||
"version": "1.0.0-RC.2", | ||
"version": "1.0.0-RC.3", | ||
"license": "Apache-2.0", | ||
@@ -46,10 +46,10 @@ "type": "module", | ||
"tap-arc.upstream-error": "npm run --silent tape.upstream-error | node index.js", | ||
"tape.diff": "tape test/create-diff-tap.cjs", | ||
"tape.empty": "tape test/create-empty-tap.cjs", | ||
"tape.mixed": "tape test/create-mixed-tap.cjs", | ||
"tape.passing": "tape test/create-passing-tap.cjs", | ||
"tape.simple": "tape test/create-simple-tap.cjs", | ||
"tape.throws": "tape test/create-throws-tap.cjs", | ||
"tape.upstream-error": "tape test/create-upstream-error-tap.cjs", | ||
"tape": "tape test/index.js | tap-min", | ||
"tape.diff": "tape test/mock/create-diff-tap.cjs", | ||
"tape.empty": "tape test/mock/create-empty-tap.cjs", | ||
"tape.mixed": "tape test/mock/create-mixed-tap.cjs", | ||
"tape.passing": "tape test/mock/create-passing-tap.cjs", | ||
"tape.simple": "tape test/mock/create-simple-tap.cjs", | ||
"tape.throws": "tape test/mock/create-throws-tap.cjs", | ||
"tape.upstream-error": "tape test/mock/create-upstream-error-tap.cjs", | ||
"tape": "tape test/**/*-test.js | tap-min", | ||
"test": "npm run lint && npm run tape" | ||
@@ -56,0 +56,0 @@ }, |
@@ -1,4 +0,14 @@ | ||
# `tap-arc` | ||
<h1 align="center"><code>tap-arc</code> ๐</h1> | ||
> A small (~25kB) [TAP](https://testanything.org/) reporter with spec-like output, streaming, and failure diffing. | ||
<p align="center"> | ||
A small <a href="https://testanything.org/">TAP</a> reporter with spec-like output, streaming, and failure diffing.<br> | ||
<a href="https://www.npmjs.com/package/tap-arc"><strong><code>tap-arc</code> on npmjs.org »</strong></a><br> | ||
<br> | ||
Contents: | ||
<a href="#Installation-and-usage">Install</a> | ||
• | ||
<a href="#Development">Development</a> | ||
• | ||
<a href="#FAQ">FAQ</a> | ||
</p> | ||
@@ -14,5 +24,5 @@ ## Objectives | ||
## Installation & Usage | ||
## Installation and Usage | ||
> Compatible with Node.js 16+. | ||
Compatible with Node.js v16+ -- v14 also works but is ***not*** recommended. | ||
@@ -35,3 +45,3 @@ Save `tap-arc` as a development dependency: | ||
> ๐ `tap-arc` will format output from any tap reporter. [`tape`](https://github.com/substack/tape) was used for testing. | ||
๐ `tap-arc` will format output from any tap reporter. [`tape`](https://github.com/ljharb/tape) is our favorite and was used for testing. | ||
@@ -49,3 +59,3 @@ ### `tap-arc --help` | ||
-v | --verbose | ||
Output full stack trace | ||
Output full stack trace, TAP version, and plan | ||
@@ -69,6 +79,9 @@ -p | --pessimistic | --bail | ||
### Tip! | ||
### Dev Tips | ||
Use | ||
1. `./test/smoke.js` contains the bare minimum usage of `tap-parser` with `process.stdin`. | ||
Helpful for understanding `tap-parser`'s behavior. | ||
2. To see previous exit code, run: | ||
```sh | ||
@@ -78,14 +91,48 @@ echo $? | ||
to see previous exit code. | ||
### Testing | ||
### Tests | ||
Primarily, `tap-arc` is tested to output the correct exit code based on your test suite's TAP output. | ||
`tap-arc` is tested to output the correct exit code based on your test suite's TAP output. In the process, the boundaries of tap-arc's process are also tested by creating and parsing several types of TAP output. | ||
Testing could be improved by unit testing the printer and diff maker. | ||
## FAQ | ||
<details open> | ||
<summary>"Expected <code>n</code> assertions, but found <code>&lt; n</code>"</summary> | ||
_What happened?_ | ||
✅ The TAP parser found zero failing tests | ||
✅ The final tally from the raw TAP shows `n` of `n` passed | ||
🤨 But the TAP plan called for more assertions than were found, counted, and parsed. | ||
🙅‍♂️ Currently, when this case is detected, `tap-arc` will exit with a successful status code. | ||
This can be overridden with the `--fail-bad-count` flag. | ||
_Why, though_? | ||
This has been observed specifically on Windows, where the TAP output is buffered to another stream and not piped to `tap-arc`. | ||
Libraries like `mock-fs` tinker with stdout and subsequent TAP output is lost. Try closing those helpers before making an assertion that generates TAP. | ||
</details> | ||
<details> | ||
<summary>"0 tests found" fails the suite?</summary> | ||
Yes. At least one passing test is required to pass the suite. | ||
This helps ensure there wasn't a silent, catastrophic failure in the test suite. | ||
</details> | ||
<details> | ||
<summary>Why does <code>tap-arc</code> get to decide these things?</summary> | ||
`tap-arc` is responsible for the test suite's exit code. If your entire CI stack is piped to a reporter, it's an important job. So `tap-arc` is a bit skeptical by default to help ensure your suite is passing. | ||
If you'd like to see different behavior from `tap-arc`, please open an issue or PR. We'd love to hear your use case. | ||
</details> | ||
## Credit & Inspiration | ||
- [tap-spec](https://github.com/scottcorgan/tap-spec) ol' reliable, but a bit stale and npm vulnerabilities | ||
- [tap-spec](https://github.com/scottcorgan/tap-spec) ol' reliable, but a bit stale and vulnerable | ||
- [tap-difflet](https://github.com/namuol/tap-difflet) inspired output and diffing, also vulnerable | ||
- [tap-min](https://github.com/derhuerst/tap-min) helpful approaches to streaming and exit codes | ||
- [tap-min](https://github.com/derhuerst/tap-min) helpful approaches to streaming and exit codes, used to report `tap-arc`'s TAP |
@@ -10,3 +10,3 @@ export default ` | ||
-v | --verbose | ||
Output full stack trace | ||
Output full stack trace, TAP version, and plan | ||
@@ -13,0 +13,0 @@ -p | --pessimistic | --bail |
@@ -12,3 +12,3 @@ import { Chalk } from 'chalk' | ||
export default function (options, output) { | ||
export default function (options) { | ||
const { color, debug, verbose } = options | ||
@@ -39,24 +39,12 @@ const d = debug || verbose | ||
return { | ||
end (start) { | ||
output.end(`${dim(prettyMs(start))}\n`) | ||
}, | ||
print (str, p = 0, n = 1) { | ||
output.write(`${pad(p)}${str}${'\n'.repeat(n)}`) | ||
}, | ||
pass (test) { | ||
const { id, name } = test | ||
pass ({ id, name }) { | ||
return `${passMark}${d ? ` [${id}]` : ''} ${dim(name)}` | ||
}, | ||
fail (test) { | ||
const { id, name, tapError } = test | ||
return tapError | ||
? `${failMark} ${red(tapError)}` | ||
: `${failMark} [${id}] ${red(name)}` | ||
fail ({ id, name }) { | ||
return `${failMark} [${id}] ${red(name)}` | ||
}, | ||
skip (test) { | ||
const { id, name } = test | ||
skip ({ id, name }) { | ||
return cyan(`${skipMark}${d ? ` [${id}]` : ''} ${name}`) | ||
}, | ||
todo (test) { | ||
const { id, name, ok: pass } = test | ||
todo ({ id, name, ok: pass }) { | ||
const method = pass ? dim : red | ||
@@ -67,5 +55,6 @@ return method(`${skipMark}${d ? ` [${id}]` : ''} ${name}`) | ||
pad, | ||
prettyMs, | ||
actual, | ||
bad: red, | ||
bail: bold.underline.red, | ||
realBad: bold.underline.red, | ||
dim, | ||
@@ -72,0 +61,0 @@ expected, |
@@ -15,6 +15,9 @@ import { PassThrough } from 'stream' | ||
const stream = duplexer(parser, output) | ||
const _ = createPrinter(options) | ||
const { diffOptions, prettyMs, pad } = _ | ||
const makeDiff = createMakeDiff(diffOptions) | ||
const _ = createPrinter(options, output) | ||
const { print: P } = _ | ||
const makeDiff = createMakeDiff(_.diffOptions) | ||
function P (str, p = 0, n = 1) { | ||
output.write(`${pad(p)}${str}${'\n'.repeat(n)}`) | ||
} | ||
@@ -171,14 +174,30 @@ const cwd = process.cwd() | ||
if (!result.ok) { | ||
let failureSummary = '\n' | ||
failureSummary += _.bad('Failed tests:') | ||
failureSummary += ` There ${result.fail > 1 ? 'were' : 'was'} ` | ||
failureSummary += _.bad(result.fail) | ||
failureSummary += ` failure${result.fail > 1 ? 's' : ''}\n` | ||
if ( | ||
result.failures[0] && | ||
result.failures[0].tapError && | ||
result.failures[0].tapError.startsWith('incorrect number of tests') | ||
) { | ||
// custom failure was created by tap-parser | ||
result.badCount = true // persisted to CLI process handler | ||
result.failures.shift() | ||
result.fail-- | ||
P(_.realBad(`\nExpected ${result.plan.end || '?'} assertions, parsed ${result.count || '?'}`)) | ||
} | ||
P(failureSummary) | ||
if (result.failures.length > 0) { | ||
const singular = result.fail === 1 | ||
let failureSummary = '\n' | ||
failureSummary += _.bad('Failed tests:') | ||
failureSummary += ` There ${singular ? 'was' : 'were'} ` | ||
failureSummary += _.bad(result.fail) | ||
failureSummary += ` failure${singular ? '' : 's'}\n` | ||
for (const test of result.failures) P(_.fail(test), 2) | ||
P(failureSummary) | ||
for (const test of result.failures) P(_.fail(test), 2) | ||
} | ||
} | ||
P(`\ntotal: ${result.count}`) | ||
if (result.bailout) P(_.realBad('BAILED!')) | ||
if (result.pass > 0) P(_.good(`passing: ${result.pass}`)) | ||
@@ -188,3 +207,2 @@ if (result.fail > 0) P(_.bad(`failing: ${result.fail}`)) | ||
if (result.todo > 0) P(`todo: ${result.todo}`) | ||
if (result.bailout) P(_.bail('BAILED!')) | ||
@@ -198,3 +216,3 @@ if (debug) { | ||
_.end(start) | ||
output.end(`${_.dim(prettyMs(start))}\n`) | ||
}) | ||
@@ -201,0 +219,0 @@ |
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
29062
391
134