New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

mvt

Package Overview
Dependencies
Maintainers
1
Versions
26
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

mvt - npm Package Compare versions

Comparing version 3.0.0 to 4.0.0-beta.0

cli-char-supported.js

351

index.js
'use strict'
const readline = require('readline')
const { deepStrictEqual } = require('assert').strict
const checkChar = require('./cli-char-supported')
const colorReset = `\u001b[0m`
const colorGreen = `\u001b[32m`
const verbose = process.argv.some((a) => a === '--verbose')
const colorRed = `\u001b[31m`
const colorBlue = `\u001b[34m`
const colorYellow = `\u001b[33m`
const queue = []
let startime
const start = (msg) => {
if (msg) process.stdout.write(`${msg}\n`)
startime = Date.now()
let verbose = process.argv.some((a) => a === '--verbose' || a === '-v')
let charsChecked = false
const chars = {
good: { emoji: '✅', plain: `${colorGreen}[√]` },
fail: { emoji: '❌', plain: `${colorRed}[X]` },
skip: { emoji: '⛔', plain: `${colorYellow}[-]` },
todo: { emoji: '⏰', plain: `${colorBlue}[!]` },
okFail: { emoji: '❎', plain: `${colorGreen}[X]` }
}
const finish = () => {
const sec = `${((Date.now() - startime) / 1000).toFixed(2)} seconds`
const plural = run > 1 ? 'tests' : 'test'
readline.clearLine(process.stdout, 0)
readline.cursorTo(process.stdout, 0)
const out = `${colorGreen}All ${run} ${plural} passed${colorReset} in ${sec}\n`
process.stdout.write(out)
process.exit(0)
// Render the named status glyph — emoji when the terminal supports it,
// otherwise the plain ANSI form — always followed by a color reset.
const char = (n) => {
  const entry = chars[n]
  const glyph = entry.useEmoji ? entry.emoji : entry.plain
  return `${glyph}${colorReset}`
}
let run = 0
const fail = (err, meta) => {
process.stdout.write(`${colorReset}\n`)
console.error(err instanceof Error ? err : new Error(`Fail: ${err}`))
const setup = (opts = {}) => before(async () => {
verbose = opts.verbose !== undefined ? !!opts.verbose : verbose
if (meta) {
console.error('\n')
if (charsChecked) return
if (meta instanceof Error) {
console.error(meta)
} else {
Object.entries(meta).forEach(([k, v]) => {
console.error(`${k}:`)
console.error(`${`${v}`.trimEnd()}\n`)
})
}
for (let c of Object.values(chars)) {
c.useEmoji = await checkChar(c.emoji, 2)
}
return process.exit(1)
charsChecked = true
})
// Report a test failure: print the fail glyph + message to stderr, log the
// underlying error, then exit with code 1 (unless noExit is set, in which
// case return null so the caller can continue).
const handlErr = (msg, err, noExit) => {
  process.stderr.write(`${char('fail')} ${msg}\n`)
  const reason = err || `Failed: ${msg}`
  console.error(reason instanceof Error ? reason : new Error(reason))
  if (noExit) return null
  return process.exit(1)
}
const runTests = async (msg, cb, meta) => {
if (typeof msg === 'function') {
meta = cb
cb = msg
msg = undefined
const runner = async (t, noExit) => {
const { msg, fn, failing, benchOpts } = t
if (benchOpts) return benchRunner(t)
const start = Date.now()
try {
await fn(assert(msg, failing))
} catch (ex) {
if (!failing) return handlErr(msg, ex, noExit)
if (!verbose) return
const toPrint = `${char('okFail')} ${colorRed}${msg}${colorReset}\n`
return process.stdout.write(toPrint)
}
start(msg)
const ms = Date.now() - start
if (failing) {
return handlErr(msg, new Error('Passed test called with test.failing'))
}
if (!msg || !verbose) return
process.stdout.write(`${char('good')} ${msg} (${ms}ms)\n`)
}
const benchRunner = async ({ msg, fn, benchOpts }) => {
const { samples, max } = benchOpts
const start = Date.now()
try {
await cb()
for (let i = 0; i < samples; i++) await fn(assert(msg, failing))
} catch (ex) {
return fail(ex, meta)
return handlErr(msg, ex)
}
finish()
const ms = (Date.now() - start) / samples
if (ms > max) {
return handlErr(msg, new Error(`Bench failed: (${ms}ms > ${max}ms)`))
}
if (!verbose) return
process.stdout.write(`${char('good')} ${msg} (${ms}ms)\n`)
}
const test = (msg, isTruthyOrCompA, compB, meta) => {
run++
const test = async (msg, fn) => {
if (msg && fn) queue.push({ msg, fn })
if (typeof isTruthyOrCompA === 'function') {
const curLen = queue.length
process.nextTick(async () => {
if (curLen !== queue.length) return
let first = []
let last = []
let only = []
let normal = []
let countTodo = 0
let countSkipped = 0
let countFailing = 0
for (let t of queue) {
if (t.todo) countTodo += 1
if (t.skipped) countSkipped += 1
if (t.failing) countFailing += 1
if (t.first) first.push(t)
else if (t.last) last.push(t)
else if (t.only) only.push(t)
else normal.push(t)
}
const countOnly = only.length
const countRan = (countOnly + normal.length)
const plural = (c, s) => c > 1 ? `${s || 'test'}s` : `${s || 'test'}`
const start = Date.now()
for (let t of first) await runner(t)
try {
isTruthyOrCompA = isTruthyOrCompA()
if (countOnly) {
for (let t of only) await runner(t, true)
} else {
for (let t of normal) await runner(t, true)
}
} catch (ex) {
return fail(msg, ex)
for (let t of last) await runner(t)
return process.exit(1)
}
for (let t of last) await runner(t)
const ms = Date.now() - start
const result = `${colorGreen}All tests passed in ${ms}ms${colorReset}`
process.stdout.write(`\n${char('good')} ${result}\n`)
process.stdout.write(`${colorReset}${countRan} ${plural(countRan)} declared\n`)
if (countOnly) {
const result = `${countOnly} ${plural(countOnly)} run with test.only`
process.stdout.write(`${colorBlue}${result}${colorReset}\n`)
} else {
if (countTodo) {
const result = `${countTodo} ${plural(countTodo)} marked as TODO`
process.stdout.write(`${colorBlue}${result}${colorReset}\n`)
}
if (countSkipped) {
const result = `${countSkipped} ${plural(countSkipped)} skipped`
process.stdout.write(`${colorYellow}${result}${colorReset}\n`)
}
if (countFailing) {
const result = `${countFailing} known ${plural(countFailing, 'failure')}`
process.stdout.write(`${colorRed}${result}${colorReset}\n`)
}
}
})
}
// Register a hook that runs before all queued tests (placed at the front of
// the queue and flagged `first` so the runner orders it ahead of tests).
const before = (fn) => {
  const entry = { fn, first: true }
  queue.unshift(entry)
  test()
  return test
}
// Register a hook that runs after all queued tests (flagged `last` so the
// runner defers it until everything else has finished).
const after = (fn) => {
  const entry = { fn, last: true }
  queue.push(entry)
  test()
  return test
}
// Queue a skipped test: a no-op entry that, in verbose mode, announces the
// skip with the skip glyph and a yellow message.
const skip = (msg) => {
  const announce = () => {
    if (!verbose) return
    process.stdout.write(`${char('skip')} ${colorYellow}${msg}${colorReset}\n`)
  }
  queue.push({ fn: announce, skipped: true })
  test()
  return test
}
if (compB !== undefined) {
if (isTruthyOrCompA !== compB) msg += `\n${isTruthyOrCompA} !== ${compB}`
isTruthyOrCompA = isTruthyOrCompA === compB
// Queue a TODO placeholder: a no-op entry that, in verbose mode, announces
// itself with the todo glyph and a blue message.
const todo = (msg) => {
  const announce = () => {
    if (!verbose) return
    process.stdout.write(`${char('todo')} ${colorBlue}${msg}${colorReset}\n`)
  }
  queue.push({ fn: announce, todo: true })
  test()
  return test
}
if (!isTruthyOrCompA) return fail(msg, meta)
// Queue an exclusive test; when any `only` entries exist, the runner executes
// just those (plus before/after hooks).
const only = (msg, fn) => {
  const entry = { msg, fn, only: true }
  queue.push(entry)
  test()
  return test
}
if (verbose) {
process.stdout.write(`${colorGreen}Passed:${colorReset} ${msg}\n`)
} else {
const plural = run > 1 ? 'tests have' : 'test'
readline.clearLine(process.stdout, 0)
readline.cursorTo(process.stdout, 0)
process.stdout.write(`${colorGreen}${run} ${plural} passed${colorReset}`)
// Queue a known-failing test: the runner inverts the outcome, so a throw is
// expected and an unexpected pass is reported as an error.
const failing = (msg, fn) => {
  const entry = { msg, fn, failing: true }
  queue.push(entry)
  test()
  return test
}
// Queue a benchmark test. benchOpts may be omitted (fn passed second);
// defaults: max 100ms per sample, 10 samples.
//
// Fix: removed a stray `return true` (diff-merge residue from the old `test`
// implementation) that made the defaulting and queue.push unreachable, so
// benchmarks were never actually queued.
const bench = (msg, benchOpts = {}, fn) => {
  if (typeof benchOpts === 'function') {
    // Called as bench(msg, fn) — shift arguments.
    fn = benchOpts
    benchOpts = {}
  }
  benchOpts.max = benchOpts.max || 100
  benchOpts.samples = benchOpts.samples || 10
  queue.push({ msg, fn, benchOpts })
  test()
  return test
}
const testAsync = async (msg, promise, meta) => {
// Build the assertion object handed to each test callback. Every helper is
// pre-bound to the test's message and its `failing` flag.
const assert = (msg, f) => {
  return {
    is: is(msg, f),
    not: not(msg, f),
    pass: pass(msg, f),
    fail: fail(msg, f),
    truthy: truthy(msg, f),
    falsy: falsy(msg, f),
    deepEqual: deepEqual(msg, f),
    throws: throws(msg, f),
    notThrows: notThrows(msg, f),
    throwsAsync: throwsAsync(msg, f),
    notThrowsAsync: notThrowsAsync(msg, f)
  }
}
const toPrint = (s) => typeof s === 'string' ? `'${s}'` : s
// Execute one assertion predicate and translate the outcome.
//
// Fix: the span contained diff-merge residue from the old testAsync
// implementation — `test(msg, await promise(), ...)` inside this non-async
// arrow (a syntax error) plus unreachable `return true` / `return fail(...)`
// lines. Restored the coherent body.
//
// On pass: returns a fresh assert object so calls can chain.
// On fail: reports via handlErr (which exits unless noExit was used) —
//   except for test.failing, where a non-pass is EXPECTED, so the error is
//   thrown for the runner's catch path to record as a known failure.
const wrap = (msg, passFn, err, failing) => {
  let passed = false
  try {
    passed = passFn()
  } catch (ex) {
    err = ex
  }
  if (failing && !passed) throw (err instanceof Error ? err : new Error(msg))
  return passed ? assert(msg, failing) : handlErr(msg, err)
}
module.exports = { runTests, start, finish, test, testAsync }
// assert.is — strict sameness via Object.is (handles NaN and -0 correctly).
const is = (msg, f) => (a, b) => {
  const same = () => Object.is(a, b)
  return wrap(msg, same, `${toPrint(a)} !== ${toPrint(b)}`, f)
}
// assert.not — negated Object.is comparison.
const not = (msg, f) => (a, b) => {
  const different = () => !Object.is(a, b)
  return wrap(msg, different, `${toPrint(a)} === ${toPrint(b)}`, f)
}
const pass = (msg, f) => () => wrap(msg, () => true, null, f)
// assert.fail — unconditionally fails (useful with test.failing).
const fail = (msg, f) => () => wrap(msg, () => false, 'called with assert.fail', f)
// assert.truthy — value must coerce to true.
const truthy = (msg, f) => (a) => {
  const check = () => Boolean(a)
  return wrap(msg, check, `not truthy: ${toPrint(a)}`, f)
}
// assert.falsy — value must coerce to false.
const falsy = (msg, f) => (a) => {
  const check = () => !a
  return wrap(msg, check, `not falsy: ${toPrint(a)}`, f)
}
// assert.deepEqual — structural comparison via Node's deepStrictEqual, which
// throws on mismatch; the predicate returns true when no throw occurs.
const deepEqual = (msg, f) => (a, b) => {
  const check = () => {
    deepStrictEqual(a, b)
    return true
  }
  const failMsg = `not deepEqual:\nA:\n${toPrint(a)}\nB:\n${toPrint(b)}`
  return wrap(msg, check, failMsg, f)
}
// assert.throws — synchronous callable must throw.
const throws = (msg, f) => (a) => {
  let threw = false
  try {
    a()
  } catch (ex) {
    threw = true
  }
  return wrap(msg, () => threw, `did not throw error`, f)
}
// assert.notThrows — synchronous callable must NOT throw; any caught error
// becomes the failure detail.
const notThrows = (msg, f) => (a) => {
  let err
  try {
    a()
  } catch (ex) {
    err = ex
  }
  return wrap(msg, () => !err, err, f)
}
// assert.throwsAsync — awaited callable/promise must reject or throw.
const throwsAsync = (msg, f) => async (a) => {
  let threw = false
  try {
    await a()
  } catch (ex) {
    threw = true
  }
  return wrap(msg, () => threw, `did not throw error`, f)
}
// assert.notThrowsAsync — awaited callable/promise must resolve cleanly; any
// rejection becomes the failure detail.
const notThrowsAsync = (msg, f) => async (a) => {
  let err
  try {
    await a()
  } catch (ex) {
    err = ex
  }
  return wrap(msg, () => !err, err, f)
}
// Expose the helper entry points as properties of the main `test` function
// (test.only, test.skip, test.bench, etc.).
Object.assign(test, {
assert,
setup,
before,
after,
skip,
todo,
only,
failing,
bench
})
// Queue char-support detection so glyph choice is resolved before tests run.
setup()
module.exports = test

5

package.json
{
"name": "mvt",
"version": "3.0.0",
"version": "4.0.0-beta.0",
"description": "A minimum viable testing framework, aka a few test helpers, 0 dependencies",
"main": "index.js",
"files": [
"index.js"
"index.js",
"cli-char-supported.js"
],

@@ -9,0 +10,0 @@ "scripts": {

# mvt [![NPM version](https://badge.fury.io/js/mvt.svg)](https://npmjs.org/package/mvt) [![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](https://github.com/feross/standard)
> A minimum viable testing framework, aka a few test helpers, 0 dependencies
> Minimum Viable Testing framework, it's like AVA, if AVA sucked
## why another testing module
I'm getting tired of Github security alerts on nested dev dependencies, typically
from my chosen test framework. The tests I run are typically light and
don't require all the frills provided by the majors. I'm totes fine with
nothing more than testing for a truthy value or comparing two values. As with
most of my open source this is for me. There are many actually good test
frameworks out there, but this is all I need for many circumstances.
## you should probably use AVA
Because [AVA](https://github.com/avajs/ava) is awesome.
## what is this
It started as a few test helpers. I made it for small projects with minimal
test needs. That was a bit too restrictive though. In version `4.0.0+` it became
a blatant ripoff of AVA. Well, not really. It does about 1% of what AVA does,
and it doesn't do that nearly as well.
## if it's inferior in every way to AVA then why does it exist
Because I need to minimize my security alerts so that I don't lose real security
threats in the deluge. I'm often finding my devDependencies to be the culprit
of most alerts. This is an effort to minimize recursive devDependencies.
## what's good about it
- It's lightweight @ < 100 sloc
- It has 0 dependencies (same for devDependencies)
- It has 0 dependencies and devDependencies
- That's really about it
## what it lacks (way more than I can list, but here's the most notable)
- A CLI (for now)
- tests must be run as `node tests/a.js`
- the above must be done for each test file `node tests/a.js && node tests/b.js`
- thus each test file gets a distinct output, so no central tracking of tests
- Useful `Error` output
- you won't get a clean stack, it will be filled with useless info from `mvt`
- that will make it difficult to debug where problems actually occurred
- Concurrency
- that's not a thing here
- Transpilation
- also not a thing here
## this might be for you if
- Your tests don't require heavy tooling
- Concurrency doesn't make or break your testing times
- You know how to coerce tests to fitting truthy or a/b equality checks
- You're happy to do said coercion in your test file
- You don't need much control over test output formatting
- You're willing to sacrifice all of the above (and more) to reduce dependencies
- Your tests are in a single file and concurrency doesn't impact test times

@@ -30,39 +46,68 @@ ## install

## api
## usage
**runTests** (message, testCallback[, meta]) - Just a wrapper that takes care to catch anything in it
- arguments
- `string` - Message to display on test start-up
- `function / async function` - Run your tests in this to ensure nothing is missed
- `object` - If anything fails this object will be printed to stderr
```js
const test = require('mvt')
**test** (description, truthyOrComparison[[, comparison], meta]) - Test against single truthy value or compare two values
- arguments
- `string` - Test description
- `any` - Will fail if not truthy OR if doesn't `===` argument 3
- `any` - If not `undefined` this must `===` previous argument or will fail
- `object` - If failed this object will be printed to stderr (if only passing truthy value the comparison arg must be `undefined`)
- returns `true` if test passed
test.setup({ verbose: true })
**testAsync** (description, asyncCallback[, meta]) - This simply awaits a promise that must resolve truthy
- arguments
- `String` - Test description
- `async function` - Async function / promise returning function that must resolve truthy
- `object` - If failed this object will be printed to stderr
- resolves with `true` if test passed
test.after(() => console.log('test.after invoked'))
## usage
test.before(() => console.log('test.before invoked'))
```js
const { runTests, start, finish, test, testAsync } = require('mvt')
test('assert.is works', (assert) => assert.is(1, 1))
runTests(`Testing my app`, async () => {
test('Should be truthy', true)
test('assert.not works', (assert) => assert.not(1, 2))
test('Should be equal', 1, 1)
test('assert.pass works', (assert) => assert.pass())
await testAsync('Should resolve truthy', async () => Promise.resolve(true))
test.failing('test.failing and assert.fail works', (assert) => assert.fail())
test('assert.truthy works', (assert) => assert.truthy(1))
test('assert.falsy works', (assert) => assert.falsy(0))
test('assert.deepEqual works', (assert) => assert.deepEqual([1, 2], [1, 2]))
test('assert.throws works', (assert) => {
assert.throws(() => { throw new Error('it throws') })
})
test('assert.notThrows works', async (assert) => {
assert.notThrows(() => {})
})
test('assert.throwsAsync works', async (assert) => {
await assert.throwsAsync(() => new Promise((resolve, reject) => {
process.nextTick(() => reject(new Error('rejected Promise')))
}))
})
test('assert.notThrowsAsync works', async (assert) => {
await assert.notThrowsAsync(() => new Promise((resolve, reject) => {
process.nextTick(() => resolve('all good'))
}))
})
test.todo('test.todo works')
test.skip('test.skip works', (assert) => assert.truthy('skipped'))
test.only('test.only works', (assert) => assert.truthy('only'))
test.bench('test.bench works', { samples: 5, max: 300 }, (assert) => {
return new Promise((resolve, reject) => {
setTimeout(() => resolve(), 200)
})
})
```
![Output](images/output.png)
## api
Right now I'm feeling lazy. The full API is documented under `usage`. Eventually
I'll add it here. You can also check the test file, though it's about the same
as usage.
## notes

@@ -69,0 +114,0 @@

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc