buffalo-bench
Advanced tools
Comparing version 1.0.0 to 1.0.1
@@ -0,1 +1,8 @@ | ||
### [1.0.1](https://github.com/Masquerade-Circus/buffalo-bench/compare/1.0.0...1.0.1) (2021-08-09) | ||
### Bug Fixes | ||
* **main:** fix method names ([27b2ce7](https://github.com/Masquerade-Circus/buffalo-bench/commit/27b2ce7c50016b7b2daa80d123d4d34fb27a7ec1)) | ||
## [1.0.0](https://github.com/Masquerade-Circus/buffalo-bench/compare/0.1.0...1.0.0) (2021-08-09) | ||
@@ -2,0 +9,0 @@ |
@@ -1,1 +0,1 @@ | ||
// buffalo-bench 1.0.0 bundle, de-minified (the build itself reports version "0.1.0").
// Exposes { CompareBy, default: Benchmark } via module.exports (CommonJS) or as the
// global `self.Benchmark` (browser / worker). Behavior is identical to the minified build.
(() => {
  const defineProp = Object.defineProperty;

  // Attach a static field to a class the way the bundler emitted it:
  // redefine as enumerable/configurable/writable when the key already exists,
  // otherwise plain-assign. Returns the value.
  const attachStatic = (target, key, value) => {
    const propKey = typeof key !== "symbol" ? key + "" : key;
    if (propKey in target) {
      defineProp(target, propKey, {
        enumerable: true,
        configurable: true,
        writable: true,
        value
      });
    } else {
      target[propKey] = value;
    }
    return value;
  };

  // Base error type; `statusCode` distinguishes which lifecycle stage failed.
  class BenchmarkError extends Error {
    code;
    message;
    name;
    statusCode = 0;
    constructor(message = "Something went wrong", code) {
      super();
      this.message = message;
      this.code = code;
      this.name = this.constructor.name;
    }
  }

  // Lookup table used by wrapError(); keys are the error-type strings passed
  // around by runCallback(). Note the pre-1.0.1 lowercase names.
  const errorClasses = {
    BenchmarkError,
    beforeEachError: class beforeEachError extends BenchmarkError {
      statusCode = 1;
      name = "beforeEachError";
    },
    afterEachError: class afterEachError extends BenchmarkError {
      statusCode = 2;
      name = "afterEachError";
    },
    RunError: class RunError extends BenchmarkError {
      statusCode = 3;
      name = "RunError";
    },
    CompleteError: class CompleteError extends BenchmarkError {
      statusCode = 4;
      name = "CompleteError";
    },
    StartError: class StartError extends BenchmarkError {
      statusCode = 5;
      name = "StartError";
    },
    FatalError: class FatalError extends BenchmarkError {
      statusCode = 7;
      name = "FatalError";
    }
  };

  // Re-wrap `error` as the benchmark error class named by `type`,
  // preserving the original stack trace.
  function wrapError(error, message, type) {
    const wrapped = new errorClasses[type](message);
    wrapped.stack = error.stack;
    return wrapped;
  }

  // Run an optional lifecycle callback bound to `context`. On failure,
  // returns (not throws) a wrapped error of `errorType`; returns undefined on success.
  async function runCallback(context, errorType, callback, ...args) {
    if (callback) {
      try {
        await callback.bind(context)(...args);
      } catch (error) {
        return wrapError(
          error,
          `Benchmark \`${context.name}\` failed to run \`${callback.name}\` callback: ${error.message}`,
          errorType
        );
      }
    }
  }

  // String-enum of the fields a comparison can be keyed on.
  const CompareBy = {
    MeanTime: "meanTime",
    MedianTime: "medianTime",
    StandardDeviation: "standardDeviation",
    MaxTime: "maxTime",
    MinTime: "minTime",
    Hz: "hz",
    RunTime: "runTime",
    Cycles: "cycles",
    Percent: "percent"
  };

  class Benchmark {
    name;
    error;
    cycles = 0;
    samples = 0;
    hz = 0;
    meanTime = 0;
    medianTime = 0;
    standardDeviation = 0;
    maxTime = 0;
    minTime = 0;
    times = [];
    options;
    stamp;
    runTime = 0;
    totalTime = 0;

    // `fnOrOptions` is either the benchmark function itself or a full options object.
    constructor(name, fnOrOptions, options = {}) {
      this.name = name;
      let merged = { ...Benchmark.defaults, ...options };
      if (typeof fnOrOptions === "function") {
        merged.fn = fnOrOptions;
      } else {
        merged = { ...merged, ...fnOrOptions };
      }
      this.options = merged;
    }

    // Plain-object snapshot of the results (errors reduced to their message).
    toJSON() {
      const {
        name, error, cycles, hz, runTime, totalTime, samples,
        meanTime, medianTime, standardDeviation, maxTime, minTime
      } = this;
      return {
        name,
        errorMessage: error ? error.message : undefined,
        cycles,
        samples,
        hz,
        meanTime,
        medianTime,
        standardDeviation,
        maxTime,
        minTime,
        runTime,
        totalTime
      };
    }

    // Compare this benchmark against `other` on `compareBy`; an errored
    // benchmark always loses (-1 if this errored, 1 if the other did).
    compareWith(other, compareBy = CompareBy.Percent) {
      if (this.error) {
        return -1;
      }
      if (other.error) {
        return 1;
      }
      switch (compareBy) {
        case "meanTime":
          return this.meanTime - other.meanTime;
        case "medianTime":
          return this.medianTime - other.medianTime;
        case "standardDeviation":
          return this.standardDeviation - other.standardDeviation;
        case "maxTime":
          return this.maxTime - other.maxTime;
        case "minTime":
          return this.minTime - other.minTime;
        case "hz":
          return this.hz - other.hz;
        case "runTime":
          return this.runTime - other.runTime;
        case "cycles":
          return this.cycles - other.cycles;
        case "percent":
          // Relative speed difference, truncated to an integer percentage.
          return Math.trunc((100 / other.hz) * this.hz - 100);
        default:
          throw new Error(`Unknown compare field: ${compareBy}`);
      }
    }

    // Run cycles for ~1 second of wall-clock time. Hook time counts toward
    // totalTime but not runTime.
    async runSample() {
      const { beforeEach, afterEach, fn } = this.options;
      const sampleStart = performance.now();
      while (performance.now() - sampleStart < 1000) {
        const cycleStart = performance.now();
        this.cycles++;
        let elapsed;
        const beforeEachFailure = await runCallback(this, "beforeEachError", beforeEach);
        if (beforeEachFailure) {
          throw beforeEachFailure;
        }
        try {
          if (fn.constructor.name === "AsyncFunction") {
            const start = performance.now();
            await fn();
            elapsed = performance.now() - start;
          } else {
            const start = performance.now();
            fn();
            elapsed = performance.now() - start;
          }
        } catch (error) {
          throw wrapError(
            error,
            `Benchmark \`${this.name}\` failed to run \`fn\`: ${error.message}`,
            "RunError"
          );
        }
        this.times.push(elapsed);
        this.runTime += elapsed;
        const afterEachFailure = await runCallback(this, "afterEachError", afterEach);
        if (afterEachFailure) {
          throw afterEachFailure;
        }
        this.totalTime += performance.now() - cycleStart;
      }
    }

    // Run samples until both minSamples and maxTime are satisfied, then
    // derive the statistics. Any failure is recorded on `this.error` and
    // forwarded to the onError hook.
    async run() {
      this.stamp = performance.now();
      const { maxTime, minSamples, onComplete, onStart, onError } = this.options;
      const maxTimeMs = 1000 * maxTime;
      try {
        const startFailure = await runCallback(this, "StartError", onStart);
        if (startFailure) {
          throw startFailure;
        }
        while (this.samples < minSamples || this.totalTime < maxTimeMs) {
          this.samples++;
          await this.runSample();
        }
        this.hz = this.cycles / (this.runTime / 1000);
        this.meanTime = this.runTime / this.times.length;
        // Note: sort() mutates this.times in place, as the original build did.
        this.medianTime = this.times.sort((a, b) => a - b)[Math.floor(this.times.length / 2)] || 0;
        this.standardDeviation = Math.sqrt(
          this.times.map((t) => Math.pow(t - this.meanTime, 2)).reduce((sum, v) => sum + v, 0) /
            this.times.length
        );
        this.maxTime = this.times.reduce((max, t) => Math.max(max, t), 0);
        this.minTime = this.times.reduce((min, t) => Math.min(min, t), 1 / 0);
        const completeFailure = await runCallback(this, "CompleteError", onComplete);
        if (completeFailure) {
          throw completeFailure;
        }
      } catch (error) {
        this.error = error;
        const fatalFailure = await runCallback(this, "FatalError", onError, error);
        if (fatalFailure) {
          throw fatalFailure;
        }
      }
    }
  }

  attachStatic(Benchmark, "Suite"); // placeholder; the real Suite class is assigned below
  attachStatic(Benchmark, "version", "0.1.0");
  attachStatic(Benchmark, "defaults", { maxTime: 5, minSamples: 1 });

  class Suite {
    name;
    error;
    options;
    stamp;
    runTime = 0;
    totalTime = 0;
    benchmarks = [];

    constructor(name, options = {}) {
      this.name = name;
      this.options = { ...Suite.defaults, ...options };
    }

    toJSON() {
      const { error, name, runTime, totalTime } = this;
      return {
        name,
        errorMessage: error ? error.message : undefined,
        runTime,
        totalTime,
        passed: !error,
        benchmarks: this.benchmarks.map((benchmark) => benchmark.toJSON())
      };
    }

    // Register a benchmark; suite-level minSamples/maxTime act as its defaults.
    add(name, fnOrOptions, options = {}) {
      let merged = {
        minSamples: this.options.minSamples,
        maxTime: this.options.maxTime,
        ...options
      };
      if (typeof fnOrOptions === "function") {
        merged.fn = fnOrOptions;
      } else {
        merged = { ...merged, ...fnOrOptions };
      }
      const benchmark = new Benchmark(name, merged);
      this.benchmarks.push(benchmark);
      return benchmark;
    }

    // Run every registered benchmark in order, with suite-level hooks around each.
    async run() {
      this.stamp = performance.now();
      const { beforeEach, afterEach, onComplete, onStart, onError } = this.options;
      try {
        const startFailure = await runCallback(this, "StartError", onStart);
        if (startFailure) {
          throw startFailure;
        }
        for (let index = 0, total = this.benchmarks.length; index < total; index++) {
          const benchmark = this.benchmarks[index];
          const beforeEachFailure = await runCallback(this, "beforeEachError", beforeEach, benchmark, index);
          if (beforeEachFailure) {
            throw beforeEachFailure;
          }
          await benchmark.run();
          this.runTime += benchmark.runTime;
          this.totalTime += benchmark.totalTime;
          const afterEachFailure = await runCallback(this, "afterEachError", afterEach, benchmark, index);
          if (afterEachFailure) {
            throw afterEachFailure;
          }
        }
        const completeFailure = await runCallback(this, "CompleteError", onComplete);
        if (completeFailure) {
          throw completeFailure;
        }
      } catch (error) {
        this.error = error;
        const fatalFailure = await runCallback(this, "FatalError", onError, error);
        if (fatalFailure) {
          throw fatalFailure;
        }
      }
    }

    // Descending order: comparator asks the second element to compare itself
    // against the first, so "fastest" (by the chosen field) sorts first.
    getSortedBenchmarksBy(compareBy) {
      return this.benchmarks.slice().sort((first, second) => {
        const diff = second.compareWith(first, compareBy);
        return diff > 0 ? 1 : diff < 0 ? -1 : 0;
      });
    }

    getFastest(compareBy) {
      return this.getSortedBenchmarksBy(compareBy)[0];
    }

    getSlowest(compareBy) {
      const sorted = this.getSortedBenchmarksBy(compareBy);
      return sorted[sorted.length - 1];
    }

    // Pre-1.0.1 name (renamed to compareFastestWithSlowest in 1.0.1).
    compareFastestWithLowest(compareBy) {
      const fastest = this.getFastest(compareBy);
      const slowest = this.getSlowest(compareBy);
      return { fastest, slowest, by: fastest.compareWith(slowest, compareBy) };
    }
  }

  attachStatic(Suite, "defaults", { maxTime: 5, minSamples: 1 });
  Benchmark.Suite = Suite;

  const api = { CompareBy, default: Benchmark };
  // CommonJS when `module` exists; otherwise assumes a browser/worker `self` global.
  if (typeof module !== "undefined") {
    module.exports = api;
  } else {
    self.Benchmark = api;
  }
})();
// buffalo-bench 1.0.1 bundle, de-minified (the build reports version "1.0.0").
// Exposes { CompareBy, default: Benchmark } via module.exports (CommonJS) or as the
// global `self.Benchmark` (browser / worker). Behavior is identical to the minified build.
// 1.0.1 renamed the hooks (onStart→before, onComplete→after) and the error classes
// (beforeEachError→BeforeEachError, StartError→BeforeError, CompleteError→AfterError),
// and fixed compareFastestWithLowest→compareFastestWithSlowest.
(() => {
  const defineProp = Object.defineProperty;

  // Attach a static field to a class the way the bundler emitted it:
  // redefine as enumerable/configurable/writable when the key already exists,
  // otherwise plain-assign. Returns the value.
  const attachStatic = (target, key, value) => {
    const propKey = typeof key !== "symbol" ? key + "" : key;
    if (propKey in target) {
      defineProp(target, propKey, {
        enumerable: true,
        configurable: true,
        writable: true,
        value
      });
    } else {
      target[propKey] = value;
    }
    return value;
  };

  // Base error type; `statusCode` distinguishes which lifecycle stage failed.
  class BenchmarkError extends Error {
    code;
    message;
    name;
    statusCode = 0;
    constructor(message = "Something went wrong", code) {
      super();
      this.message = message;
      this.code = code;
      this.name = this.constructor.name;
    }
  }

  // Lookup table used by wrapError(); keys are the error-type strings passed
  // around by runCallback().
  const errorClasses = {
    BenchmarkError,
    BeforeEachError: class BeforeEachError extends BenchmarkError {
      statusCode = 1;
      name = "BeforeEachError";
    },
    AfterEachError: class AfterEachError extends BenchmarkError {
      statusCode = 2;
      name = "AfterEachError";
    },
    RunError: class RunError extends BenchmarkError {
      statusCode = 3;
      name = "RunError";
    },
    AfterError: class AfterError extends BenchmarkError {
      statusCode = 4;
      name = "AfterError";
    },
    BeforeError: class BeforeError extends BenchmarkError {
      statusCode = 5;
      name = "BeforeError";
    },
    FatalError: class FatalError extends BenchmarkError {
      statusCode = 7;
      name = "FatalError";
    }
  };

  // Re-wrap `error` as the benchmark error class named by `type`,
  // preserving the original stack trace.
  function wrapError(error, message, type) {
    const wrapped = new errorClasses[type](message);
    wrapped.stack = error.stack;
    return wrapped;
  }

  // Run an optional lifecycle callback bound to `context`. On failure,
  // returns (not throws) a wrapped error of `errorType`; returns undefined on success.
  async function runCallback(context, errorType, callback, ...args) {
    if (callback) {
      try {
        await callback.bind(context)(...args);
      } catch (error) {
        return wrapError(
          error,
          `Benchmark \`${context.name}\` failed to run \`${callback.name}\` callback: ${error.message}`,
          errorType
        );
      }
    }
  }

  // String-enum of the fields a comparison can be keyed on.
  const CompareBy = {
    MeanTime: "meanTime",
    MedianTime: "medianTime",
    StandardDeviation: "standardDeviation",
    MaxTime: "maxTime",
    MinTime: "minTime",
    Hz: "hz",
    RunTime: "runTime",
    Cycles: "cycles",
    Percent: "percent"
  };

  class Benchmark {
    name;
    error;
    cycles = 0;
    samples = 0;
    hz = 0;
    meanTime = 0;
    medianTime = 0;
    standardDeviation = 0;
    maxTime = 0;
    minTime = 0;
    times = [];
    options;
    stamp;
    runTime = 0;
    totalTime = 0;

    // `fnOrOptions` is either the benchmark function itself or a full options object.
    constructor(name, fnOrOptions, options = {}) {
      this.name = name;
      let merged = { ...Benchmark.defaults, ...options };
      if (typeof fnOrOptions === "function") {
        merged.fn = fnOrOptions;
      } else {
        merged = { ...merged, ...fnOrOptions };
      }
      this.options = merged;
    }

    // Plain-object snapshot of the results (errors reduced to their message).
    toJSON() {
      const {
        name, error, cycles, hz, runTime, totalTime, samples,
        meanTime, medianTime, standardDeviation, maxTime, minTime
      } = this;
      return {
        name,
        errorMessage: error ? error.message : undefined,
        cycles,
        samples,
        hz,
        meanTime,
        medianTime,
        standardDeviation,
        maxTime,
        minTime,
        runTime,
        totalTime
      };
    }

    // Compare this benchmark against `other` on `compareBy`; an errored
    // benchmark always loses (-1 if this errored, 1 if the other did).
    compareWith(other, compareBy = CompareBy.Percent) {
      if (this.error) {
        return -1;
      }
      if (other.error) {
        return 1;
      }
      switch (compareBy) {
        case "meanTime":
          return this.meanTime - other.meanTime;
        case "medianTime":
          return this.medianTime - other.medianTime;
        case "standardDeviation":
          return this.standardDeviation - other.standardDeviation;
        case "maxTime":
          return this.maxTime - other.maxTime;
        case "minTime":
          return this.minTime - other.minTime;
        case "hz":
          return this.hz - other.hz;
        case "runTime":
          return this.runTime - other.runTime;
        case "cycles":
          return this.cycles - other.cycles;
        case "percent":
          // Relative speed difference, truncated to an integer percentage.
          return Math.trunc((100 / other.hz) * this.hz - 100);
        default:
          throw new Error(`Unknown compare field: ${compareBy}`);
      }
    }

    // Run cycles for ~1 second of wall-clock time. Hook time counts toward
    // totalTime but not runTime.
    async runSample() {
      const { beforeEach, afterEach, fn } = this.options;
      const sampleStart = performance.now();
      while (performance.now() - sampleStart < 1000) {
        const cycleStart = performance.now();
        this.cycles++;
        let elapsed;
        const beforeEachFailure = await runCallback(this, "BeforeEachError", beforeEach);
        if (beforeEachFailure) {
          throw beforeEachFailure;
        }
        try {
          if (fn.constructor.name === "AsyncFunction") {
            const start = performance.now();
            await fn();
            elapsed = performance.now() - start;
          } else {
            const start = performance.now();
            fn();
            elapsed = performance.now() - start;
          }
        } catch (error) {
          throw wrapError(
            error,
            `Benchmark \`${this.name}\` failed to run \`fn\`: ${error.message}`,
            "RunError"
          );
        }
        this.times.push(elapsed);
        this.runTime += elapsed;
        const afterEachFailure = await runCallback(this, "AfterEachError", afterEach);
        if (afterEachFailure) {
          throw afterEachFailure;
        }
        this.totalTime += performance.now() - cycleStart;
      }
    }

    // Run samples until both minSamples and maxTime are satisfied, then
    // derive the statistics. Any failure is recorded on `this.error` and
    // forwarded to the onError hook.
    async run() {
      this.stamp = performance.now();
      const { maxTime, minSamples, after, before, onError } = this.options;
      const maxTimeMs = 1000 * maxTime;
      try {
        const beforeFailure = await runCallback(this, "BeforeError", before);
        if (beforeFailure) {
          throw beforeFailure;
        }
        while (this.samples < minSamples || this.totalTime < maxTimeMs) {
          this.samples++;
          await this.runSample();
        }
        this.hz = this.cycles / (this.runTime / 1000);
        this.meanTime = this.runTime / this.times.length;
        // Note: sort() mutates this.times in place, as the original build did.
        this.medianTime = this.times.sort((a, b) => a - b)[Math.floor(this.times.length / 2)] || 0;
        this.standardDeviation = Math.sqrt(
          this.times.map((t) => Math.pow(t - this.meanTime, 2)).reduce((sum, v) => sum + v, 0) /
            this.times.length
        );
        this.maxTime = this.times.reduce((max, t) => Math.max(max, t), 0);
        this.minTime = this.times.reduce((min, t) => Math.min(min, t), 1 / 0);
        const afterFailure = await runCallback(this, "AfterError", after);
        if (afterFailure) {
          throw afterFailure;
        }
      } catch (error) {
        this.error = error;
        const fatalFailure = await runCallback(this, "FatalError", onError, error);
        if (fatalFailure) {
          throw fatalFailure;
        }
      }
    }
  }

  attachStatic(Benchmark, "Suite"); // placeholder; the real Suite class is assigned below
  attachStatic(Benchmark, "version", "1.0.0");
  attachStatic(Benchmark, "defaults", { maxTime: 5, minSamples: 1 });

  class Suite {
    name;
    error;
    options;
    stamp;
    runTime = 0;
    totalTime = 0;
    benchmarks = [];

    constructor(name, options = {}) {
      this.name = name;
      this.options = { ...Suite.defaults, ...options };
    }

    toJSON() {
      const { error, name, runTime, totalTime } = this;
      return {
        name,
        errorMessage: error ? error.message : undefined,
        runTime,
        totalTime,
        passed: !error,
        benchmarks: this.benchmarks.map((benchmark) => benchmark.toJSON())
      };
    }

    // Register a benchmark; suite-level minSamples/maxTime act as its defaults.
    add(name, fnOrOptions, options = {}) {
      let merged = {
        minSamples: this.options.minSamples,
        maxTime: this.options.maxTime,
        ...options
      };
      if (typeof fnOrOptions === "function") {
        merged.fn = fnOrOptions;
      } else {
        merged = { ...merged, ...fnOrOptions };
      }
      const benchmark = new Benchmark(name, merged);
      this.benchmarks.push(benchmark);
      return benchmark;
    }

    // Run every registered benchmark in order, with suite-level hooks around each.
    async run() {
      this.stamp = performance.now();
      const { beforeEach, afterEach, after, before, onError } = this.options;
      try {
        const beforeFailure = await runCallback(this, "BeforeError", before);
        if (beforeFailure) {
          throw beforeFailure;
        }
        for (let index = 0, total = this.benchmarks.length; index < total; index++) {
          const benchmark = this.benchmarks[index];
          const beforeEachFailure = await runCallback(this, "BeforeEachError", beforeEach, benchmark, index);
          if (beforeEachFailure) {
            throw beforeEachFailure;
          }
          await benchmark.run();
          this.runTime += benchmark.runTime;
          this.totalTime += benchmark.totalTime;
          const afterEachFailure = await runCallback(this, "AfterEachError", afterEach, benchmark, index);
          if (afterEachFailure) {
            throw afterEachFailure;
          }
        }
        const afterFailure = await runCallback(this, "AfterError", after);
        if (afterFailure) {
          throw afterFailure;
        }
      } catch (error) {
        this.error = error;
        const fatalFailure = await runCallback(this, "FatalError", onError, error);
        if (fatalFailure) {
          throw fatalFailure;
        }
      }
    }

    // Descending order: comparator asks the second element to compare itself
    // against the first, so "fastest" (by the chosen field) sorts first.
    getSortedBenchmarksBy(compareBy) {
      return this.benchmarks.slice().sort((first, second) => {
        const diff = second.compareWith(first, compareBy);
        return diff > 0 ? 1 : diff < 0 ? -1 : 0;
      });
    }

    getFastest(compareBy) {
      return this.getSortedBenchmarksBy(compareBy)[0];
    }

    getSlowest(compareBy) {
      const sorted = this.getSortedBenchmarksBy(compareBy);
      return sorted[sorted.length - 1];
    }

    compareFastestWithSlowest(compareBy) {
      const fastest = this.getFastest(compareBy);
      const slowest = this.getSlowest(compareBy);
      return { fastest, slowest, by: fastest.compareWith(slowest, compareBy) };
    }
  }

  attachStatic(Suite, "defaults", { maxTime: 5, minSamples: 1 });
  Benchmark.Suite = Suite;

  const api = { CompareBy, default: Benchmark };
  // CommonJS when `module` exists; otherwise assumes a browser/worker `self` global.
  if (typeof module !== "undefined") {
    module.exports = api;
  } else {
    self.Benchmark = api;
  }
})();
124
lib/index.ts
@@ -11,3 +11,3 @@ // A benchmarking library that supports async hooks and benchmarks by default. | ||
async: true, | ||
async onStart() => { | ||
async before() => { | ||
console.log(1); | ||
@@ -21,3 +21,3 @@ await new Promise(resolve => setTimeout(resolve, 1000)); | ||
// The previous code will log 1 and then run the benchmark and the log 2 could be logged before the benchmark is finished or couldn't be logged at all. | ||
// This problem prevent us to create an async onStart and/or onComplete for a benchmark like an api call that could require it. | ||
// This problem prevents us from creating an async before and/or after for a benchmark like an api call that could require it. | ||
@@ -42,6 +42,6 @@ // This library solves this problem by providing a way to create a benchmark with all the hooks and benchmark handled as async by default. | ||
// }, | ||
// onComplete: async () => { | ||
// after: async () => { | ||
// await doSomething(); | ||
// }, | ||
// onStart: async () => { | ||
// before: async () => { | ||
// await doSomething(); | ||
@@ -64,4 +64,4 @@ // }, | ||
// * `afterEach`: A function to be run once after each benchmark loop, does not count for run time. | ||
// * `onComplete`: A function to be run once after the benchmark loop finishes, does not count for run time. | ||
// * `onStart`: A function to be run once before the benchmark loop starts, does not count for run time. | ||
// * `after`: A function to be run once after the benchmark loop finishes, does not count for run time. | ||
// * `before`: A function to be run once before the benchmark loop starts, does not count for run time. | ||
// * `onError`: A function to be run if an error occurs. | ||
@@ -83,4 +83,4 @@ // * `fn`: The function to be run. | ||
// * `stamp`: A timestamp representing when the benchmark was created. | ||
// * `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, onStrart and onComplete hooks. | ||
// * `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, onStart and onComplete hooks. | ||
// * `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, before and after hooks. | ||
// * `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, before and after hooks. | ||
@@ -96,9 +96,9 @@ // The `Benchmark` instance has the following methods: | ||
// If the `beforeEach` `afterEach` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing. | ||
// If the `beforeEach` `afterEach` `after` `before` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing. | ||
// If the `beforeEach` function throws an error, the benchmark will stop and emit an `beforeEachError` event. | ||
// If the `afterEach` function throws an error, the benchmark will stop and emit an `afterEachError` event. | ||
// If the `beforeEach` function throws an error, the benchmark will stop and emit a `BeforeEachError` event. | ||
// If the `afterEach` function throws an error, the benchmark will stop and emit an `AfterEachError` event. | ||
// If the `fn` function throws an error, the benchmark will stop and emit a `RunError` event. | ||
// If the `onComplete` function throws an error, the benchmark will stop and emit an `CompleteError` event. | ||
// If the `onStart` function throws an error, the benchmark will stop and emit an `StartError` event. | ||
// If the `after` function throws an error, the benchmark will stop and emit an `AfterError` event. | ||
// If the `before` function throws an error, the benchmark will stop and emit a `BeforeError` event. | ||
// If the `onError` function throws an error, the benchmark will stop and emit a `FatalError` event. | ||
@@ -130,12 +130,12 @@ | ||
// beforeEachError: The `beforeEach` function threw an error. | ||
class beforeEachError extends BenchmarkError { | ||
// BeforeEachError: The `beforeEach` function threw an error. | ||
class BeforeEachError extends BenchmarkError { | ||
statusCode = 1; | ||
name = "beforeEachError"; | ||
name = "BeforeEachError"; | ||
} | ||
// afterEachError: The `afterEach` function threw an error. | ||
class afterEachError extends BenchmarkError { | ||
// AfterEachError: The `afterEach` function threw an error. | ||
class AfterEachError extends BenchmarkError { | ||
statusCode = 2; | ||
name = "afterEachError"; | ||
name = "AfterEachError"; | ||
} | ||
@@ -149,12 +149,12 @@ | ||
// CompleteError: The `onComplete` function threw an error. | ||
class CompleteError extends BenchmarkError { | ||
// AfterError: The `after` function threw an error. | ||
class AfterError extends BenchmarkError { | ||
statusCode = 4; | ||
name = "CompleteError"; | ||
name = "AfterError"; | ||
} | ||
// StartError: The `onStart` function threw an error. | ||
class StartError extends BenchmarkError { | ||
// BeforeError: The `before` function threw an error. | ||
class BeforeError extends BenchmarkError { | ||
statusCode = 5; | ||
name = "StartError"; | ||
name = "BeforeError"; | ||
} | ||
@@ -170,11 +170,11 @@ | ||
BenchmarkError, | ||
beforeEachError, | ||
afterEachError, | ||
BeforeEachError, | ||
AfterEachError, | ||
RunError, | ||
CompleteError, | ||
StartError, | ||
AfterError, | ||
BeforeError, | ||
FatalError | ||
}; | ||
type ErrorType = "beforeEachError" | "afterEachError" | "RunError" | "CompleteError" | "StartError" | "FatalError"; | ||
type ErrorType = "BeforeEachError" | "AfterEachError" | "RunError" | "AfterError" | "BeforeError" | "FatalError"; | ||
@@ -195,5 +195,5 @@ // BenchmarkFunction a function that can be used as a benchmark. | ||
// A function to be run once after the benchmark completes, does not count for run time. | ||
onComplete?: () => Promise<void> | void; | ||
after?: () => Promise<void> | void; | ||
// A function to be run once before the benchmark starts, does not count for run time. | ||
onStart?: () => Promise<void> | void; | ||
before?: () => Promise<void> | void; | ||
// A function to be run if an error occurs. | ||
@@ -402,5 +402,5 @@ onError?: (error: BenchmarkError) => Promise<void> | void; | ||
this.cycles++; | ||
const beforeEachError = await runCallback(this, "beforeEachError", beforeEach); | ||
if (beforeEachError) { | ||
throw beforeEachError; | ||
const BeforeEachError = await runCallback(this, "BeforeEachError", beforeEach); | ||
if (BeforeEachError) { | ||
throw BeforeEachError; | ||
} | ||
@@ -426,5 +426,5 @@ | ||
const afterEachError = await runCallback(this, "afterEachError", afterEach); | ||
if (afterEachError) { | ||
throw afterEachError; | ||
const AfterEachError = await runCallback(this, "AfterEachError", afterEach); | ||
if (AfterEachError) { | ||
throw AfterEachError; | ||
} | ||
@@ -439,9 +439,9 @@ | ||
this.stamp = performance.now(); | ||
const { maxTime, minSamples, onComplete, onStart, onError, fn } = this.options; | ||
const { maxTime, minSamples, after, before, onError, fn } = this.options; | ||
let maxTimeInMilliseconds = maxTime * 1000; | ||
try { | ||
const onStartError = await runCallback(this, "StartError", onStart); | ||
if (onStartError) { | ||
throw onStartError; | ||
const beforeError = await runCallback(this, "BeforeError", before); | ||
if (beforeError) { | ||
throw beforeError; | ||
} | ||
@@ -466,5 +466,5 @@ | ||
const onCompleteError = await runCallback(this, "CompleteError", onComplete); | ||
if (onCompleteError) { | ||
throw onCompleteError; | ||
const afterError = await runCallback(this, "AfterError", after); | ||
if (afterError) { | ||
throw afterError; | ||
} | ||
@@ -493,5 +493,5 @@ } catch (error) { | ||
// A function to be run once after the suite completes | ||
onComplete?: () => Promise<void> | void; | ||
after?: () => Promise<void> | void; | ||
// A function to be run once before the suite starts | ||
onStart?: () => Promise<void> | void; | ||
before?: () => Promise<void> | void; | ||
// A function to be run if an error occurs. | ||
@@ -534,3 +534,3 @@ onError?: (error: BenchmarkError) => Promise<void> | void; | ||
getSlowest(sortedBy: CompareBy): Benchmark; | ||
compareFastestWithLowest(compareBy: CompareBy): { fastest: Benchmark; slowest: Benchmark; by: number }; | ||
compareFastestWithSlowest(compareBy: CompareBy): { fastest: Benchmark; slowest: Benchmark; by: number }; | ||
} | ||
@@ -601,8 +601,8 @@ | ||
this.stamp = performance.now(); | ||
const { beforeEach, afterEach, onComplete, onStart, onError } = this.options; | ||
const { beforeEach, afterEach, after, before, onError } = this.options; | ||
try { | ||
const onStartError = await runCallback(this, "StartError", onStart); | ||
if (onStartError) { | ||
throw onStartError; | ||
const beforeError = await runCallback(this, "BeforeError", before); | ||
if (beforeError) { | ||
throw beforeError; | ||
} | ||
@@ -612,5 +612,5 @@ | ||
let benchmark = this.benchmarks[i]; | ||
const onbeforeEachError = await runCallback(this, "beforeEachError", beforeEach, benchmark, i); | ||
if (onbeforeEachError) { | ||
throw onbeforeEachError; | ||
const beforeEachError = await runCallback(this, "BeforeEachError", beforeEach, benchmark, i); | ||
if (beforeEachError) { | ||
throw beforeEachError; | ||
} | ||
@@ -622,11 +622,11 @@ | ||
const onafterEachError = await runCallback(this, "afterEachError", afterEach, benchmark, i); | ||
if (onafterEachError) { | ||
throw onafterEachError; | ||
const afterEachError = await runCallback(this, "AfterEachError", afterEach, benchmark, i); | ||
if (afterEachError) { | ||
throw afterEachError; | ||
} | ||
} | ||
const onCompleteError = await runCallback(this, "CompleteError", onComplete); | ||
if (onCompleteError) { | ||
throw onCompleteError; | ||
const afterError = await runCallback(this, "AfterError", after); | ||
if (afterError) { | ||
throw afterError; | ||
} | ||
@@ -663,3 +663,3 @@ } catch (error) { | ||
compareFastestWithLowest(compareBy: CompareBy) { | ||
compareFastestWithSlowest(compareBy: CompareBy) { | ||
const fastest = this.getFastest(compareBy); | ||
@@ -666,0 +666,0 @@ const slowest = this.getSlowest(compareBy); |
{ | ||
"name": "buffalo-bench", | ||
"version": "1.0.0", | ||
"version": "1.0.1", | ||
"description": "A benchmarking library that supports async hooks and benchmarks by default.", | ||
@@ -5,0 +5,0 @@ "source": "lib/index.ts", |
@@ -75,6 +75,6 @@ <div style="text-align: center"> | ||
}, | ||
onComplete: async () => { | ||
after: async () => { | ||
await doSomething(); | ||
}, | ||
onStart: async () => { | ||
before: async () => { | ||
await doSomething(); | ||
@@ -114,4 +114,4 @@ }, | ||
* `afterEach`: A function to be run once after each benchmark loop, does not count for run time. | ||
* `onStart`: A function to be run once before the benchmark loop starts, does not count for run time. | ||
* `onComplete`: A function to be run once after the benchmark loop finishes, does not count for run time. | ||
* `before`: A function to be run once before the benchmark loop starts, does not count for run time. | ||
* `after`: A function to be run once after the benchmark loop finishes, does not count for run time. | ||
* `onError`: A function to be run if an error occurs. | ||
@@ -133,4 +133,4 @@ * `fn`: The function to be run. | ||
* `stamp`: A timestamp representing when the benchmark was created. | ||
* `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, onStrart and onComplete hooks. | ||
* `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, onStart and onComplete hooks. | ||
* `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, before and after hooks. | ||
* `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, before and after hooks. | ||
@@ -148,9 +148,9 @@ The `Benchmark` instance has the following methods: | ||
If the `beforeEach` `afterEach` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing. | ||
If the `beforeEach` `afterEach` `before` `after` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing. | ||
If the `beforeEach` function throws an error, the benchmark will stop and emit an `beforeEachError` event. | ||
If the `afterEach` function throws an error, the benchmark will stop and emit an `afterEachError` event. | ||
If the `beforeEach` function throws an error, the benchmark will stop and emit a `BeforeEachError` event. | ||
If the `afterEach` function throws an error, the benchmark will stop and emit an `AfterEachError` event. | ||
If the `fn` function throws an error, the benchmark will stop and emit a `RunError` event. | ||
If the `onComplete` function throws an error, the benchmark will stop and emit an `CompleteError` event. | ||
If the `onStart` function throws an error, the benchmark will stop and emit an `StartError` event. | ||
If the `after` function throws an error, the benchmark will stop and emit an `AfterError` event. | ||
If the `before` function throws an error, the benchmark will stop and emit a `BeforeError` event. | ||
If the `onError` function throws an error, the benchmark will stop and emit a `FatalError` event. | ||
@@ -157,0 +157,0 @@ |
Sorry, the diff of this file is not supported yet
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
88612