buffalo-bench - npm package version comparison

Comparing version 0.1.0 to 1.0.0


CHANGELOG.md

@@ -0,1 +1,12 @@

+ ## [1.0.0](https://github.com/Masquerade-Circus/buffalo-bench/compare/0.1.0...1.0.0) (2021-08-09)
+ ### ⚠ BREAKING CHANGES
+ * **main:** Renamed `compare` benchmark field to `compareWith`
+ ### Features
+ * **main:** add Suite class ([96d595a](https://github.com/Masquerade-Circus/buffalo-bench/commit/96d595a4c7e8eb651738927ebb3e232d6475baa9))
## 0.1.0 (2021-08-08)

@@ -2,0 +13,0 @@
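The one breaking change is the rename of the `compare` method to `compareWith`. A minimal migration sketch for consumers, assuming two benchmarks that have already run (the names and functions here are illustrative):

```ts
import Benchmark from "buffalo-bench";

const a = new Benchmark("stringify", () => JSON.stringify({ n: 1 }));
const b = new Benchmark("parse", () => JSON.parse('{"n":1}'));

await a.run();
await b.run();

// 0.1.0: a.compare(b)
// 1.0.0: a.compareWith(b) -- same semantics, default mode is still "percent"
const percentFaster = a.compareWith(b); // > 0 means `a` did more ops/sec than `b`
```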


dist/index.min.js

@@ -1,1 +0,1 @@

- (()=>{var e=Object.defineProperty,t=(t,r,a)=>(((t,r,a)=>{r in t?e(t,r,{enumerable:!0,configurable:!0,writable:!0,value:a}):t[r]=a})(t,"symbol"!=typeof r?r+"":r,a),a),r=class extends Error{code;message;name;statusCode=0;constructor(e="Something went wrong",t){super(),this.message=e,this.code=t,this.name=this.constructor.name}},a={BenchmarkError:r,SetupError:class extends r{statusCode=1;name="SetupError"},TeardownError:class extends r{statusCode=2;name="TeardownError"},RunError:class extends r{statusCode=3;name="RunError"},CompleteError:class extends r{statusCode=4;name="CompleteError"},StartError:class extends r{statusCode=5;name="StartError"},FatalError:class extends r{statusCode=7;name="FatalError"}};function n(e,t,r){let n=new a[r](t);return n.stack=e.stack,n}var s=Object.getPrototypeOf(async function(){}).constructor;var i=class{name;error;cycles=0;samples=0;hz=0;meanTime=0;medianTime=0;standardDeviation=0;maxTime=0;minTime=0;times=[];options;stamp;runTime=0;totalTime=0;constructor(e,t,r={}){this.name=e;let a={...i.defaults,...r};"function"==typeof t?a.fn=t:a={...a,...t},this.options=a,this.stamp=performance.now()}async runCallback(e,t,...r){if(t)try{await t.bind(this)(...r)}catch(r){return n(r,`Benchmark \`${this.name}\` failed to run \`${t.name}\` callback: ${r.message}`,e)}}toJSON(){let{name:e,error:t,cycles:r,hz:a,runTime:n,totalTime:s,samples:i,meanTime:m,medianTime:o,standardDeviation:c,maxTime:l,minTime:h}=this;return{name:e,errorMessage:t?t.message:void 0,cycles:r,samples:i,hz:a,meanTime:m,medianTime:o,standardDeviation:c,maxTime:l,minTime:h,runTime:n,totalTime:s}}compare(e,t="percent"){let{error:r,cycles:a,hz:n,meanTime:s,medianTime:i,standardDeviation:m,maxTime:o,minTime:c,runTime:l}=this;if(r)return-1;if(e.error)return 1;switch(t){case"meanTime":return s-e.meanTime;case"medianTime":return i-e.medianTime;case"standardDeviation":return m-e.standardDeviation;case"maxTime":return o-e.maxTime;case"minTime":return c-e.minTime;case"hz":return n-e.hz;case"runTime":return l-e.runTime;case"cycles":return a-e.cycles;case"percent":return Math.trunc(100/e.hz*n-100);default:throw new Error(`Unknown compare field: ${t}`)}}async runSample(e){let{setup:t,teardown:r}=this.options,a=performance.now();for(;performance.now()-a<1e3;){let a=performance.now();this.cycles++;let s,i=await this.runCallback("SetupError",t);if(i)throw i;try{s=await e()}catch(e){throw n(e,`Benchmark \`${this.name}\` failed to run \`fn\`: ${e.message}`,"RunError")}this.times.push(s),this.runTime+=s;let m=await this.runCallback("TeardownError",r);if(m)throw m;this.totalTime+=performance.now()-a}}async run(){this.stamp=performance.now();let{maxTime:e,minSamples:t,onComplete:r,onStart:a,onError:n,fn:i}=this.options,m=1e3*e,o=function(e){let t=`const __start__ = performance.now();\n\n${e.toString().replace(/\n\s*/g,"").replace(/^(async\s)?\(\)\s?=>\s?\{(.*)\}$/g,"$2").replace(/^(async\s)?\(\)\s?=>\s?\((.*)\)$/g,"$2").replace(/^(async\s)?\(\)\s?=>\s?(.*)$/g,"$2").replace(/^(async\s)?function\s?\w+\(\)\s?\{(.*)\}$/g,"$2").replace(/^(async\s)?fn\s?\(\)\s?\{(.*)\}$/g,"$2").replace(/;$/g,"")};\n \nreturn performance.now() - __start__;`;return"AsyncFunction"===e.constructor.name?new s(t):new Function(t)}(i);try{let e=await this.runCallback("StartError",a);if(e)throw e;for(;this.samples<t||this.totalTime<m;)this.samples++,await this.runSample(o);this.hz=this.cycles/(this.runTime/1e3),this.meanTime=this.runTime/this.times.length,this.medianTime=this.times.sort((e,t)=>e-t)[Math.floor(this.times.length/2)]||0,this.standardDeviation=Math.sqrt(this.times.map(e=>Math.pow(e-this.meanTime,2)).reduce((e,t)=>e+t,0)/this.times.length),this.maxTime=this.times.reduce((e,t)=>Math.max(e,t),0),this.minTime=this.times.reduce((e,t)=>Math.min(e,t),1/0);let n=await this.runCallback("CompleteError",r);if(n)throw n}catch(e){this.error=e;let t=await this.runCallback("FatalError",n);if(t)throw t}}},m=i;t(m,"version","0.0.0"),t(m,"defaults",{maxTime:5,minSamples:1});var o=m;"undefined"!=typeof module?module.exports=o:self.Benchmark=o})();
+ (()=>{var e,t,r=Object.defineProperty,a=(e,t,a)=>(((e,t,a)=>{t in e?r(e,t,{enumerable:!0,configurable:!0,writable:!0,value:a}):e[t]=a})(e,"symbol"!=typeof t?t+"":t,a),a),i=class extends Error{code;message;name;statusCode=0;constructor(e="Something went wrong",t){super(),this.message=e,this.code=t,this.name=this.constructor.name}},s={BenchmarkError:i,beforeEachError:class extends i{statusCode=1;name="beforeEachError"},afterEachError:class extends i{statusCode=2;name="afterEachError"},RunError:class extends i{statusCode=3;name="RunError"},CompleteError:class extends i{statusCode=4;name="CompleteError"},StartError:class extends i{statusCode=5;name="StartError"},FatalError:class extends i{statusCode=7;name="FatalError"}};function n(e,t,r){let a=new s[r](t);return a.stack=e.stack,a}async function m(e,t,r,...a){if(r)try{await r.bind(e)(...a)}catch(a){return n(a,`Benchmark \`${e.name}\` failed to run \`${r.name}\` callback: ${a.message}`,t)}}(t=e||(e={})).MeanTime="meanTime",t.MedianTime="medianTime",t.StandardDeviation="standardDeviation",t.MaxTime="maxTime",t.MinTime="minTime",t.Hz="hz",t.RunTime="runTime",t.Cycles="cycles",t.Percent="percent";var o=class{name;error;cycles=0;samples=0;hz=0;meanTime=0;medianTime=0;standardDeviation=0;maxTime=0;minTime=0;times=[];options;stamp;runTime=0;totalTime=0;constructor(e,t,r={}){this.name=e;let a={...o.defaults,...r};"function"==typeof t?a.fn=t:a={...a,...t},this.options=a}toJSON(){let{name:e,error:t,cycles:r,hz:a,runTime:i,totalTime:s,samples:n,meanTime:m,medianTime:o,standardDeviation:h,maxTime:c,minTime:l}=this;return{name:e,errorMessage:t?t.message:void 0,cycles:r,samples:n,hz:a,meanTime:m,medianTime:o,standardDeviation:h,maxTime:c,minTime:l,runTime:i,totalTime:s}}compareWith(t,r=e.Percent){let{error:a,cycles:i,hz:s,meanTime:n,medianTime:m,standardDeviation:o,maxTime:h,minTime:c,runTime:l}=this;if(a)return-1;if(t.error)return 1;switch(r){case"meanTime":return n-t.meanTime;case"medianTime":return m-t.medianTime;case"standardDeviation":return o-t.standardDeviation;case"maxTime":return h-t.maxTime;case"minTime":return c-t.minTime;case"hz":return s-t.hz;case"runTime":return l-t.runTime;case"cycles":return i-t.cycles;case"percent":return Math.trunc(100/t.hz*s-100);default:throw new Error(`Unknown compare field: ${r}`)}}async runSample(){let{beforeEach:e,afterEach:t,fn:r}=this.options,a=performance.now();for(;performance.now()-a<1e3;){let a=performance.now();this.cycles++;let i,s=await m(this,"beforeEachError",e);if(s)throw s;try{if("AsyncFunction"===r.constructor.name){let e=performance.now();await r(),i=performance.now()-e}else{let e=performance.now();r(),i=performance.now()-e}}catch(e){throw n(e,`Benchmark \`${this.name}\` failed to run \`fn\`: ${e.message}`,"RunError")}this.times.push(i),this.runTime+=i;let o=await m(this,"afterEachError",t);if(o)throw o;this.totalTime+=performance.now()-a}}async run(){this.stamp=performance.now();let{maxTime:e,minSamples:t,onComplete:r,onStart:a,onError:i,fn:s}=this.options,n=1e3*e;try{let e=await m(this,"StartError",a);if(e)throw e;for(;this.samples<t||this.totalTime<n;)this.samples++,await this.runSample();this.hz=this.cycles/(this.runTime/1e3),this.meanTime=this.runTime/this.times.length,this.medianTime=this.times.sort((e,t)=>e-t)[Math.floor(this.times.length/2)]||0,this.standardDeviation=Math.sqrt(this.times.map(e=>Math.pow(e-this.meanTime,2)).reduce((e,t)=>e+t,0)/this.times.length),this.maxTime=this.times.reduce((e,t)=>Math.max(e,t),0),this.minTime=this.times.reduce((e,t)=>Math.min(e,t),1/0);let i=await m(this,"CompleteError",r);if(i)throw i}catch(e){this.error=e;let t=await m(this,"FatalError",i,e);if(t)throw t}}},h=o;a(h,"Suite"),a(h,"version","0.1.0"),a(h,"defaults",{maxTime:5,minSamples:1});var c=class{name;error;options;stamp;runTime=0;totalTime=0;benchmarks=[];constructor(e,t={}){this.name=e,this.options={...c.defaults,...t}}toJSON(){let{error:e,name:t,runTime:r,totalTime:a}=this;return{name:t,errorMessage:e?e.message:void 0,runTime:r,totalTime:a,passed:!e,benchmarks:this.benchmarks.map(e=>e.toJSON())}}add(e,t,r={}){let a={minSamples:this.options.minSamples,maxTime:this.options.maxTime,...r};"function"==typeof t?a.fn=t:a={...a,...t};let i=new h(e,a);return this.benchmarks.push(i),i}async run(){this.stamp=performance.now();let{beforeEach:e,afterEach:t,onComplete:r,onStart:a,onError:i}=this.options;try{let i=await m(this,"StartError",a);if(i)throw i;for(let r=0,a=this.benchmarks.length;r<a;r++){let a=this.benchmarks[r],i=await m(this,"beforeEachError",e,a,r);if(i)throw i;await a.run(),this.runTime+=a.runTime,this.totalTime+=a.totalTime;let s=await m(this,"afterEachError",t,a,r);if(s)throw s}let s=await m(this,"CompleteError",r);if(s)throw s}catch(e){this.error=e;let t=await m(this,"FatalError",i,e);if(t)throw t}}getSortedBenchmarksBy(e){return this.benchmarks.slice().sort((t,r)=>{let a=r.compareWith(t,e);return a>0?1:a<0?-1:0})}getFastest(e){return this.getSortedBenchmarksBy(e)[0]}getSlowest(e){let t=this.getSortedBenchmarksBy(e);return t[t.length-1]}compareFastestWithLowest(e){let t=this.getFastest(e),r=this.getSlowest(e);return{fastest:t,slowest:r,by:t.compareWith(r,e)}}},l=c;a(l,"defaults",{maxTime:5,minSamples:1}),h.Suite=l;var u={CompareBy:e,default:h};"undefined"!=typeof module?module.exports=u:self.Benchmark=u})();

lib/index.ts

@@ -20,3 +20,3 @@ // A benchmarking library that supports async hooks and benchmarks by default.

// The previous code will log 1 and then run the benchmark and the log 2 could be logged before the benchmark is finished or could't be logged at all.
- // This problem prevent us to create an async setup and/or teardown for a benchmark like an api call that could require it.
+ // This problem prevent us to create an async onStart and/or onComplete for a benchmark like an api call that could require it.

@@ -35,6 +35,6 @@ // This library solves this problem by providing a way to create a benchmark with all the hooks and benchmark handled as async by default.

// minSamples: 1,
- // setup: async () => {
+ // beforeEach: async () => {
// await doSomething();
// },
- // teardown: async () => {
+ // afterEach: async () => {
// await doSomething();

@@ -61,4 +61,4 @@ // },

// * `minSamples`: The minimum number of samples that must be taken.
- // * `setup`: A function to be run once before each benchmark loop, does not count for run time.
- // * `teardown`: A function to be run once after each benchmark loop, does not count for run time.
+ // * `beforeEach`: A function to be run once before each benchmark loop, does not count for run time.
+ // * `afterEach`: A function to be run once after each benchmark loop, does not count for run time.
// * `onComplete`: A function to be run once after the benchmark loop finishes, does not count for run time.

@@ -82,4 +82,4 @@ // * `onStart`: A function to be run once before the benchmark loop starts, does not count for run time.

// * `stamp`: A timestamp representing when the benchmark was created.
- // * `runTime`: The total time taken to run the benchmark, this does not include setup, teardown, onStrart and onComplete hooks.
- // * `totalTime`: The total time taken to run the benchmark including setup, teardown, onStart and onComplete hooks.
+ // * `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, onStrart and onComplete hooks.
+ // * `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, onStart and onComplete hooks.

@@ -89,3 +89,3 @@ // The `Benchmark` instance has the following methods:

// * `toJSON`: Return a JSON representation of the benchmark.
- // * `compare`: Compare this benchmark to another.
+ // * `compareWith`: Compare this benchmark to another.

@@ -96,6 +96,6 @@ // The `Benchmark` class has the following static properties:

- // If the `setup` `teardown` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing.
+ // If the `beforeEach` `afterEach` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing.
- // If the `setup` function throws an error, the benchmark will stop and emit an `SetupError` event.
- // If the `teardown` function throws an error, the benchmark will stop and emit an `TeardownError` event.
+ // If the `beforeEach` function throws an error, the benchmark will stop and emit an `beforeEachError` event.
+ // If the `afterEach` function throws an error, the benchmark will stop and emit an `afterEachError` event.
// If the `fn` function throws an error, the benchmark will stop and emit an `RunError` event.

@@ -130,12 +130,12 @@ // If the `onComplete` function throws an error, the benchmark will stop and emit an `CompleteError` event.

- // SetupError: The `setup` function threw an error.
- class SetupError extends BenchmarkError {
+ // beforeEachError: The `beforeEach` function threw an error.
+ class beforeEachError extends BenchmarkError {
statusCode = 1;
- name = "SetupError";
+ name = "beforeEachError";
}
- // TeardownError: The `teardown` function threw an error.
- class TeardownError extends BenchmarkError {
+ // afterEachError: The `afterEach` function threw an error.
+ class afterEachError extends BenchmarkError {
statusCode = 2;
- name = "TeardownError";
+ name = "afterEachError";
}
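The renamed error classes keep their status codes, so code that inspects a failed run only needs the new names. A sketch, assuming the failure surfaces on `benchmark.error` after `run()` as in the source above:

```ts
import Benchmark from "buffalo-bench";

const bench = new Benchmark("flaky", {
  fn: () => JSON.stringify({ n: 1 }),
  beforeEach: () => { throw new Error("fixture missing"); },
});

await bench.run(); // stores the failure on `bench.error` instead of rejecting

if (bench.error) {
  console.log(bench.error.name);       // "beforeEachError" (was "SetupError" in 0.1.0)
  console.log(bench.error.statusCode); // 1, unchanged by the rename
}
```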

@@ -169,4 +169,4 @@

BenchmarkError,
- SetupError,
- TeardownError,
+ beforeEachError,
+ afterEachError,
RunError,

@@ -178,3 +178,3 @@ CompleteError,

- type ErrorType = "SetupError" | "TeardownError" | "RunError" | "CompleteError" | "StartError" | "FatalError";
+ type ErrorType = "beforeEachError" | "afterEachError" | "RunError" | "CompleteError" | "StartError" | "FatalError";

@@ -191,5 +191,5 @@ // BenchmarkFunction a function that can be used as a benchmark.

// A function to be run once before each benchmark loop, does not count for run time.
- setup?: () => Promise<void> | void;
+ beforeEach?: () => Promise<void> | void;
// A function to be run once after each benchmark loop, does not count for run time.
- teardown?: () => Promise<void> | void;
+ afterEach?: () => Promise<void> | void;
// A function to be run once after the benchmark completes, does not count for run time.
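Putting the renamed hooks together, a 1.0.0 options object might look like this sketch (`resetFixture` and `cleanup` are hypothetical placeholders):

```ts
import Benchmark from "buffalo-bench";

// hypothetical per-cycle helpers, stubbed for illustration
const resetFixture = async (): Promise<void> => {};
const cleanup = async (): Promise<void> => {};

const bench = new Benchmark("api call", {
  maxTime: 5,    // seconds, matching Benchmark.defaults
  minSamples: 1,
  fn: async () => { /* code under measurement */ },
  beforeEach: resetFixture, // was `setup` in 0.1.0
  afterEach: cleanup,       // was `teardown` in 0.1.0
  onStart: () => console.log("starting"),
  onComplete: () => console.log("done"),
  onError: (error) => console.error(error.message),
});

await bench.run();
```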

@@ -220,3 +220,13 @@ onComplete?: () => Promise<void> | void;

- type CompareBy = "meanTime" | "medianTime" | "standardDeviation" | "maxTime" | "minTime" | "hz" | "runTime" | "cycles" | "percent";
+ export const enum CompareBy {
+ MeanTime = "meanTime",
+ MedianTime = "medianTime",
+ StandardDeviation = "standardDeviation",
+ MaxTime = "maxTime",
+ MinTime = "minTime",
+ Hz = "hz",
+ RunTime = "runTime",
+ Cycles = "cycles",
+ Percent = "percent"
+ }
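The `const enum` members inline to the same strings as the old union, so the runtime values are unchanged; TypeScript call sites just switch from string literals to enum members. A sketch, with `a` and `b` standing in for benchmarks that have already run:

```ts
import Benchmark, { CompareBy } from "buffalo-bench";

declare const a: Benchmark;
declare const b: Benchmark;

a.compareWith(b, CompareBy.MeanTime); // compiles to a.compareWith(b, "meanTime")
a.compareWith(b, CompareBy.Hz);
a.compareWith(b);                     // defaults to CompareBy.Percent
```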

@@ -230,2 +240,3 @@ type BenchmarkConstructor = (

interface Benchmark {
Suite: typeof Suite;
readonly version: string;

@@ -254,3 +265,3 @@ readonly defaults: {

toJSON(): JsonBenchmark;
- compare(other: Benchmark, compare: CompareBy): number;
+ compareWith(other: Benchmark, compareBy: CompareBy): number;
}

@@ -265,23 +276,20 @@

- // AsyncFunction constructor to create an async function that can be used as a benchmark.
- let AsyncFunction = Object.getPrototypeOf(async function () {}).constructor;
// helper function to know if a function is async or not
function isAsync(fn: BenchmarkFunction): boolean {
return fn.constructor.name === "AsyncFunction";
}
- // Create a new function that can be used as a benchmark from a passed function.
- function getFunctionToBench(fn: BenchmarkFunction): Function {
- let body = fn
- .toString()
- .replace(/\n\s*/g, "") // Inline newlines and whitespace
- .replace(/^(async\s)?\(\)\s?=>\s?\{(.*)\}$/g, "$2") // Handles `async () => { ... }` && `() => { ... }`
- .replace(/^(async\s)?\(\)\s?=>\s?\((.*)\)$/g, "$2") // Handles `async () => ( ... )` && `() => ( ... )`
- .replace(/^(async\s)?\(\)\s?=>\s?(.*)$/g, "$2") // Handles `async () => ...` && `() => ...`
- .replace(/^(async\s)?function\s?\w+\(\)\s?\{(.*)\}$/g, "$2") // Handles `async function ... { ... }` && `function ... { ... }`
- .replace(/^(async\s)?fn\s?\(\)\s?\{(.*)\}$/g, "$2") // Handles `async fn() { ... }` && `fn() { ... }`
- .replace(/;$/g, ""); // Replace the last ; to prevent double ;; we will add it later
- let code = `const __start__ = performance.now();
- ${body};
- return performance.now() - __start__;`;
- return fn.constructor.name === "AsyncFunction" ? new AsyncFunction(code) : new Function(code);
- }
+ async function runCallback(
+ instance: any,
+ errorTypeIfAny: ErrorType,
+ callback?: (...args: any[]) => Promise<void> | void,
+ ...args: any[]
+ ): Promise<void | BenchmarkError> {
+ if (callback) {
+ try {
+ await callback.bind(instance)(...args);
+ } catch (error) {
+ return getError(error, `Benchmark \`${instance.name}\` failed to run \`${callback.name}\` callback: ${error.message}`, errorTypeIfAny);
+ }
+ }
+ }
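Hoisting `runCallback` out of the class (the private method it replaces is removed further down) lets the new `Suite` class reuse it. The removed `getFunctionToBench` had a deeper problem worth noting: a function rebuilt from its source text with `new Function`/`AsyncFunction` loses its closure scope, so any benchmark that touched outer variables would break. A self-contained sketch of that failure mode (illustrative, not library code):

```ts
const payload = { n: 1 };
const fn = () => JSON.stringify(payload);

// 0.1.0-style rebuild from source text: the closure over `payload` is lost,
// because `new Function` evaluates its body in the global scope.
const rebuilt = new Function(`const __start__ = performance.now();
${fn.toString().replace(/^\(\)\s?=>\s?/, "")};
return performance.now() - __start__;`);

rebuilt(); // throws ReferenceError: payload is not defined
```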

@@ -291,2 +299,3 @@

class Benchmark implements Benchmark {
static Suite: typeof Suite;
static readonly version: string = version;

@@ -313,3 +322,3 @@ static readonly defaults: {

options: BenchmarkOptions;
- stamp: number;
+ stamp!: number;
runTime: number = 0;
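`stamp!` is a TypeScript definite assignment assertion: the constructor no longer initializes `stamp` (that assignment is removed in the next hunk; 1.0.0 sets it at the top of `run()` instead), and the `!` tells the compiler the property is assigned before use. In isolation:

```ts
class Example {
  stamp!: number; // assigned before use, but not in the constructor
  run(): void {
    this.stamp = performance.now();
  }
}
```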

@@ -335,15 +344,4 @@ totalTime: number = 0;

this.options = opts;
this.stamp = performance.now();
}
private async runCallback(errorTypeIfAny: ErrorType, callback?: (...args: any[]) => Promise<void> | void, ...args: any[]): Promise<void | BenchmarkError> {
if (callback) {
try {
await callback.bind(this)(...args);
} catch (error) {
return getError(error, `Benchmark \`${this.name}\` failed to run \`${callback.name}\` callback: ${error.message}`, errorTypeIfAny);
}
}
}
toJSON(): JsonBenchmark {

@@ -368,3 +366,3 @@ const { name, error, cycles, hz, runTime, totalTime, samples, meanTime, medianTime, standardDeviation, maxTime, minTime } = this;

- compare(other: Benchmark, compare: CompareBy = "percent"): number {
+ compareWith(other: Benchmark, compareBy: CompareBy = CompareBy.Percent): number {
const { error, cycles, hz, meanTime, medianTime, standardDeviation, maxTime, minTime, runTime } = this;

@@ -380,3 +378,3 @@

- switch (compare) {
+ switch (compareBy) {
case "meanTime":

@@ -401,8 +399,8 @@ return meanTime - other.meanTime;

default:
- throw new Error(`Unknown compare field: ${compare}`);
+ throw new Error(`Unknown compare field: ${compareBy}`);
}
}
- async runSample(functionToBenchCleaned: Function) {
- const { setup, teardown } = this.options;
+ async runSample() {
+ const { beforeEach, afterEach, fn } = this.options;
let sampleMaxTime = 1000;

@@ -414,5 +412,5 @@ let startTime = performance.now();

this.cycles++;
- const setupError = await this.runCallback("SetupError", setup);
- if (setupError) {
- throw setupError;
+ const beforeEachError = await runCallback(this, "beforeEachError", beforeEach);
+ if (beforeEachError) {
+ throw beforeEachError;
}

@@ -422,3 +420,11 @@

try {
- time = await functionToBenchCleaned();
+ if (isAsync(fn)) {
+ let start = performance.now();
+ await fn();
+ time = performance.now() - start;
+ } else {
+ let start = performance.now();
+ fn();
+ time = performance.now() - start;
+ }
} catch (error) {
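Instead of rewriting the function's source, 1.0.0 simply brackets the call with two timestamps and awaits async functions, so the measurement includes the awaited work rather than just promise creation. Note that detection goes through `fn.constructor.name`, so a plain function that returns a Promise would be timed without awaiting. A self-contained sketch of the pattern:

```ts
// Sketch of the 1.0.0 measurement step (assumes async-ness is detected via
// the constructor name, as in the diff above).
async function measureOnce(fn: () => unknown): Promise<number> {
  const start = performance.now();
  if (fn.constructor.name === "AsyncFunction") {
    await fn(); // include the async work in the measurement
  } else {
    fn();
  }
  return performance.now() - start;
}
```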

@@ -431,5 +437,5 @@ throw getError(error, `Benchmark \`${this.name}\` failed to run \`fn\`: ${error.message}`, "RunError");

- const teardownError = await this.runCallback("TeardownError", teardown);
- if (teardownError) {
- throw teardownError;
+ const afterEachError = await runCallback(this, "afterEachError", afterEach);
+ if (afterEachError) {
+ throw afterEachError;
}

@@ -447,6 +453,4 @@

- let functionToBenchCleaned = getFunctionToBench(fn);
try {
- const onStartError = await this.runCallback("StartError", onStart);
+ const onStartError = await runCallback(this, "StartError", onStart);
if (onStartError) {

@@ -458,3 +462,3 @@ throw onStartError;

this.samples++;
- await this.runSample(functionToBenchCleaned);
+ await this.runSample();
}

@@ -474,3 +478,3 @@

- const onCompleteError = await this.runCallback("CompleteError", onComplete);
+ const onCompleteError = await runCallback(this, "CompleteError", onComplete);
if (onCompleteError) {

@@ -482,3 +486,3 @@ throw onCompleteError;

- const onErrorError = await this.runCallback("FatalError", onError);
+ const onErrorError = await runCallback(this, "FatalError", onError, error);
if (onErrorError) {

@@ -491,3 +495,193 @@ throw onErrorError;

//*** Class Suite ***//
type SuiteOptions = {
// The maximum time in seconds that a benchmark can take.
maxTime: number;
// The minimum number of samples that must be taken.
minSamples: number;
// A function to be run once before each benchmark run
beforeEach?: (benchmark: Benchmark, i: number) => Promise<void> | void;
// A function to be run once after each benchmark run
afterEach?: (benchmark: Benchmark, i: number) => Promise<void> | void;
// A function to be run once after the suite completes
onComplete?: () => Promise<void> | void;
// A function to be run once before the suite starts
onStart?: () => Promise<void> | void;
// A function to be run if an error occurs.
onError?: (error: BenchmarkError) => Promise<void> | void;
};
interface JsonSuite {
name: string;
errorMessage?: string;
runTime: number;
totalTime: number;
passed: boolean;
benchmarks: JsonBenchmark[];
}
type SuiteConstructor = (name: string, options?: Partial<SuiteOptions>) => Suite;
interface Suite {
readonly defaults: {
maxTime: number;
minSamples: number;
};
name: string;
error?: BenchmarkError;
options: SuiteOptions;
stamp: number;
runTime: number;
totalTime: number;
benchmarks: Benchmark[];
constructor: SuiteConstructor;
add(name: string, optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction, options: Partial<BenchmarkOptions>): Benchmark;
toJSON(): JsonSuite;
run(): Promise<void>;
getSortedBenchmarks(sortedBy: CompareBy): Benchmark[];
getFastest(sortedBy: CompareBy): Benchmark;
getSlowest(sortedBy: CompareBy): Benchmark;
compareFastestWithLowest(compareBy: CompareBy): { fastest: Benchmark; slowest: Benchmark; by: number };
}
class Suite implements Suite {
static readonly defaults = {
maxTime: 5,
minSamples: 1
};
name: string;
error?: BenchmarkError;
options: SuiteOptions;
stamp!: number;
runTime: number = 0;
totalTime: number = 0;
benchmarks: Benchmark[] = [];
constructor(name: string, options: Partial<SuiteOptions> = {}) {
this.name = name;
this.options = {
...Suite.defaults,
...options
};
}
toJSON(): JsonSuite {
const { error, name, runTime, totalTime } = this;
return {
name,
errorMessage: error ? error.message : undefined,
runTime,
totalTime,
passed: !error,
benchmarks: this.benchmarks.map((benchmark) => benchmark.toJSON())
};
}
add(
name: string,
optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction,
options: Partial<BenchmarkOptions> = {}
): Benchmark {
let opts = {
...{
minSamples: this.options.minSamples,
maxTime: this.options.maxTime
},
...options
} as BenchmarkOptions;
if (typeof optionsOrFn === "function") {
opts.fn = optionsOrFn;
} else {
opts = {
...opts,
...optionsOrFn
};
}
let benchmark = new Benchmark(name, opts);
this.benchmarks.push(benchmark);
return benchmark;
}
async run(): Promise<void> {
this.stamp = performance.now();
const { beforeEach, afterEach, onComplete, onStart, onError } = this.options;
try {
const onStartError = await runCallback(this, "StartError", onStart);
if (onStartError) {
throw onStartError;
}
for (let i = 0, l = this.benchmarks.length; i < l; i++) {
let benchmark = this.benchmarks[i];
const onbeforeEachError = await runCallback(this, "beforeEachError", beforeEach, benchmark, i);
if (onbeforeEachError) {
throw onbeforeEachError;
}
await benchmark.run();
this.runTime += benchmark.runTime;
this.totalTime += benchmark.totalTime;
const onafterEachError = await runCallback(this, "afterEachError", afterEach, benchmark, i);
if (onafterEachError) {
throw onafterEachError;
}
}
const onCompleteError = await runCallback(this, "CompleteError", onComplete);
if (onCompleteError) {
throw onCompleteError;
}
} catch (error) {
this.error = error;
const onErrorError = await runCallback(this, "FatalError", onError, error);
if (onErrorError) {
throw onErrorError;
}
}
}
getSortedBenchmarksBy(sortBy: CompareBy): Benchmark[] {
const benchmarks = this.benchmarks.slice();
const sortedBenchmarks = benchmarks.sort((a, b) => {
let result = b.compareWith(a, sortBy);
return result > 0 ? 1 : result < 0 ? -1 : 0;
});
return sortedBenchmarks;
}
getFastest(sortBy: CompareBy): Benchmark {
const sortedBenchmarks = this.getSortedBenchmarksBy(sortBy);
return sortedBenchmarks[0];
}
getSlowest(sortBy: CompareBy): Benchmark {
const sortedBenchmarks = this.getSortedBenchmarksBy(sortBy);
return sortedBenchmarks[sortedBenchmarks.length - 1];
}
compareFastestWithLowest(compareBy: CompareBy) {
const fastest = this.getFastest(compareBy);
const slowest = this.getSlowest(compareBy);
return {
fastest,
slowest,
by: fastest.compareWith(slowest, compareBy)
};
}
}
Benchmark.Suite = Suite;
// Export the Benchmark class.
export default Benchmark;
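The headline feature of 1.0.0 is the `Suite` class above, reachable as `Benchmark.Suite`. A usage sketch built only from the methods shown in the diff:

```ts
import Benchmark, { CompareBy } from "buffalo-bench";

const suite = new Benchmark.Suite("serialization", { maxTime: 5, minSamples: 1 });

suite.add("stringify", () => JSON.stringify({ hello: "world" }));
suite.add("parse", () => JSON.parse('{"hello":"world"}'));

await suite.run();

const { fastest, slowest, by } = suite.compareFastestWithLowest(CompareBy.Percent);
console.log(`${fastest.name} beats ${slowest.name} by ~${by}%`);
console.log(JSON.stringify(suite.toJSON(), null, 2));
```
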
package.json

{
"name": "buffalo-bench",
- "version": "0.1.0",
+ "version": "1.0.0",
"description": "A benchmarking library that supports async hooks and benchmarks by default.",

@@ -5,0 +5,0 @@ "source": "lib/index.ts",

README.md

@@ -38,5 +38,5 @@ <div style="text-align: center">

- The previous code will log `1` and then run the benchmark, and the log `2` could be logged before the benchmark is finished or could't be logged at all.
+ The previous code will log `1` and then run the benchmark, and the log `2` could be logged before the benchmark is finished or couldn't be logged at all.
- This problem prevent us to create an async setup and/or teardown for a benchmark like an api call that requires opening a db connection, creating a collection, adding a document and launching a server to handle the request. And on the teardown clear the databese, close the connection and stop the server.
+ This problem prevent us to create an async onStart and/or onComplete for a benchmark like an api call that requires opening a db connection, creating a collection, adding a document and launching a server to handle the request. And after the benchmark finishes clear the databese, close the connection and stop the server.
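Because `run()` returns a Promise and every hook is awaited, the ordering problem described above disappears. A sketch (the URL is a placeholder):

```ts
import Benchmark from "buffalo-bench";

const bench = new Benchmark("api", async () => {
  await fetch("https://example.com/"); // placeholder endpoint
});

console.log(1);
await bench.run(); // resolves only after sampling and all hooks finish
console.log(2);    // always logs after the benchmark completes
```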

@@ -70,6 +70,6 @@ So, BuffaloBench solves this problem by providing a way to create a benchmarks with all the hooks handled as async by default. (Also works with sync functions)

minSamples: 1,
- setup: async () => {
+ beforeEach: async () => {
await doSomething();
},
- teardown: async () => {
+ afterEach: async () => {
await doSomething();

@@ -113,4 +113,4 @@ },

* `minSamples`: The minimum number of samples that must be taken.
- * `setup`: A function to be run once before each benchmark loop, does not count for run time.
- * `teardown`: A function to be run once after each benchmark loop, does not count for run time.
+ * `beforeEach`: A function to be run once before each benchmark loop, does not count for run time.
+ * `afterEach`: A function to be run once after each benchmark loop, does not count for run time.
* `onStart`: A function to be run once before the benchmark loop starts, does not count for run time.

@@ -134,4 +134,4 @@ * `onComplete`: A function to be run once after the benchmark loop finishes, does not count for run time.

* `stamp`: A timestamp representing when the benchmark was created.
- * `runTime`: The total time taken to run the benchmark, this does not include setup, teardown, onStrart and onComplete hooks.
- * `totalTime`: The total time taken to run the benchmark including setup, teardown, onStart and onComplete hooks.
+ * `runTime`: The total time taken to run the benchmark, this does not include beforeEach, afterEach, onStrart and onComplete hooks.
+ * `totalTime`: The total time taken to run the benchmark including beforeEach, afterEach, onStart and onComplete hooks.
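Since `runTime` counts only `fn` while `totalTime` also counts the per-cycle hooks, their difference approximates the time spent outside the measured code. A sketch:

```ts
import Benchmark from "buffalo-bench";

const bench = new Benchmark("stringify", () => JSON.stringify({ n: 1 }), {
  beforeEach: async () => { /* per-cycle fixture work */ },
});

await bench.run();

// milliseconds spent outside `fn` (hooks plus loop bookkeeping)
console.log((bench.totalTime - bench.runTime).toFixed(1));
```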

@@ -149,6 +149,6 @@ The `Benchmark` instance has the following methods:

- If the `setup` `teardown` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing.
+ If the `beforeEach` `afterEach` `onComplete` `onStart` `onError` returns a Promise, the benchmark will wait for the promise to resolve before continuing.
- If the `setup` function throws an error, the benchmark will stop and emit an `SetupError` event.
- If the `teardown` function throws an error, the benchmark will stop and emit an `TeardownError` event.
+ If the `beforeEach` function throws an error, the benchmark will stop and emit an `beforeEachError` event.
+ If the `afterEach` function throws an error, the benchmark will stop and emit an `afterEachError` event.
If the `fn` function throws an error, the benchmark will stop and emit an `RunError` event.

@@ -155,0 +155,0 @@ If the `onComplete` function throws an error, the benchmark will stop and emit an `CompleteError` event.

