Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

buffalo-bench

Package Overview
Dependencies
Maintainers
1
Versions
7
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

buffalo-bench - npm Package Compare versions

Comparing version 1.0.3 to 2.0.0

dist/index.d.ts

2

dist/index.min.js

@@ -1,1 +0,1 @@

// Minified UMD bundle of buffalo-bench (dist/index.min.js, "before" side of the 1.0.3 → 2.0.0 diff).
// Contents, in order of definition:
//   - `r`/`a`: Object.defineProperty helper used to attach static fields (Suite, version, defaults).
//   - `i`: high-resolution timer — performance.now() when `performance` exists, else Date.now().
//   - `s` + `n`: BenchmarkError base class (statusCode 0) and its subclasses keyed by name
//     (BeforeEachError=1, AfterEachError=2, RunError=3, AfterError=4, BeforeError=5, FatalError=7).
//   - `m` (getError): wraps a caught Error in the typed error, copying stack and own properties.
//   - `o` (runCallback): awaits an optional lifecycle callback bound to the instance, returning a
//     typed error instead of throwing.
//   - CompareBy enum built by mutating `e` in place (meanTime … percent).
//   - `h`/`c`: the Benchmark class (runSample loops for 1s wall-clock per sample; run() enforces
//     minSamples/maxTime, then derives hz, mean/median, standard deviation, max/min).
//   - `l`/`u`: the Suite class; `c.Suite=u` attaches it to Benchmark.
//   - Export shape: `{CompareBy, default: Benchmark}` via module.exports, else `self.Benchmark`.
// NOTE(review): the bundle stamps version "1.0.1" although the diff compares tag 1.0.3 —
// presumably a stale build-time constant; confirm against the published release.
(()=>{var e,t,r=Object.defineProperty,a=(e,t,a)=>(((e,t,a)=>{t in e?r(e,t,{enumerable:!0,configurable:!0,writable:!0,value:a}):e[t]=a})(e,"symbol"!=typeof t?t+"":t,a),a),i="undefined"==typeof performance?()=>Date.now():()=>performance.now(),s=class extends Error{code;message;name;statusCode=0;constructor(e="Something went wrong",t){super(),this.message=e,this.code=t,this.name=this.constructor.name}},n={BenchmarkError:s,BeforeEachError:class extends s{statusCode=1;name="BeforeEachError"},AfterEachError:class extends s{statusCode=2;name="AfterEachError"},RunError:class extends s{statusCode=3;name="RunError"},AfterError:class extends s{statusCode=4;name="AfterError"},BeforeError:class extends s{statusCode=5;name="BeforeError"},FatalError:class extends s{statusCode=7;name="FatalError"}};function m(e,t,r){let a=new n[r](t);a.stack=e.stack;for(let t in e)e.hasOwnProperty(t)&&(a[t]=e[t]);return a}async function o(e,t,r,...a){if(r)try{await r.bind(e)(...a)}catch(a){return m(a,`Benchmark \`${e.name}\` failed to run \`${r.name}\` callback: ${a.message}`,t)}}(t=e||(e={})).MeanTime="meanTime",t.MedianTime="medianTime",t.StandardDeviation="standardDeviation",t.MaxTime="maxTime",t.MinTime="minTime",t.Hz="hz",t.RunTime="runTime",t.Cycles="cycles",t.Percent="percent";var h=class{name;error;cycles=0;samples=0;hz=0;meanTime=0;medianTime=0;standardDeviation=0;maxTime=0;minTime=0;times=[];options;stamp;runTime=0;totalTime=0;constructor(e,t,r={}){this.name=e;let a={...h.defaults,...r};"function"==typeof t?a.fn=t:a={...a,...t},this.options=a}toJSON(){let{name:e,error:t,cycles:r,hz:a,runTime:i,totalTime:s,samples:n,meanTime:m,medianTime:o,standardDeviation:h,maxTime:c,minTime:l}=this;return{name:e,errorMessage:t?t.message:void 
0,cycles:r,samples:n,hz:a,meanTime:m,medianTime:o,standardDeviation:h,maxTime:c,minTime:l,runTime:i,totalTime:s}}compareWith(t,r=e.Percent){let{error:a,cycles:i,hz:s,meanTime:n,medianTime:m,standardDeviation:o,maxTime:h,minTime:c,runTime:l}=this;if(a)return-1;if(t.error)return 1;switch(r){case"meanTime":return t.meanTime-n;case"medianTime":return t.medianTime-m;case"standardDeviation":return o-t.standardDeviation;case"maxTime":return h-t.maxTime;case"minTime":return t.minTime-c;case"hz":return s-t.hz;case"runTime":return l-t.runTime;case"cycles":return i-t.cycles;case"percent":return Math.trunc(100*(100/n*t.meanTime-100))/100;default:throw new Error(`Unknown compare field: ${r}`)}}async runSample(){let{beforeEach:e,afterEach:t,fn:r}=this.options,a=i();for(;i()-a<1e3;){let a=i();this.cycles++;let s,n=await o(this,"BeforeEachError",e);if(n)throw n;try{if("AsyncFunction"===r.constructor.name){let e=i();await r(),s=i()-e}else{let e=i();r(),s=i()-e}}catch(e){throw m(e,`Benchmark \`${this.name}\` failed to run \`fn\`: ${e.message}`,"RunError")}this.times.push(s),this.runTime+=s;let h=await o(this,"AfterEachError",t);if(h)throw h;this.totalTime+=i()-a}}async run(){this.stamp=i();let{maxTime:e,minSamples:t,after:r,before:a,onError:s}=this.options,n=1e3*e;try{let e=await o(this,"BeforeError",a);if(e)throw e;for(;this.samples<t||this.totalTime<n;)this.samples++,await this.runSample();this.hz=this.cycles/(this.runTime/1e3),this.meanTime=this.runTime/this.times.length,this.medianTime=this.times.sort((e,t)=>e-t)[Math.floor(this.times.length/2)]||0,this.standardDeviation=Math.sqrt(this.times.map(e=>Math.pow(e-this.meanTime,2)).reduce((e,t)=>e+t,0)/this.times.length),this.maxTime=this.times.reduce((e,t)=>Math.max(e,t),0),this.minTime=this.times.reduce((e,t)=>Math.min(e,t),1/0);let i=await o(this,"AfterError",r);if(i)throw i}catch(e){this.error=e;let t=await o(this,"FatalError",s,e);if(t)throw 
t}}},c=h;a(c,"Suite"),a(c,"version","1.0.1"),a(c,"defaults",{maxTime:5,minSamples:1});var l=class{name;error;options;stamp;runTime=0;totalTime=0;benchmarks=[];constructor(e,t={}){this.name=e,this.options={...l.defaults,...t}}toJSON(){let{error:t,name:r,runTime:a,totalTime:i}=this;return{name:r,errorMessage:t?t.message:void 0,runTime:a,totalTime:i,passed:!t,benchmarks:this.getSortedBenchmarksBy(e.MeanTime).map(e=>e.toJSON())}}add(e,t,r={}){let a={minSamples:this.options.minSamples,maxTime:this.options.maxTime,...r};"function"==typeof t?a.fn=t:a={...a,...t};let i=new c(e,a);return this.benchmarks.push(i),i}async run(){this.stamp=i();let{beforeEach:e,afterEach:t,after:r,before:a,onError:s}=this.options;try{let i=await o(this,"BeforeError",a);if(i)throw i;for(let r=0,a=this.benchmarks.length;r<a;r++){let a=this.benchmarks[r],i=await o(this,"BeforeEachError",e,a,r);if(i)throw i;await a.run(),this.runTime+=a.runTime,this.totalTime+=a.totalTime;let s=await o(this,"AfterEachError",t,a,r);if(s)throw s}let s=await o(this,"AfterError",r);if(s)throw s}catch(e){this.error=e;let t=await o(this,"FatalError",s,e);if(t)throw t}}getSortedBenchmarksBy(e){return this.benchmarks.slice().sort((t,r)=>{let a=r.compareWith(t,e);return a>0?1:a<0?-1:0})}getFastest(e){return this.getSortedBenchmarksBy(e)[0]}getSlowest(e){let t=this.getSortedBenchmarksBy(e);return t[t.length-1]}compareFastestWithSlowest(t){let r=t===e.Percent?e.MeanTime:t,a=this.getFastest(r),i=this.getSlowest(r);return{fastest:a,slowest:i,by:a.compareWith(i,t)}}},u=l;a(u,"defaults",{maxTime:5,minSamples:1}),c.Suite=u;var f={CompareBy:e,default:c};"undefined"!=typeof module?module.exports=f:self.Benchmark=f})();
// Minified UMD bundle of buffalo-bench (dist/index.min.js, "after" side of the diff, v2.0.0).
// Structurally the same library as the 1.0.3 bundle, with these visible differences:
//   - Runs under "use strict".
//   - The CompareBy map `i` is built by an IIFE `(e=>(…,e))(i||{})` instead of in-place mutation.
//   - New helper `m(e)` (isAsync): detects async fns via `e.constructor.name==="AsyncFunction"`.
//   - Benchmark static `version` is stamped "2.0.0".
//   - Export shape changed from `{CompareBy, default: Benchmark}` to `{Benchmark, CompareBy}` —
//     i.e. Benchmark is now a NAMED export (matches the README diff switching examples to
//     `const { Benchmark } = require('buffalo-bench')`). This is a breaking change for consumers
//     of the old default export.
// Lifecycle/error machinery is unchanged: typed errors (statusCode 0-7), runCallback `o` returns
// typed errors from hooks, runSample loops ~1s wall clock per sample, run() derives hz, mean,
// median, standard deviation, max/min from the collected times.
(()=>{"use strict";var e=Object.defineProperty,t=(t,r,a)=>(((t,r,a)=>{r in t?e(t,r,{enumerable:!0,configurable:!0,writable:!0,value:a}):t[r]=a})(t,"symbol"!=typeof r?r+"":r,a),a),r="undefined"==typeof performance?()=>Date.now():()=>performance.now(),a=class extends Error{code;message;name;statusCode=0;constructor(e="Something went wrong",t){super(),this.message=e,this.code=t,this.name=this.constructor.name}},s={BenchmarkError:a,BeforeEachError:class extends a{statusCode=1;name="BeforeEachError"},AfterEachError:class extends a{statusCode=2;name="AfterEachError"},RunError:class extends a{statusCode=3;name="RunError"},AfterError:class extends a{statusCode=4;name="AfterError"},BeforeError:class extends a{statusCode=5;name="BeforeError"},FatalError:class extends a{statusCode=7;name="FatalError"}},i=(e=>(e.MeanTime="meanTime",e.MedianTime="medianTime",e.StandardDeviation="standardDeviation",e.MaxTime="maxTime",e.MinTime="minTime",e.Hz="hz",e.RunTime="runTime",e.Cycles="cycles",e.Percent="percent",e))(i||{});function n(e,t,r){let a=new s[r](t);a.stack=e.stack;for(let t in e)e.hasOwnProperty(t)&&(a[t]=e[t]);return a}function m(e){return"AsyncFunction"===e.constructor.name}async function o(e,t,r,...a){if(r)try{await r.bind(e)(...a)}catch(a){return n(a,`Benchmark \`${e.name}\` failed to run \`${r.name}\` callback: ${a.message}`,t)}}var c=class{name;error;cycles=0;samples=0;hz=0;meanTime=0;medianTime=0;standardDeviation=0;maxTime=0;minTime=0;times=[];options;stamp;runTime=0;totalTime=0;constructor(e,t,r={}){this.name=e;let a={...c.defaults,...r};"function"==typeof t?a.fn=t:a={...a,...t},this.options=a}toJSON(){const{name:e,error:t,cycles:r,hz:a,runTime:s,totalTime:i,samples:n,meanTime:m,medianTime:o,standardDeviation:c,maxTime:h,minTime:l}=this;return{name:e,errorMessage:t?t.message:void 
0,cycles:r,samples:n,hz:a,meanTime:m,medianTime:o,standardDeviation:c,maxTime:h,minTime:l,runTime:s,totalTime:i}}compareWith(e,t="percent"){const{error:r,cycles:a,hz:s,meanTime:i,medianTime:n,standardDeviation:m,maxTime:o,minTime:c,runTime:h}=this;if(r)return-1;if(e.error)return 1;switch(t){case"meanTime":return e.meanTime-i;case"medianTime":return e.medianTime-n;case"standardDeviation":return m-e.standardDeviation;case"maxTime":return o-e.maxTime;case"minTime":return e.minTime-c;case"hz":return s-e.hz;case"runTime":return h-e.runTime;case"cycles":return a-e.cycles;case"percent":return Math.trunc(100*(100/i*e.meanTime-100))/100;default:throw new Error(`Unknown compare field: ${t}`)}}async runSample(){const{beforeEach:e,afterEach:t,fn:a}=this.options;let s=r();for(;r()-s<1e3;){const s=r();this.cycles++;const i=await o(this,"BeforeEachError",e);if(i)throw i;let c;try{if(m(a)){let e=r();await a(),c=r()-e}else{let e=r();a(),c=r()-e}}catch(e){throw n(e,`Benchmark \`${this.name}\` failed to run \`fn\`: ${e.message}`,"RunError")}this.times.push(c),this.runTime+=c;const h=await o(this,"AfterEachError",t);if(h)throw h;this.totalTime+=r()-s}}async run(){this.stamp=r();const{maxTime:e,minSamples:t,after:a,before:s,onError:i}=this.options;let n=1e3*e;try{const e=await o(this,"BeforeError",s);if(e)throw e;for(;this.samples<t||this.totalTime<n;)this.samples++,await this.runSample();this.hz=this.cycles/(this.runTime/1e3),this.meanTime=this.runTime/this.times.length,this.medianTime=this.times.sort((e,t)=>e-t)[Math.floor(this.times.length/2)]||0,this.standardDeviation=Math.sqrt(this.times.map(e=>Math.pow(e-this.meanTime,2)).reduce((e,t)=>e+t,0)/this.times.length),this.maxTime=this.times.reduce((e,t)=>Math.max(e,t),0),this.minTime=this.times.reduce((e,t)=>Math.min(e,t),1/0);const r=await o(this,"AfterError",a);if(r)throw r}catch(e){this.error=e;const t=await o(this,"FatalError",i,e);if(t)throw t}}},h=c;t(h,"Suite"),t(h,"version","2.0.0"),t(h,"defaults",{maxTime:5,minSamples:1});var 
l=class{name;error;options;stamp;runTime=0;totalTime=0;benchmarks=[];constructor(e,t={}){this.name=e,this.options={...l.defaults,...t}}toJSON(){const{error:e,name:t,runTime:r,totalTime:a}=this;return{name:t,errorMessage:e?e.message:void 0,runTime:r,totalTime:a,passed:!e,benchmarks:this.getSortedBenchmarksBy("meanTime").map(e=>e.toJSON())}}add(e,t,r={}){let a={minSamples:this.options.minSamples,maxTime:this.options.maxTime,...r};"function"==typeof t?a.fn=t:a={...a,...t};let s=new h(e,a);return this.benchmarks.push(s),s}async run(){this.stamp=r();const{beforeEach:e,afterEach:t,after:a,before:s,onError:i}=this.options;try{const r=await o(this,"BeforeError",s);if(r)throw r;for(let r=0,a=this.benchmarks.length;r<a;r++){let a=this.benchmarks[r];const s=await o(this,"BeforeEachError",e,a,r);if(s)throw s;await a.run(),this.runTime+=a.runTime,this.totalTime+=a.totalTime;const i=await o(this,"AfterEachError",t,a,r);if(i)throw i}const i=await o(this,"AfterError",a);if(i)throw i}catch(e){this.error=e;const t=await o(this,"FatalError",i,e);if(t)throw t}}getSortedBenchmarksBy(e){return this.benchmarks.slice().sort((t,r)=>{let a=r.compareWith(t,e);return a>0?1:a<0?-1:0})}getFastest(e){return this.getSortedBenchmarksBy(e)[0]}getSlowest(e){const t=this.getSortedBenchmarksBy(e);return t[t.length-1]}compareFastestWithSlowest(e){let t="percent"===e?"meanTime":e;const r=this.getFastest(t),a=this.getSlowest(t);return{fastest:r,slowest:a,by:r.compareWith(a,e)}}},u=l;t(u,"defaults",{maxTime:5,minSamples:1}),h.Suite=u;var f={Benchmark:h,CompareBy:i};"undefined"!=typeof module?module.exports=f:self.Benchmark=f})();

@@ -103,5 +103,8 @@ // A benchmarking library that supports async hooks and benchmarks by default.

import { version } from '../package.json';
const version = "2.0.0";
let now = typeof performance === 'undefined' ? () => Date.now() : () => performance.now();
let now =
typeof performance === "undefined"
? () => Date.now()
: () => performance.now();

@@ -118,3 +121,3 @@ //*** Errors ***//

constructor(message = 'Something went wrong', code?: string) {
constructor(message = "Something went wrong", code?: string) {
super();

@@ -130,3 +133,3 @@ this.message = message;

statusCode = 1;
name = 'BeforeEachError';
name = "BeforeEachError";
}

@@ -137,3 +140,3 @@

statusCode = 2;
name = 'AfterEachError';
name = "AfterEachError";
}

@@ -144,3 +147,3 @@

statusCode = 3;
name = 'RunError';
name = "RunError";
}

@@ -151,3 +154,3 @@

statusCode = 4;
name = 'AfterError';
name = "AfterError";
}

@@ -158,3 +161,3 @@

statusCode = 5;
name = 'BeforeError';
name = "BeforeError";
}

@@ -165,3 +168,3 @@

statusCode = 7;
name = 'FatalError';
name = "FatalError";
}

@@ -179,3 +182,9 @@

type ErrorType = 'BeforeEachError' | 'AfterEachError' | 'RunError' | 'AfterError' | 'BeforeError' | 'FatalError';
type ErrorType =
| "BeforeEachError"
| "AfterEachError"
| "RunError"
| "AfterError"
| "BeforeError"
| "FatalError";

@@ -192,11 +201,11 @@ // BenchmarkFunction a function that can be used as a benchmark.

// A function to be run once before each benchmark loop, does not count for run time.
beforeEach?: () => Promise<void> | void;
beforeEach?: (this: Benchmark) => Promise<void> | void;
// A function to be run once after each benchmark loop, does not count for run time.
afterEach?: () => Promise<void> | void;
afterEach?: (this: Benchmark) => Promise<void> | void;
// A function to be run once after the benchmark completes, does not count for run time.
after?: () => Promise<void> | void;
after?: (this: Benchmark) => Promise<void> | void;
// A function to be run once before the benchmark starts, does not count for run time.
before?: () => Promise<void> | void;
before?: (this: Benchmark) => Promise<void> | void;
// A function to be run if an error occurs.
onError?: (error: BenchmarkError) => Promise<void> | void;
onError?: (this: Benchmark, error: BenchmarkError) => Promise<void> | void;
// The function to be run.

@@ -222,11 +231,11 @@ fn: BenchmarkFunction;

export const enum CompareBy {
MeanTime = 'meanTime',
MedianTime = 'medianTime',
StandardDeviation = 'standardDeviation',
MaxTime = 'maxTime',
MinTime = 'minTime',
Hz = 'hz',
RunTime = 'runTime',
Cycles = 'cycles',
Percent = 'percent'
MeanTime = "meanTime",
MedianTime = "medianTime",
StandardDeviation = "standardDeviation",
MaxTime = "maxTime",
MinTime = "minTime",
Hz = "hz",
RunTime = "runTime",
Cycles = "cycles",
Percent = "percent"
}

@@ -236,7 +245,9 @@

name: string,
optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction,
optionsOrFn:
| (Partial<BenchmarkOptions> & { fn: BenchmarkFunction })
| BenchmarkFunction,
options: Partial<BenchmarkOptions>
) => Benchmark;
interface Benchmark {
export interface Benchmark {
Suite: typeof Suite;

@@ -270,3 +281,7 @@ readonly version: string;

// helper to get the correct error type from a normal error
function getError(error: Error, message: string, type: ErrorType): BenchmarkError {
function getError(
error: Error,
message: string,
type: ErrorType
): BenchmarkError {
let benchmarkError = new Errors[type](message);

@@ -284,3 +299,3 @@ benchmarkError.stack = error.stack;

function isAsync(fn: BenchmarkFunction): boolean {
return fn.constructor.name === 'AsyncFunction';
return fn.constructor.name === "AsyncFunction";
}

@@ -300,3 +315,5 @@

error as Error,
`Benchmark \`${instance.name}\` failed to run \`${callback.name}\` callback: ${(error as Error).message}`,
`Benchmark \`${instance.name}\` failed to run \`${
callback.name
}\` callback: ${(error as Error).message}`,
errorTypeIfAny

@@ -309,3 +326,3 @@ );

// The benchmark class
class Benchmark implements Benchmark {
export class Benchmark implements Benchmark {
static Suite: typeof Suite;

@@ -337,3 +354,9 @@ static readonly version: string = version;

constructor(name: string, optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction, options: Partial<BenchmarkOptions> = {}) {
constructor(
name: string,
optionsOrFn:
| (Partial<BenchmarkOptions> & { fn: BenchmarkFunction })
| BenchmarkFunction,
options: Partial<BenchmarkOptions> = {}
) {
this.name = name;

@@ -345,3 +368,3 @@ let opts = {

if (typeof optionsOrFn === 'function') {
if (typeof optionsOrFn === "function") {
opts.fn = optionsOrFn;

@@ -359,3 +382,16 @@ } else {

toJSON(): JsonBenchmark {
const { name, error, cycles, hz, runTime, totalTime, samples, meanTime, medianTime, standardDeviation, maxTime, minTime } = this;
const {
name,
error,
cycles,
hz,
runTime,
totalTime,
samples,
meanTime,
medianTime,
standardDeviation,
maxTime,
minTime
} = this;

@@ -378,4 +414,17 @@ return {

compareWith(other: Benchmark, compareBy: CompareBy = CompareBy.Percent): number {
const { error, cycles, hz, meanTime, medianTime, standardDeviation, maxTime, minTime, runTime } = this;
compareWith(
other: Benchmark,
compareBy: CompareBy = CompareBy.Percent
): number {
const {
error,
cycles,
hz,
meanTime,
medianTime,
standardDeviation,
maxTime,
minTime,
runTime
} = this;

@@ -391,20 +440,22 @@ if (error) {

switch (compareBy) {
case 'meanTime':
case "meanTime":
return other.meanTime - meanTime;
case 'medianTime':
case "medianTime":
return other.medianTime - medianTime;
case 'standardDeviation':
case "standardDeviation":
return standardDeviation - other.standardDeviation;
case 'maxTime':
case "maxTime":
return maxTime - other.maxTime;
case 'minTime':
case "minTime":
return other.minTime - minTime;
case 'hz':
case "hz":
return hz - other.hz;
case 'runTime':
case "runTime":
return runTime - other.runTime;
case 'cycles':
case "cycles":
return cycles - other.cycles;
case 'percent':
return Math.trunc(((100 / meanTime) * other.meanTime - 100) * 100) / 100;
case "percent":
return (
Math.trunc(((100 / meanTime) * other.meanTime - 100) * 100) / 100
);
default:

@@ -423,3 +474,7 @@ throw new Error(`Unknown compare field: ${compareBy}`);

this.cycles++;
const BeforeEachError = await runCallback(this, 'BeforeEachError', beforeEach);
const BeforeEachError = await runCallback(
this,
"BeforeEachError",
beforeEach
);
if (BeforeEachError) {

@@ -441,3 +496,9 @@ throw BeforeEachError;

} catch (error) {
throw getError(error as Error, `Benchmark \`${this.name}\` failed to run \`fn\`: ${(error as Error).message}`, 'RunError');
throw getError(
error as Error,
`Benchmark \`${this.name}\` failed to run \`fn\`: ${
(error as Error).message
}`,
"RunError"
);
}

@@ -448,3 +509,7 @@

const AfterEachError = await runCallback(this, 'AfterEachError', afterEach);
const AfterEachError = await runCallback(
this,
"AfterEachError",
afterEach
);
if (AfterEachError) {

@@ -465,3 +530,3 @@ throw AfterEachError;

try {
const beforeError = await runCallback(this, 'BeforeError', before);
const beforeError = await runCallback(this, "BeforeError", before);
if (beforeError) {

@@ -471,3 +536,6 @@ throw beforeError;

while (this.samples < minSamples || this.totalTime < maxTimeInMilliseconds) {
while (
this.samples < minSamples ||
this.totalTime < maxTimeInMilliseconds
) {
this.samples++;

@@ -482,10 +550,19 @@ await this.runSample();

this.meanTime = this.runTime / this.times.length;
this.medianTime = this.times.sort((a, b) => a - b)[Math.floor(this.times.length / 2)] || 0;
this.standardDeviation = Math.sqrt(this.times.map((t) => Math.pow(t - this.meanTime, 2)).reduce((a, b) => a + b, 0) / this.times.length);
this.medianTime =
this.times.sort((a, b) => a - b)[Math.floor(this.times.length / 2)] ||
0;
this.standardDeviation = Math.sqrt(
this.times
.map((t) => Math.pow(t - this.meanTime, 2))
.reduce((a, b) => a + b, 0) / this.times.length
);
// Calculate the max, min, and average times.
this.maxTime = this.times.reduce((max, time) => Math.max(max, time), 0);
this.minTime = this.times.reduce((min, time) => Math.min(min, time), Infinity);
this.minTime = this.times.reduce(
(min, time) => Math.min(min, time),
Infinity
);
const afterError = await runCallback(this, 'AfterError', after);
const afterError = await runCallback(this, "AfterError", after);
if (afterError) {

@@ -497,3 +574,8 @@ throw afterError;

const onErrorError = await runCallback(this, 'FatalError', onError, error);
const onErrorError = await runCallback(
this,
"FatalError",
onError,
error
);
if (onErrorError) {

@@ -513,11 +595,19 @@ throw onErrorError;

// A function to be run once before each benchmark run
beforeEach?: (benchmark: Benchmark, i: number) => Promise<void> | void;
beforeEach?: (
this: Suite,
benchmark: Benchmark,
i: number
) => Promise<void> | void;
// A function to be run once after each benchmark run
afterEach?: (benchmark: Benchmark, i: number) => Promise<void> | void;
afterEach?: (
this: Suite,
benchmark: Benchmark,
i: number
) => Promise<void> | void;
// A function to be run once after the suite completes
after?: () => Promise<void> | void;
after?: (this: Suite) => Promise<void> | void;
// A function to be run once before the suite starts
before?: () => Promise<void> | void;
before?: (this: Suite) => Promise<void> | void;
// A function to be run if an error occurs.
onError?: (error: BenchmarkError) => Promise<void> | void;
onError?: (this: Suite, error: BenchmarkError) => Promise<void> | void;
};

@@ -534,3 +624,6 @@

type SuiteConstructor = (name: string, options?: Partial<SuiteOptions>) => Suite;
type SuiteConstructor = (
name: string,
options?: Partial<SuiteOptions>
) => Suite;

@@ -552,3 +645,9 @@ interface Suite {

constructor: SuiteConstructor;
add(name: string, optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction, options: Partial<BenchmarkOptions>): Benchmark;
add(
name: string,
optionsOrFn:
| (Partial<BenchmarkOptions> & { fn: BenchmarkFunction })
| BenchmarkFunction,
options: Partial<BenchmarkOptions>
): Benchmark;
toJSON(): JsonSuite;

@@ -560,3 +659,7 @@ run(): Promise<void>;

getSlowest(sortedBy: CompareBy): Benchmark;
compareFastestWithSlowest(compareBy: CompareBy): { fastest: Benchmark; slowest: Benchmark; by: number };
compareFastestWithSlowest(compareBy: CompareBy): {
fastest: Benchmark;
slowest: Benchmark;
by: number;
};
}

@@ -595,3 +698,5 @@

passed: !error,
benchmarks: this.getSortedBenchmarksBy(CompareBy.MeanTime).map((benchmark) => benchmark.toJSON())
benchmarks: this.getSortedBenchmarksBy(CompareBy.MeanTime).map(
(benchmark) => benchmark.toJSON()
)
};

@@ -602,3 +707,5 @@ }

name: string,
optionsOrFn: (Partial<BenchmarkOptions> & { fn: BenchmarkFunction }) | BenchmarkFunction,
optionsOrFn:
| (Partial<BenchmarkOptions> & { fn: BenchmarkFunction })
| BenchmarkFunction,
options: Partial<BenchmarkOptions> = {}

@@ -614,3 +721,3 @@ ): Benchmark {

if (typeof optionsOrFn === 'function') {
if (typeof optionsOrFn === "function") {
opts.fn = optionsOrFn;

@@ -633,3 +740,3 @@ } else {

try {
const beforeError = await runCallback(this, 'BeforeError', before);
const beforeError = await runCallback(this, "BeforeError", before);
if (beforeError) {

@@ -641,3 +748,9 @@ throw beforeError;

let benchmark = this.benchmarks[i];
const beforeEachError = await runCallback(this, 'BeforeEachError', beforeEach, benchmark, i);
const beforeEachError = await runCallback(
this,
"BeforeEachError",
beforeEach,
benchmark,
i
);
if (beforeEachError) {

@@ -651,3 +764,9 @@ throw beforeEachError;

const afterEachError = await runCallback(this, 'AfterEachError', afterEach, benchmark, i);
const afterEachError = await runCallback(
this,
"AfterEachError",
afterEach,
benchmark,
i
);
if (afterEachError) {

@@ -658,3 +777,3 @@ throw afterEachError;

const afterError = await runCallback(this, 'AfterError', after);
const afterError = await runCallback(this, "AfterError", after);
if (afterError) {

@@ -666,3 +785,8 @@ throw afterError;

const onErrorError = await runCallback(this, 'FatalError', onError, error);
const onErrorError = await runCallback(
this,
"FatalError",
onError,
error
);
if (onErrorError) {

@@ -695,3 +819,4 @@ throw onErrorError;

compareFastestWithSlowest(compareBy: CompareBy) {
let sortBy = compareBy === CompareBy.Percent ? CompareBy.MeanTime : compareBy;
let sortBy =
compareBy === CompareBy.Percent ? CompareBy.MeanTime : compareBy;
const fastest = this.getFastest(sortBy);

@@ -709,4 +834,1 @@ const slowest = this.getSlowest(sortBy);

Benchmark.Suite = Suite;
// Export the Benchmark class.
export default Benchmark;
{
"name": "buffalo-bench",
"version": "1.0.3",
"version": "2.0.0",
"description": "A benchmarking library that supports async hooks and benchmarks by default.",
"source": "lib/index.ts",
"main": "dist/index.min.js",
"module": "dist/index.min.js",
"module": "dist/index.mjs",
"unpkg": "dist/index.min.js",
"browser": "dist/index.min.js",
"types": "types/lib/index.d.ts",
"types": "dist/index.d.ts",
"exports": {
"import": "./dist/index.mjs",
"require": "./dist/index.js"
},
"repository": "git@github.com:Masquerade-Circus/buffalo-bench.git",

@@ -18,3 +22,3 @@ "author": "Masquerade <christian@masquerade-circus.net>",

"lib",
"register.js"
"tsconfig.json"
],

@@ -34,4 +38,8 @@ "keywords": [

"scripts": {
"dev": "nodemon -e js,ts -w ./bench -w ./lib --exec 'yarn test'",
"test": "node ./index.js",
"dev": "nodemon -e js,ts -w ./tests -w ./lib --exec 'yarn test'",
"test": "node --require ts-node/register tests/index.ts",
"test-cjs": "node tests/index.cjs",
"test-mjs": "node tests/index.mjs",
"test-min": "node tests/index.min.js",
"test-all": "yarn test && yarn test-cjs && yarn test-mjs && yarn test-min",
"build": "node ./build.js",

@@ -43,17 +51,16 @@ "remark": "remark . -o",

},
"dependencies": {
"esbuild": "^0.13.13",
"pirates": "^4.0.1"
},
"dependencies": {},
"devDependencies": {
"@release-it/conventional-changelog": "^3.3.0",
"@types/node": "^16.11.7",
"@release-it/conventional-changelog": "^5.0.0",
"@types/node": "^18.0.0",
"cz-conventional-changelog": "^3.3.0",
"nodemon": "^2.0.15",
"release-it": "^14.11.6",
"remark-cli": "^10.0.0",
"terser": "^5.9.0",
"tsc": "^2.0.3",
"esbuild": "^0.14.47",
"nodemon": "^2.0.18",
"release-it": "^15.1.0",
"remark-cli": "^11.0.0",
"terser": "^5.14.1",
"ts-node": "^10.8.1",
"tsc": "^2.0.4",
"tsc-prog": "^2.2.1",
"typescript": "^4.4.4"
"typescript": "^4.7.4"
},

@@ -60,0 +67,0 @@ "config": {

@@ -48,3 +48,3 @@ <div style="text-align: center">

```js
const Benchmark = require('buffalo-bench');
const { Benchmark } = require('buffalo-bench');

@@ -66,2 +66,4 @@ // Create a benchmark only with name and function

```js
const { Benchmark } = require('buffalo-bench');
// Create a benchmark with all the options

@@ -97,2 +99,4 @@ const bench = new Benchmark('myBenchmark', {

```js
const { Benchmark } = require('buffalo-bench');
let suite = new Benchmark.Suite("String comparison", {

@@ -207,3 +211,3 @@ beforeEach(benchmark) {

* `getSlowest(sortedBy: CompareBy)`: Get the slowest benchmark in the suite sorting by the given `CompareBy` metric.
* `CompareFastestWithSlowest(compareBy: CompareBy)`: Compare the fastest benchmark with the slowest benchmark sorting by the given `CompareBy` metric.
* `compareFastestWithSlowest(compareBy: CompareBy)`: Compare the fastest benchmark with the slowest benchmark sorting by the given `CompareBy` metric.
* `run`: Async method that runs the suite.

@@ -239,3 +243,3 @@ * `toJSON`: Return a JSON representation of the suite.

If you want to write your benchmarks with typescript, you can use the library as it is by requiring in your project the `buffalo-bench/register` file.
If you want to write your benchmarks with typescript, you must install the `ts-node` library and require in your project the `ts-node/register` file.

@@ -245,8 +249,8 @@ Example:

```js
require('buffalo-bench/register');
require('./my-benchmark.ts');)
require('ts-node/register');
require('./my-benchmark.ts');
```
```ts
import Benchmark from 'buffalo-bench';
import { Benchmark } from 'buffalo-bench/lib';

@@ -260,11 +264,7 @@ const bench = new Benchmark('myBenchmark', () => {});

This register file uses the `eslint` and `pirates` modules to transpile the typescript code to javascript on the fly.
Take into account that this will not check the typescript code for errors. If you want to check your typescript code, you must use the `tsc` package.
## Development and Build
- Use `yarn dev` to watch and compile the library on every change to it running the benchmarks in the bench folder.
- Use `yarn dev` to watch and compile the library on every change to it running the index.ts benchmark in the tests folder.
- Use `yarn build` to build the library.
- Use `yarn commit` to commit your changes.
- Use `yarn commit` to commit your changes.

@@ -271,0 +271,0 @@ ## Contributing

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc