New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

@chainsafe/benchmark

Package Overview
Dependencies
Maintainers
2
Versions
12
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@chainsafe/benchmark - npm Package Compare versions

Comparing version 1.2.1 to 1.2.2

lib/cjs/benchmark/options.d.ts

53

lib/cjs/benchmark/benchmarkFn.js

@@ -14,17 +14,9 @@ "use strict";

const runBenchmarkFn_js_1 = require("./runBenchmarkFn.js");
const options_js_1 = require("../cli/options.js");
const options_js_1 = require("./options.js");
exports.bench = createBenchmarkFunction(function (idOrOpts, fn) {
const { fn: benchTask, ...opts } = coerceToOptsObj(idOrOpts, fn);
const { fn: benchTask, before, beforeEach, ...opts } = coerceToOptsObj(idOrOpts, fn);
const currentSuite = (0, runner_1.getCurrentSuite)();
const globalOptions = globalState_js_1.store.getGlobalOptions() ?? {};
const parentOptions = globalState_js_1.store.getOptions((0, runner_1.getCurrentSuite)()) ?? {};
const options = { ...globalOptions, ...parentOptions, ...opts };
const { timeoutBench, maxMs, minMs } = options;
let timeout = timeoutBench ?? options_js_1.optionsDefault.timeoutBench;
if (maxMs && maxMs > timeout) {
timeout = maxMs * 1.5;
}
if (minMs && minMs > timeout) {
timeout = minMs * 1.5;
}
const parentOptions = globalState_js_1.store.getOptions(currentSuite) ?? {};
const options = (0, options_js_1.getBenchmarkOptionsWithDefaults)({ ...globalOptions, ...parentOptions, ...opts });
async function handler() {

@@ -35,6 +27,8 @@ // Ensure bench id is unique

}
// Persist full results if requested. dir is created in `beforeAll`
const benchmarkResultsCsvDir = process.env.BENCHMARK_RESULTS_CSV_DIR;
const persistRunsNs = Boolean(benchmarkResultsCsvDir);
const { result, runsNs } = await (0, runBenchmarkFn_js_1.runBenchFn)({ ...options, fn: benchTask }, persistRunsNs);
const { result, runsNs } = await (0, runBenchmarkFn_js_1.runBenchFn)({
...options,
fn: benchTask,
before,
beforeEach,
});
// Store result for:

@@ -44,2 +38,4 @@ // - to persist benchmark data latter

globalState_js_1.store.setResult(opts.id, result);
// Persist full results if requested. dir is created in `beforeAll`
const benchmarkResultsCsvDir = process.env.BENCHMARK_RESULTS_CSV_DIR;
if (benchmarkResultsCsvDir) {

@@ -57,3 +53,3 @@ node_fs_1.default.mkdirSync(benchmarkResultsCsvDir, { recursive: true });

concurrent: false,
timeout,
timeout: options.timeoutBench,
meta: {

@@ -63,16 +59,13 @@ "chainsafe/benchmark": true,

});
const { id: _, ...optionsWithoutId } = opts;
(0, runner_1.setFn)(task, handler);
globalState_js_1.store.setOptions(task, optionsWithoutId);
task.onFinished = [
() => {
globalState_js_1.store.removeOptions(task);
},
() => {
// Clear up the assigned handler to clean the memory
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-expect-error
(0, runner_1.setFn)(task, null);
},
];
globalState_js_1.store.setOptions(task, opts);
const cleanup = () => {
globalState_js_1.store.removeOptions(task);
// Clear up the assigned handler to clean the memory
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-expect-error
(0, runner_1.setFn)(task, null);
};
task.onFailed = [cleanup];
task.onFinished = [cleanup];
});

@@ -79,0 +72,0 @@ function createBenchmarkFunction(fn) {

@@ -14,3 +14,3 @@ import { Task, Suite, File } from "@vitest/runner";

});
onTestStarted(_task: Task): void;
onTestStarted(task: Task): void;
onTestFinished(task: Task): void;

@@ -17,0 +17,0 @@ onSuiteStarted(suite: Suite): void;

@@ -7,3 +7,3 @@ "use strict";

const format_js_1 = require("./format.js");
const options_js_1 = require("../cli/options.js");
const options_js_1 = require("./options.js");
class BenchmarkReporter {

@@ -18,3 +18,3 @@ indents = 0;

this.prevResults = new Map();
this.threshold = benchmarkOpts.threshold ?? options_js_1.optionsDefault.threshold;
this.threshold = benchmarkOpts.threshold ?? options_js_1.defaultBenchmarkOptions.threshold;
if (prevBench) {

@@ -26,5 +26,11 @@ for (const bench of prevBench.results) {

}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
onTestStarted(_task) {
// this.log(task.name, "started");
onTestStarted(task) {
if (task.mode === "skip") {
this.skipped++;
(0, output_js_1.consoleLog)(`${this.indent()}${(0, output_js_1.color)("pending", " - %s")}`, task.name);
}
else if (task.mode === "todo") {
this.skipped++;
(0, output_js_1.consoleLog)(`${this.indent()}${(0, output_js_1.color)("pending", " - %s")}`, task.name);
}
}

@@ -45,4 +51,5 @@ onTestFinished(task) {

this.failed++;
(0, output_js_1.consoleLog)(this.indent() + (0, output_js_1.color)("fail", " %d) %s"), ++this.failed, task.name);
(0, output_js_1.consoleLog)(task.result?.errors);
const fmt = this.indent() + (0, output_js_1.color)("fail", " " + output_js_1.symbols.err) + (0, output_js_1.color)("fail", " %s");
(0, output_js_1.consoleLog)(fmt, task.name);
(0, output_js_1.consoleLog)(task.result?.errors?.map((e) => e.stackStr).join("\n"));
break;

@@ -49,0 +56,0 @@ }

@@ -11,3 +11,3 @@ import { BenchmarkResult, BenchmarkOpts } from "../types.js";

};
export declare function runBenchFn<T, T2>(opts: BenchmarkRunOptsWithFn<T, T2>, persistRunsNs?: boolean): Promise<{
export declare function runBenchFn<T, T2>(opts: BenchmarkRunOptsWithFn<T, T2>): Promise<{
result: BenchmarkResult;

@@ -14,0 +14,0 @@ runsNs: bigint[];

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.runBenchFn = runBenchFn;
async function runBenchFn(opts, persistRunsNs) {
const minRuns = opts.minRuns || 1;
const maxRuns = opts.maxRuns || Infinity;
const maxMs = opts.maxMs || Infinity;
const minMs = opts.minMs || 100;
const maxWarmUpMs = opts.maxWarmUpMs !== undefined ? opts.maxWarmUpMs : 500;
const maxWarmUpRuns = opts.maxWarmUpRuns !== undefined ? opts.maxWarmUpRuns : 1000;
// Ratio of maxMs that the warmup is allow to take from ellapsedMs
const math_js_1 = require("../utils/math.js");
const options_js_1 = require("./options.js");
const termination_js_1 = require("./termination.js");
const convergenceCriteria = {
["linear"]: termination_js_1.createLinearConvergenceCriteria,
["cv"]: termination_js_1.createCVConvergenceCriteria,
};
async function runBenchFn(opts) {
const { id, before, beforeEach, fn, ...rest } = opts;
const benchOptions = (0, options_js_1.getBenchmarkOptionsWithDefaults)(rest);
const { maxMs, maxRuns, maxWarmUpMs, maxWarmUpRuns, runsFactor, threshold, convergence, averageCalculation } = benchOptions;
if (maxWarmUpMs >= maxMs) {
throw new Error(`Warmup time must be lower than max run time. maxWarmUpMs: ${maxWarmUpMs}, maxMs: ${maxMs}`);
}
if (maxWarmUpRuns >= maxRuns) {
throw new Error(`Warmup runs must be lower than max runs. maxWarmUpRuns: ${maxWarmUpRuns}, maxRuns: ${maxRuns}`);
}
if (averageCalculation !== "simple" && averageCalculation !== "clean-outliers") {
throw new Error(`Average calculation logic is not defined. ${averageCalculation}`);
}
if (convergence !== "linear" && convergence !== "cv") {
throw new Error(`Unknown convergence value ${convergence}`);
}
// Ratio of maxMs that the warmup is allow to take from elapsedMs
const maxWarmUpRatio = 0.5;
const convergeFactor = opts.convergeFactor || 0.5 / 100; // 0.5%
const runsFactor = opts.runsFactor || 1;
const maxWarmUpNs = BigInt(maxWarmUpMs) * BigInt(1e6);
const sampleEveryMs = 100;
const maxWarmUpNs = BigInt(benchOptions.maxWarmUpMs) * BigInt(1e6);
const runsNs = [];
const startRunMs = Date.now();
const shouldTerminate = convergenceCriteria[convergence](startRunMs, benchOptions);
let runIdx = 0;

@@ -23,18 +37,9 @@ let totalNs = BigInt(0);

let totalWarmUpRuns = 0;
let prevAvg0 = 0;
let prevAvg1 = 0;
let lastConvergenceSample = startRunMs;
let isWarmUp = maxWarmUpNs > 0 && maxWarmUpRuns > 0;
const inputAll = opts.before ? await opts.before() : undefined;
let isWarmUpPhase = maxWarmUpNs > 0 && maxWarmUpRuns > 0;
const inputAll = before ? await before() : undefined;
while (true) {
const ellapsedMs = Date.now() - startRunMs;
const mustStop = ellapsedMs >= maxMs || runIdx >= maxRuns;
const mayStop = ellapsedMs > minMs && runIdx > minRuns;
// Exceeds limits, must stop now
if (mustStop) {
break;
}
const input = opts.beforeEach ? await opts.beforeEach(inputAll, runIdx) : undefined;
const elapsedMs = Date.now() - startRunMs;
const input = beforeEach ? await beforeEach(inputAll, runIdx) : undefined;
const startNs = process.hrtime.bigint();
await opts.fn(input);
await fn(input);
const endNs = process.hrtime.bigint();

@@ -46,3 +51,3 @@ const runNs = endNs - startNs;

}
if (isWarmUp) {
if (isWarmUpPhase) {
// Warm-up, do not count towards results

@@ -52,41 +57,13 @@ totalWarmUpRuns += 1;

// On any warm-up finish condition, mark isWarmUp = true to prevent having to check them again
if (totalWarmUpNs >= maxWarmUpNs || totalWarmUpRuns >= maxWarmUpRuns || ellapsedMs / maxMs >= maxWarmUpRatio) {
isWarmUp = false;
if (totalWarmUpNs >= maxWarmUpNs || totalWarmUpRuns >= maxWarmUpRuns || elapsedMs / maxMs >= maxWarmUpRatio) {
isWarmUpPhase = false;
}
continue;
}
else {
// Persist results
runIdx += 1;
totalNs += runNs;
// If the caller wants the exact times of all runs, persist them
if (persistRunsNs)
runsNs.push(runNs);
// When is a good time to stop a benchmark? A naive answer is after N miliseconds or M runs.
// This code aims to stop the benchmark when the average fn run time has converged at a value
// within a given convergence factor. To prevent doing expensive math to often for fast fn,
// it only takes samples every `sampleEveryMs`. It stores two past values to be able to compute
// a very rough linear and quadratic convergence.
if (Date.now() - lastConvergenceSample > sampleEveryMs) {
lastConvergenceSample = Date.now();
const avg = Number(totalNs / BigInt(runIdx));
// Compute convergence (1st order + 2nd order)
const a = prevAvg0;
const b = prevAvg1;
const c = avg;
// Only do convergence math if it may stop
if (mayStop) {
// Aprox linear convergence
const convergence1 = Math.abs(c - a);
// Aprox quadratic convergence
const convergence2 = Math.abs(b - (a + c) / 2);
// Take the greater of both to enfore linear and quadratic are below convergeFactor
const convergence = Math.max(convergence1, convergence2) / a;
// Okay to stop + has converged, stop now
if (convergence < convergeFactor) {
break;
}
}
prevAvg0 = prevAvg1;
prevAvg1 = avg;
}
// Persist results
runIdx += 1;
totalNs += runNs;
runsNs.push(runNs);
if (shouldTerminate(runIdx, totalNs, runsNs)) {
break;
}

@@ -110,10 +87,17 @@ }

}
const averageNs = Number(totalNs / BigInt(runIdx)) / runsFactor;
let averageNs;
if (averageCalculation === "simple") {
averageNs = Number(totalNs / BigInt(runIdx)) / runsFactor;
}
if (averageCalculation === "clean-outliers") {
const cleanData = (0, math_js_1.filterOutliers)(runsNs, false, math_js_1.OutlierSensitivity.Mild);
averageNs = Number((0, math_js_1.calcSum)(cleanData) / BigInt(cleanData.length)) / runsFactor;
}
return {
result: {
id: opts.id,
id: id,
averageNs,
runsDone: runIdx,
totalMs: Date.now() - startRunMs,
threshold: opts.noThreshold === true ? Infinity : opts.threshold,
threshold,
},

@@ -120,0 +104,0 @@ runsNs,

@@ -106,6 +106,3 @@ "use strict";

debug("finished tests. passed: %i, skipped: %i, failed: %i", passed.length, skipped.length, failed.length);
if (failed.length > 0) {
throw failed[0].result?.errors;
}
if (passed.length + skipped.length === res.length) {
if (passed.length + skipped.length + failed.length === res.length) {
return globalState_js_1.store.getAllResults();

@@ -112,0 +109,0 @@ }

import { Options } from "yargs";
import { StorageOptions, BenchmarkOpts, FileCollectionOptions } from "../types.js";
export declare const optionsDefault: {
threshold: number;
timeoutBench: number;
historyLocalPath: string;

@@ -7,0 +5,0 @@ historyCacheKey: string;

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.benchmarkOptions = exports.storageOptions = exports.fileCollectionOptions = exports.optionsDefault = void 0;
const options_js_1 = require("../benchmark/options.js");
exports.optionsDefault = {
threshold: 2,
timeoutBench: 10_000,
historyLocalPath: "./benchmark_data",

@@ -115,3 +114,3 @@ historyCacheKey: "benchmark_data",

type: "number",
default: exports.optionsDefault.threshold,
default: options_js_1.defaultBenchmarkOptions.threshold,
group: benchmarkGroup,

@@ -122,2 +121,3 @@ },

description: "Max number of fn() runs, after which the benchmark stops",
default: options_js_1.defaultBenchmarkOptions.maxRuns,
group: benchmarkGroup,

@@ -128,2 +128,3 @@ },

description: "Min number of fn() runs before considering stopping the benchmark after converging",
default: options_js_1.defaultBenchmarkOptions.minRuns,
group: benchmarkGroup,

@@ -134,2 +135,3 @@ },

description: "Max total miliseconds of runs, after which the benchmark stops",
default: options_js_1.defaultBenchmarkOptions.maxMs,
group: benchmarkGroup,

@@ -140,2 +142,3 @@ },

description: "Min total miiliseconds of runs before considering stopping the benchmark after converging",
default: options_js_1.defaultBenchmarkOptions.minMs,
group: benchmarkGroup,

@@ -146,2 +149,3 @@ },

description: "Maximum real benchmark function run time before starting to count towards results. Set to 0 to not warm-up. May warm up for less ms if the `maxWarmUpRuns` condition is met first.",
default: options_js_1.defaultBenchmarkOptions.maxWarmUpMs,
group: benchmarkGroup,

@@ -152,2 +156,3 @@ },

description: "Maximum benchmark function runs before starting to count towards results. Set to 0 to not warm-up. May warm up for less ms if the `maxWarmUpMs` condition is met first.",
default: options_js_1.defaultBenchmarkOptions.maxWarmUpRuns,
group: benchmarkGroup,

@@ -158,2 +163,3 @@ },

description: "Convergance factor (0,1) at which the benchmark automatically stops. Set to 1 to disable",
default: options_js_1.defaultBenchmarkOptions.convergeFactor,
group: benchmarkGroup,

@@ -164,2 +170,3 @@ },

description: "If fn() contains a foor loop repeating a task N times, you may set runsFactor = N to scale down the results.",
default: options_js_1.defaultBenchmarkOptions.runsFactor,
group: benchmarkGroup,

@@ -170,2 +177,3 @@ },

description: "Run `sleep(0)` after each fn() call. Use when the event loop needs to tick to free resources created by fn()",
default: options_js_1.defaultBenchmarkOptions.yieldEventLoopAfterEach,
group: benchmarkGroup,

@@ -176,3 +184,3 @@ },

description: "Hard timeout for each benchmark",
default: exports.optionsDefault.timeoutBench,
default: options_js_1.defaultBenchmarkOptions.timeoutBench,
group: benchmarkGroup,

@@ -183,3 +191,3 @@ },

description: "List of setup files to load before the tests",
default: [],
default: options_js_1.defaultBenchmarkOptions.setupFiles,
group: benchmarkGroup,

@@ -190,6 +198,20 @@ },

description: "Trigger GC (if available) after every benchmark",
default: false,
default: options_js_1.defaultBenchmarkOptions.triggerGC,
group: benchmarkGroup,
},
convergence: {
type: "string",
description: "The algorithm used to detect the convergence to stop benchmark runs",
default: options_js_1.defaultBenchmarkOptions.convergence,
choices: ["linear", "cv"],
group: benchmarkGroup,
},
averageCalculation: {
type: "string",
description: "Use simple average of all runs or clean the outliers before calculating average",
default: options_js_1.defaultBenchmarkOptions.averageCalculation,
choices: ["simple", "clean-outliers"],
group: benchmarkGroup,
},
};
//# sourceMappingURL=options.js.map

@@ -46,2 +46,3 @@ "use strict";

const octokit_js_1 = require("../github/octokit.js");
const options_js_2 = require("../benchmark/options.js");
const debug = (0, debug_1.default)("@chainsafe/benchmark/cli");

@@ -98,3 +99,3 @@ async function run(opts_) {

}
const resultsComp = (0, compute_js_1.computePerformanceReport)(currBench, prevBench, opts.threshold);
const resultsComp = (0, compute_js_1.computePerformanceReport)(currBench, prevBench, opts.threshold ?? options_js_2.defaultBenchmarkOptions.threshold);
debug("detecting to post comment. skipPostComment: %o, isGaRun: %o", !opts.skipPostComment, (0, context_js_1.isGaRun)());

@@ -101,0 +102,0 @@ if (!opts.skipPostComment && (0, context_js_1.isGaRun)()) {

@@ -64,2 +64,10 @@ export interface FileCollectionOptions {

triggerGC?: boolean;
/**
* The algorithm to detect the convergence to stop the benchmark function runs.
* linear - Calculate the moving average among last 3 runs average and compare through quadratic formula
* cv - Coefficient Variance is a statistical tool which calculates data pattern on all runs and calculate median
* */
convergence?: "linear" | "cv";
/** Use simple average of all runs or clean the outliers before calculating average */
averageCalculation?: "simple" | "clean-outliers";
};

@@ -66,0 +74,0 @@ export type PartialBy<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;

@@ -7,17 +7,9 @@ import fs from "node:fs";

import { runBenchFn } from "./runBenchmarkFn.js";
import { optionsDefault } from "../cli/options.js";
import { getBenchmarkOptionsWithDefaults } from "./options.js";
export const bench = createBenchmarkFunction(function (idOrOpts, fn) {
const { fn: benchTask, ...opts } = coerceToOptsObj(idOrOpts, fn);
const { fn: benchTask, before, beforeEach, ...opts } = coerceToOptsObj(idOrOpts, fn);
const currentSuite = getCurrentSuite();
const globalOptions = store.getGlobalOptions() ?? {};
const parentOptions = store.getOptions(getCurrentSuite()) ?? {};
const options = { ...globalOptions, ...parentOptions, ...opts };
const { timeoutBench, maxMs, minMs } = options;
let timeout = timeoutBench ?? optionsDefault.timeoutBench;
if (maxMs && maxMs > timeout) {
timeout = maxMs * 1.5;
}
if (minMs && minMs > timeout) {
timeout = minMs * 1.5;
}
const parentOptions = store.getOptions(currentSuite) ?? {};
const options = getBenchmarkOptionsWithDefaults({ ...globalOptions, ...parentOptions, ...opts });
async function handler() {

@@ -28,6 +20,8 @@ // Ensure bench id is unique

}
// Persist full results if requested. dir is created in `beforeAll`
const benchmarkResultsCsvDir = process.env.BENCHMARK_RESULTS_CSV_DIR;
const persistRunsNs = Boolean(benchmarkResultsCsvDir);
const { result, runsNs } = await runBenchFn({ ...options, fn: benchTask }, persistRunsNs);
const { result, runsNs } = await runBenchFn({
...options,
fn: benchTask,
before,
beforeEach,
});
// Store result for:

@@ -37,2 +31,4 @@ // - to persist benchmark data latter

store.setResult(opts.id, result);
// Persist full results if requested. dir is created in `beforeAll`
const benchmarkResultsCsvDir = process.env.BENCHMARK_RESULTS_CSV_DIR;
if (benchmarkResultsCsvDir) {

@@ -50,3 +46,3 @@ fs.mkdirSync(benchmarkResultsCsvDir, { recursive: true });

concurrent: false,
timeout,
timeout: options.timeoutBench,
meta: {

@@ -56,16 +52,13 @@ "chainsafe/benchmark": true,

});
const { id: _, ...optionsWithoutId } = opts;
setFn(task, handler);
store.setOptions(task, optionsWithoutId);
task.onFinished = [
() => {
store.removeOptions(task);
},
() => {
// Clear up the assigned handler to clean the memory
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-expect-error
setFn(task, null);
},
];
store.setOptions(task, opts);
const cleanup = () => {
store.removeOptions(task);
// Clear up the assigned handler to clean the memory
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-expect-error
setFn(task, null);
};
task.onFailed = [cleanup];
task.onFinished = [cleanup];
});

@@ -72,0 +65,0 @@ function createBenchmarkFunction(fn) {

@@ -14,3 +14,3 @@ import { Task, Suite, File } from "@vitest/runner";

});
onTestStarted(_task: Task): void;
onTestStarted(task: Task): void;
onTestFinished(task: Task): void;

@@ -17,0 +17,0 @@ onSuiteStarted(suite: Suite): void;

import { color, consoleLog, symbols } from "../utils/output.js";
import { store } from "./globalState.js";
import { formatResultRow } from "./format.js";
import { optionsDefault } from "../cli/options.js";
import { defaultBenchmarkOptions } from "./options.js";
export class BenchmarkReporter {

@@ -14,3 +14,3 @@ indents = 0;

this.prevResults = new Map();
this.threshold = benchmarkOpts.threshold ?? optionsDefault.threshold;
this.threshold = benchmarkOpts.threshold ?? defaultBenchmarkOptions.threshold;
if (prevBench) {

@@ -22,5 +22,11 @@ for (const bench of prevBench.results) {

}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
onTestStarted(_task) {
// this.log(task.name, "started");
onTestStarted(task) {
if (task.mode === "skip") {
this.skipped++;
consoleLog(`${this.indent()}${color("pending", " - %s")}`, task.name);
}
else if (task.mode === "todo") {
this.skipped++;
consoleLog(`${this.indent()}${color("pending", " - %s")}`, task.name);
}
}

@@ -41,4 +47,5 @@ onTestFinished(task) {

this.failed++;
consoleLog(this.indent() + color("fail", " %d) %s"), ++this.failed, task.name);
consoleLog(task.result?.errors);
const fmt = this.indent() + color("fail", " " + symbols.err) + color("fail", " %s");
consoleLog(fmt, task.name);
consoleLog(task.result?.errors?.map((e) => e.stackStr).join("\n"));
break;

@@ -45,0 +52,0 @@ }

@@ -11,3 +11,3 @@ import { BenchmarkResult, BenchmarkOpts } from "../types.js";

};
export declare function runBenchFn<T, T2>(opts: BenchmarkRunOptsWithFn<T, T2>, persistRunsNs?: boolean): Promise<{
export declare function runBenchFn<T, T2>(opts: BenchmarkRunOptsWithFn<T, T2>): Promise<{
result: BenchmarkResult;

@@ -14,0 +14,0 @@ runsNs: bigint[];

@@ -1,16 +0,30 @@

export async function runBenchFn(opts, persistRunsNs) {
const minRuns = opts.minRuns || 1;
const maxRuns = opts.maxRuns || Infinity;
const maxMs = opts.maxMs || Infinity;
const minMs = opts.minMs || 100;
const maxWarmUpMs = opts.maxWarmUpMs !== undefined ? opts.maxWarmUpMs : 500;
const maxWarmUpRuns = opts.maxWarmUpRuns !== undefined ? opts.maxWarmUpRuns : 1000;
// Ratio of maxMs that the warmup is allow to take from ellapsedMs
import { calcSum, filterOutliers, OutlierSensitivity } from "../utils/math.js";
import { getBenchmarkOptionsWithDefaults } from "./options.js";
import { createCVConvergenceCriteria, createLinearConvergenceCriteria } from "./termination.js";
const convergenceCriteria = {
["linear"]: createLinearConvergenceCriteria,
["cv"]: createCVConvergenceCriteria,
};
export async function runBenchFn(opts) {
const { id, before, beforeEach, fn, ...rest } = opts;
const benchOptions = getBenchmarkOptionsWithDefaults(rest);
const { maxMs, maxRuns, maxWarmUpMs, maxWarmUpRuns, runsFactor, threshold, convergence, averageCalculation } = benchOptions;
if (maxWarmUpMs >= maxMs) {
throw new Error(`Warmup time must be lower than max run time. maxWarmUpMs: ${maxWarmUpMs}, maxMs: ${maxMs}`);
}
if (maxWarmUpRuns >= maxRuns) {
throw new Error(`Warmup runs must be lower than max runs. maxWarmUpRuns: ${maxWarmUpRuns}, maxRuns: ${maxRuns}`);
}
if (averageCalculation !== "simple" && averageCalculation !== "clean-outliers") {
throw new Error(`Average calculation logic is not defined. ${averageCalculation}`);
}
if (convergence !== "linear" && convergence !== "cv") {
throw new Error(`Unknown convergence value ${convergence}`);
}
// Ratio of maxMs that the warmup is allow to take from elapsedMs
const maxWarmUpRatio = 0.5;
const convergeFactor = opts.convergeFactor || 0.5 / 100; // 0.5%
const runsFactor = opts.runsFactor || 1;
const maxWarmUpNs = BigInt(maxWarmUpMs) * BigInt(1e6);
const sampleEveryMs = 100;
const maxWarmUpNs = BigInt(benchOptions.maxWarmUpMs) * BigInt(1e6);
const runsNs = [];
const startRunMs = Date.now();
const shouldTerminate = convergenceCriteria[convergence](startRunMs, benchOptions);
let runIdx = 0;

@@ -20,18 +34,9 @@ let totalNs = BigInt(0);

let totalWarmUpRuns = 0;
let prevAvg0 = 0;
let prevAvg1 = 0;
let lastConvergenceSample = startRunMs;
let isWarmUp = maxWarmUpNs > 0 && maxWarmUpRuns > 0;
const inputAll = opts.before ? await opts.before() : undefined;
let isWarmUpPhase = maxWarmUpNs > 0 && maxWarmUpRuns > 0;
const inputAll = before ? await before() : undefined;
while (true) {
const ellapsedMs = Date.now() - startRunMs;
const mustStop = ellapsedMs >= maxMs || runIdx >= maxRuns;
const mayStop = ellapsedMs > minMs && runIdx > minRuns;
// Exceeds limits, must stop now
if (mustStop) {
break;
}
const input = opts.beforeEach ? await opts.beforeEach(inputAll, runIdx) : undefined;
const elapsedMs = Date.now() - startRunMs;
const input = beforeEach ? await beforeEach(inputAll, runIdx) : undefined;
const startNs = process.hrtime.bigint();
await opts.fn(input);
await fn(input);
const endNs = process.hrtime.bigint();

@@ -43,3 +48,3 @@ const runNs = endNs - startNs;

}
if (isWarmUp) {
if (isWarmUpPhase) {
// Warm-up, do not count towards results

@@ -49,41 +54,13 @@ totalWarmUpRuns += 1;

// On any warm-up finish condition, mark isWarmUp = true to prevent having to check them again
if (totalWarmUpNs >= maxWarmUpNs || totalWarmUpRuns >= maxWarmUpRuns || ellapsedMs / maxMs >= maxWarmUpRatio) {
isWarmUp = false;
if (totalWarmUpNs >= maxWarmUpNs || totalWarmUpRuns >= maxWarmUpRuns || elapsedMs / maxMs >= maxWarmUpRatio) {
isWarmUpPhase = false;
}
continue;
}
else {
// Persist results
runIdx += 1;
totalNs += runNs;
// If the caller wants the exact times of all runs, persist them
if (persistRunsNs)
runsNs.push(runNs);
// When is a good time to stop a benchmark? A naive answer is after N miliseconds or M runs.
// This code aims to stop the benchmark when the average fn run time has converged at a value
// within a given convergence factor. To prevent doing expensive math to often for fast fn,
// it only takes samples every `sampleEveryMs`. It stores two past values to be able to compute
// a very rough linear and quadratic convergence.
if (Date.now() - lastConvergenceSample > sampleEveryMs) {
lastConvergenceSample = Date.now();
const avg = Number(totalNs / BigInt(runIdx));
// Compute convergence (1st order + 2nd order)
const a = prevAvg0;
const b = prevAvg1;
const c = avg;
// Only do convergence math if it may stop
if (mayStop) {
// Aprox linear convergence
const convergence1 = Math.abs(c - a);
// Aprox quadratic convergence
const convergence2 = Math.abs(b - (a + c) / 2);
// Take the greater of both to enfore linear and quadratic are below convergeFactor
const convergence = Math.max(convergence1, convergence2) / a;
// Okay to stop + has converged, stop now
if (convergence < convergeFactor) {
break;
}
}
prevAvg0 = prevAvg1;
prevAvg1 = avg;
}
// Persist results
runIdx += 1;
totalNs += runNs;
runsNs.push(runNs);
if (shouldTerminate(runIdx, totalNs, runsNs)) {
break;
}

@@ -107,10 +84,17 @@ }

}
const averageNs = Number(totalNs / BigInt(runIdx)) / runsFactor;
let averageNs;
if (averageCalculation === "simple") {
averageNs = Number(totalNs / BigInt(runIdx)) / runsFactor;
}
if (averageCalculation === "clean-outliers") {
const cleanData = filterOutliers(runsNs, false, OutlierSensitivity.Mild);
averageNs = Number(calcSum(cleanData) / BigInt(cleanData.length)) / runsFactor;
}
return {
result: {
id: opts.id,
id: id,
averageNs,
runsDone: runIdx,
totalMs: Date.now() - startRunMs,
threshold: opts.noThreshold === true ? Infinity : opts.threshold,
threshold,
},

@@ -117,0 +101,0 @@ runsNs,

@@ -77,6 +77,3 @@ import { startTests, } from "@vitest/runner";

debug("finished tests. passed: %i, skipped: %i, failed: %i", passed.length, skipped.length, failed.length);
if (failed.length > 0) {
throw failed[0].result?.errors;
}
if (passed.length + skipped.length === res.length) {
if (passed.length + skipped.length + failed.length === res.length) {
return store.getAllResults();

@@ -83,0 +80,0 @@ }

import { Options } from "yargs";
import { StorageOptions, BenchmarkOpts, FileCollectionOptions } from "../types.js";
export declare const optionsDefault: {
threshold: number;
timeoutBench: number;
historyLocalPath: string;

@@ -7,0 +5,0 @@ historyCacheKey: string;

@@ -0,4 +1,3 @@

import { defaultBenchmarkOptions } from "../benchmark/options.js";
export const optionsDefault = {
threshold: 2,
timeoutBench: 10_000,
historyLocalPath: "./benchmark_data",

@@ -112,3 +111,3 @@ historyCacheKey: "benchmark_data",

type: "number",
default: optionsDefault.threshold,
default: defaultBenchmarkOptions.threshold,
group: benchmarkGroup,

@@ -119,2 +118,3 @@ },

description: "Max number of fn() runs, after which the benchmark stops",
default: defaultBenchmarkOptions.maxRuns,
group: benchmarkGroup,

@@ -125,2 +125,3 @@ },

description: "Min number of fn() runs before considering stopping the benchmark after converging",
default: defaultBenchmarkOptions.minRuns,
group: benchmarkGroup,

@@ -131,2 +132,3 @@ },

description: "Max total miliseconds of runs, after which the benchmark stops",
default: defaultBenchmarkOptions.maxMs,
group: benchmarkGroup,

@@ -137,2 +139,3 @@ },

description: "Min total miiliseconds of runs before considering stopping the benchmark after converging",
default: defaultBenchmarkOptions.minMs,
group: benchmarkGroup,

@@ -143,2 +146,3 @@ },

description: "Maximum real benchmark function run time before starting to count towards results. Set to 0 to not warm-up. May warm up for less ms if the `maxWarmUpRuns` condition is met first.",
default: defaultBenchmarkOptions.maxWarmUpMs,
group: benchmarkGroup,

@@ -149,2 +153,3 @@ },

description: "Maximum benchmark function runs before starting to count towards results. Set to 0 to not warm-up. May warm up for less ms if the `maxWarmUpMs` condition is met first.",
default: defaultBenchmarkOptions.maxWarmUpRuns,
group: benchmarkGroup,

@@ -155,2 +160,3 @@ },

description: "Convergance factor (0,1) at which the benchmark automatically stops. Set to 1 to disable",
default: defaultBenchmarkOptions.convergeFactor,
group: benchmarkGroup,

@@ -161,2 +167,3 @@ },

description: "If fn() contains a foor loop repeating a task N times, you may set runsFactor = N to scale down the results.",
default: defaultBenchmarkOptions.runsFactor,
group: benchmarkGroup,

@@ -167,2 +174,3 @@ },

description: "Run `sleep(0)` after each fn() call. Use when the event loop needs to tick to free resources created by fn()",
default: defaultBenchmarkOptions.yieldEventLoopAfterEach,
group: benchmarkGroup,

@@ -173,3 +181,3 @@ },

description: "Hard timeout for each benchmark",
default: optionsDefault.timeoutBench,
default: defaultBenchmarkOptions.timeoutBench,
group: benchmarkGroup,

@@ -180,3 +188,3 @@ },

description: "List of setup files to load before the tests",
default: [],
default: defaultBenchmarkOptions.setupFiles,
group: benchmarkGroup,

@@ -187,6 +195,20 @@ },

description: "Trigger GC (if available) after every benchmark",
default: false,
default: defaultBenchmarkOptions.triggerGC,
group: benchmarkGroup,
},
convergence: {
type: "string",
description: "The algorithm used to detect the convergence to stop benchmark runs",
default: defaultBenchmarkOptions.convergence,
choices: ["linear", "cv"],
group: benchmarkGroup,
},
averageCalculation: {
type: "string",
description: "Use simple average of all runs or clean the outliers before calculating average",
default: defaultBenchmarkOptions.averageCalculation,
choices: ["simple", "clean-outliers"],
group: benchmarkGroup,
},
};
//# sourceMappingURL=options.js.map

@@ -17,2 +17,3 @@ import * as github from "@actions/github";

import { GithubCommentTag } from "../github/octokit.js";
import { defaultBenchmarkOptions } from "../benchmark/options.js";
const debug = Debug("@chainsafe/benchmark/cli");

@@ -69,3 +70,3 @@ export async function run(opts_) {

}
const resultsComp = computePerformanceReport(currBench, prevBench, opts.threshold);
const resultsComp = computePerformanceReport(currBench, prevBench, opts.threshold ?? defaultBenchmarkOptions.threshold);
debug("detecting to post comment. skipPostComment: %o, isGaRun: %o", !opts.skipPostComment, isGaRun());

@@ -72,0 +73,0 @@ if (!opts.skipPostComment && isGaRun()) {

@@ -64,2 +64,10 @@ export interface FileCollectionOptions {

triggerGC?: boolean;
/**
* The algorithm to detect the convergence to stop the benchmark function runs.
* linear - Calculate the moving average among last 3 runs average and compare through quadratic formula
* cv - Coefficient Variance is a statistical tool which calculates data pattern on all runs and calculate median
* */
convergence?: "linear" | "cv";
/** Use simple average of all runs or clean the outliers before calculating average */
averageCalculation?: "simple" | "clean-outliers";
};

@@ -66,0 +74,0 @@ export type PartialBy<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;

{
"name": "@chainsafe/benchmark",
"version": "1.2.1",
"version": "1.2.2",
"repository": "git@github.com:chainsafe/benchmark.git",

@@ -36,3 +36,3 @@ "author": "ChainSafe Systems",

"prepublishOnly": "yarn build",
"benchmark": "node --loader ts-node/esm ./src/cli/cli.ts 'test/perf/**/*.test.ts'",
"benchmark": "node --loader ts-node/esm ./src/cli/cli.ts 'test/perf/**/@(!(errors)).test.ts'",
"writeDocs": "node --loader ts-node/esm scripts/writeOptionsMd.ts"

@@ -39,0 +39,0 @@ },

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc