bench-node - npm Package Compare versions

Comparing version 0.0.4-beta.3 to 0.1.0

examples/create-uint32array/node.js.log


examples/deleting-properties/node.js

@@ -6,3 +6,4 @@ const { Suite } = require('../../lib');

const NullObject = function NullObject() { }
-NullObject.prototype = Object.create(null)
+NullObject.prototype = Object.create(null);
+%NeverOptimizeFunction(NullObject);

@@ -9,0 +10,0 @@ suite

@@ -35,18 +35,14 @@ const { debug, types } = require('node:util');

if (timeInNs > 1e9) {
-return `${ (timeInNs / 1e9).toFixed(2) }s`;
+return `${(timeInNs / 1e9).toFixed(2)}s`;
}
if (timeInNs > 1e6) {
-return `${ (timeInNs / 1e6).toFixed(2) }ms`;
+return `${(timeInNs / 1e6).toFixed(2)}ms`;
}
if (timeInNs > 1e3) {
-return `${ (timeInNs / 1e3).toFixed(2) }us`;
+return `${(timeInNs / 1e3).toFixed(2)}us`;
}
-if (timeInNs > 1e2) {
-return `${ (timeInNs).toFixed(2) }ns`;
-}
-return `${ (timeInNs).toFixed(2) }ns`;
+return `${(timeInNs).toFixed(2)}ns`;
}

@@ -53,0 +49,0 @@ }

const { reportConsoleBench } = require('./report');
-const { getInitialIterations, runBenchmark } = require('./lifecycle');
+const { getInitialIterations, runBenchmark, runWarmup } = require('./lifecycle');
const { debugBench, timer } = require('./clock');

@@ -102,7 +102,14 @@ const {

+// This is required to avoid variance on first benchmark run
+for (let i = 0; i < this.#benchmarks.length; ++i) {
+const benchmark = this.#benchmarks[i];
+debugBench(`Warmup ${ benchmark.name } with minTime=${ benchmark.minTime }, maxTime=${ benchmark.maxTime }`);
+const initialIteration = await getInitialIterations(benchmark);
+await runWarmup(benchmark, initialIteration, { minTime: 0.005, maxTime: 0.05 });
+}
for (let i = 0; i < this.#benchmarks.length; ++i) {
const benchmark = this.#benchmarks[i];
// Warmup is calculated to reduce noise/bias on the results
const initialIteration = await getInitialIterations(benchmark);
debugBench(`Starting ${ benchmark.name } with minTime=${ benchmark.minTime }, maxTime=${ benchmark.maxTime }`);

@@ -109,0 +116,0 @@ const result = await runBenchmark(benchmark, initialIteration);

@@ -36,2 +36,29 @@ const { clockBenchmark, debugBench, MIN_RESOLUTION, timer } = require('./clock');

async function runWarmup(bench, initialIterations, { minTime, maxTime }) {
  minTime = minTime ?? bench.minTime;
  maxTime = maxTime ?? bench.minTime;
  const maxDuration = maxTime * timer.scale;
  const minSamples = 10;
  let iterations = 0;
  let timeSpent = 0;
  let samples = 0;
  while (timeSpent < maxDuration || samples <= minSamples) {
    const { 0: duration, 1: realIterations } = await clockBenchmark(bench, initialIterations);
    timeSpent += duration;
    iterations += realIterations;
    iterations = Math.min(Number.MAX_SAFE_INTEGER, iterations);
    // Just to avoid issues with empty fn
    const durationPerOp = Math.max(MIN_RESOLUTION, duration / realIterations);
    const minWindowTime = Math.max(0, Math.min((maxDuration - timeSpent) / timer.scale, minTime));
    initialIterations = getItersForOpDuration(durationPerOp, minWindowTime);
    samples++;
  }
}
async function runBenchmark(bench, initialIterations) {

@@ -77,2 +104,3 @@ const histogram = new StatisticalHistogram();

  runBenchmark,
+  runWarmup,
};

package.json

{
  "name": "bench-node",
-  "version": "0.0.4-beta.3",
+  "version": "0.1.0",
  "description": "",

@@ -5,0 +5,0 @@ "main": "lib/index.js",

README.md

@@ -41,3 +41,4 @@ # `bench-node`

-This module uses V8 deoptimization to ensure that the code block is not optimized away, producing accurate benchmarks. See the [Writing JavaScript Microbenchmark Mistakes](#TODO) section for more details.
+This module uses V8 deoptimization to help ensure that the code block is not optimized away, producing accurate benchmarks -- but not realistic ones.
+See the [Writing JavaScript Microbenchmark Mistakes](#TODO) section for more details.
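
For orientation, here is a minimal usage sketch of the API this README describes. It assumes the package's main entry exports `Suite` (as `"main": "lib/index.js"` and the examples' `require('../../lib')` suggest) and that a suite is executed with a `run()` method; both are inferences from this diff rather than statements of the documented API.

```cjs
// Minimal sketch only; `Suite` from the main entry and `run()` are assumptions.
const { Suite } = require('bench-node');

const suite = new Suite();

// A regular benchmark: the runner loops over the function and times it implicitly.
suite.add('Using includes', function () {
  const text = 'text/html,...';
  const r = text.includes('application/json');
});

suite.run();
```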

@@ -59,2 +60,3 @@ ```bash

4. [Setup and Teardown](#setup-and-teardown)
+1. [Managed Benchmarks](#managed-benchmarks)

@@ -243,2 +245,56 @@ ## Class: `Suite`

> [!WARNING]
> When using the `timer`, the setup will also be deoptimized.
> As a result, if you compare this approach with one that uses functions outside
> the benchmark function, the results may not match.
> See: [Deleting Properties Example](./examples/deleting-properties/node.js).

Ensure you call the `.start()` and `.end()` methods when using the `timer` argument, or an `ERR_BENCHMARK_MISSING_OPERATION` error will be thrown.
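
As a hypothetical illustration of that requirement, here is a managed benchmark that never calls `timer.end()`; per the sentence above, running it would throw `ERR_BENCHMARK_MISSING_OPERATION` instead of producing a result (the benchmark name and body are invented for this sketch).

```cjs
suite.add('[Managed] forgot timer.end()', function (timer) {
  timer.start();
  for (let i = 0; i < timer.count; i++) {
    JSON.stringify({ ok: true });
  }
  // timer.end(timer.count) is intentionally missing, so no operation count is
  // recorded and the suite reports ERR_BENCHMARK_MISSING_OPERATION.
});
```
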
### Managed Benchmarks

In regular benchmarks (when `timer` is not used), the benchmarked function is run in a loop,
and the timing is managed implicitly.
This means each iteration of the benchmarked function is measured directly.
The downside is that optimizations such as inlining or caching might affect the timing, especially for fast operations.

Example:
```cjs
suite.add('Using includes', function () {
const text = 'text/html,...';
const r = text.includes('application/json');
});
```
Here, `%DoNotOptimize` is called inside the loop for regular benchmarks (assuming the `V8NeverOptimizePlugin` is being used),
ensuring that the operation is not overly optimized within each loop iteration.
This prevents V8 from optimizing away the operation (e.g., skipping steps because the result is not used or the function is too trivial).

Managed benchmarks explicitly handle timing through `start()` and `end()` calls around the benchmarked code.
This encapsulates the entire set of iterations in one timed block,
which can result in a tighter measurement with less overhead.
However, it can lead to over-optimistic results, especially if the timer's start and stop calls are placed outside of the loop,
allowing V8 to over-optimize the entire block.

Example:
```cjs
suite.add('[Managed] Using includes', function (timer) {
timer.start();
for (let i = 0; i < timer.count; i++) {
const text = 'text/html,...';
const r = text.includes('application/json');
assert.ok(r); // Ensure the result is used so it doesn't get V8 optimized away
}
timer.end(timer.count);
});
```
In this case, `%DoNotOptimize` is applied outside the loop, so it does not protect each iteration from
excessive optimization. This can result in higher operation counts because V8 might optimize away repetitive work.
That's why `assert.ok(r)` is used: it consumes `r`, preventing V8 from optimizing away the entire block because the variable would otherwise be unused.

> [!NOTE]
> Assumptions about V8 behavior can change at any time. Therefore, it's crucial to compare
> results across versions of V8/Node.js.
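
A tiny sketch of one way to act on that note: record which runtime produced a set of results so numbers are never compared across Node.js/V8 versions blindly (this snippet is not part of bench-node).

```cjs
// Print the Node.js and V8 versions next to the benchmark output.
console.log(`node ${process.version} (V8 ${process.versions.v8})`);
```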
