New Case Study:See how Anthropic automated 95% of dependency reviews with Socket.Learn More
Socket
Sign inDemoInstall
Socket

batch-cluster

Package Overview
Dependencies
Maintainers
1
Versions
93
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

batch-cluster - npm Package Compare versions

Comparing version 3.2.0 to 4.0.0

CHANGELOG.md

54

dist/BatchCluster.d.ts
/// <reference types="node" />
import { ChildProcess } from "child_process";
import { Task } from "./Task";
export { kill, running } from "./BatchProcess";
export { kill, pidExists, pids } from "./Pids";
export { Deferred } from "./Deferred";
export { delay } from "./Delay";
export * from "./Logger";

@@ -78,13 +77,6 @@ export { Task, Parser } from "./Task";

*
* Must be > 0. Defaults to 1 second.
* Must be > 0. Defaults to 5 seconds.
*/
readonly onIdleIntervalMillis: number;
/**
* Tasks that result in errors will be retried at most `taskRetries`
* times.
*
* Must be >= 0. Defaults to 0.
*/
readonly taskRetries: number;
/**
* If the initial `versionCommand` fails for new spawned processes more

@@ -111,4 +103,4 @@ * than this rate, end this BatchCluster and throw an error, because

/**
* If commands take longer than this, presume the underlying process is
* dead and we should fail or retry the task.
* If commands take longer than this, presume the underlying process is dead
* and we should fail the task.
*

@@ -121,10 +113,2 @@ * This should be set to something on the order of seconds.

/**
* When tasks don't complete in `taskTimeoutMillis`, should they be
* retried (a maximum of `taskRetries`)? If taskRetries is set to 0, this
* value is meaningless.
*
* Defaults to false.
*/
readonly retryTasksAfterTimeout: boolean;
/**
* Processes will be recycled after processing `maxTasksPerProcess`

@@ -168,3 +152,3 @@ * tasks. Depending on the commands and platform, batch mode commands

private readonly _procs;
private readonly _pendingTasks;
private readonly tasks;
private readonly onIdleInterval?;

@@ -174,3 +158,6 @@ private readonly startErrorRate;

private _ended;
private _internalErrorCount;
constructor(opts: Partial<BatchClusterOptions> & BatchProcessOptions & ChildProcessFactory);
private readonly beforeExitListener;
private readonly exitListener;
/**

@@ -181,3 +168,3 @@ * Emitted when a child process has an error when spawning

/**
* Emitted when a task has an error even after retries
* Emitted when a task has an error
*/

@@ -202,7 +189,2 @@ on(event: "taskError", listener: (err: Error, task: Task<any>) => void): void;

/**
* @return the current, non-ended child process PIDs. Useful for integration
* tests, but most likely not generally interesting.
*/
readonly pids: number[];
/**
* @return the number of pending tasks

@@ -216,10 +198,16 @@ */

readonly spawnedProcs: number;
readonly pendingMaintenance: Promise<void>;
private readonly beforeExitListener;
private readonly exitListener;
/**
* For integration tests:
*/
readonly internalErrorCount: number;
private endPromise;
private retryTask;
private onInternalError;
private onStartError;
private procs;
private onIdle;
/**
* Exposed only for unit tests
*
* @return the spawned PIDs that are still in the process table.
*/
pids(): Promise<number[]>;
private readonly onIdle;
}
"use strict";
var __assign = (this && this.__assign) || Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
// Compiler-emitted helper (tslib style): runs the body of an `async`
// function, which tsc compiled into a generator, and exposes its completion
// as a promise of constructor P (defaulting to the global Promise).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
// Resume the generator with the value an awaited promise fulfilled with.
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
// Resume the generator by throwing an awaited promise's rejection into it.
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
// Either finish the outer promise (generator done) or wrap the yielded
// value in a new P so thenables are adopted before resuming (older emitted
// form of this helper; newer ones factor this into an `adopt` function).
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
// Start execution: instantiate the compiled body and take the first step.
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
// Compiler-emitted helper (tslib style): interprets the state machine that
// tsc compiled an async/generator body into. `body` is resumed with the
// state object `_` and exchanges "opcodes" of the form [code, value]. Codes
// visible below: 0=next, 1=throw, 2=return, 4=yield, 5=yield*(delegate),
// 6=error-dispatch, 7=endfinally; `_.trys` records protected try regions.
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
// Expose the iterator protocol; Symbol.iterator makes the object iterable.
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
// Advance the machine by one resume operation. `f` guards against
// reentrant resumption; `y` holds an active delegated (yield*) iterator.
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
// When delegating, forward next/throw/return to the inner iterator first.
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
// Dispatch errors/returns against the innermost recorded try region.
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
// Unreachable: the generator object was returned unconditionally above.
return t;
};

@@ -17,13 +55,15 @@ function __export(m) {

var timers_1 = require("timers");
var Array_1 = require("./Array");
var Async_1 = require("./Async");
var BatchProcess_1 = require("./BatchProcess");
var Logger_1 = require("./Logger");
var Mean_1 = require("./Mean");
var Pids_1 = require("./Pids");
var Rate_1 = require("./Rate");
var BatchProcess_2 = require("./BatchProcess");
exports.kill = BatchProcess_2.kill;
exports.running = BatchProcess_2.running;
var Pids_2 = require("./Pids");
exports.kill = Pids_2.kill;
exports.pidExists = Pids_2.pidExists;
exports.pids = Pids_2.pids;
var Deferred_1 = require("./Deferred");
exports.Deferred = Deferred_1.Deferred;
var Delay_1 = require("./Delay");
exports.delay = Delay_1.delay;
__export(require("./Logger"));

@@ -60,13 +100,6 @@ var Task_1 = require("./Task");

*
* Must be > 0. Defaults to 1 second.
* Must be > 0. Defaults to 5 seconds.
*/
this.onIdleIntervalMillis = 1000;
this.onIdleIntervalMillis = 5000;
/**
* Tasks that result in errors will be retried at most `taskRetries`
* times.
*
* Must be >= 0. Defaults to 0.
*/
this.taskRetries = 0;
/**
* If the initial `versionCommand` fails for new spawned processes more

@@ -93,4 +126,4 @@ * than this rate, end this BatchCluster and throw an error, because

/**
* If commands take longer than this, presume the underlying process is
* dead and we should fail or retry the task.
* If commands take longer than this, presume the underlying process is dead
* and we should fail the task.
*

@@ -103,10 +136,2 @@ * This should be set to something on the order of seconds.

/**
* When tasks don't complete in `taskTimeoutMillis`, should they be
* retried (a maximum of `taskRetries`)? If taskRetries is set to 0, this
* value is meaningless.
*
* Defaults to false.
*/
this.retryTasksAfterTimeout = false;
/**
* Processes will be recycled after processing `maxTasksPerProcess`

@@ -165,3 +190,2 @@ * tasks. Depending on the commands and platform, batch mode commands

gte("endGracefulWaitTimeMillis", 0);
gte("taskRetries", 0);
gte("maxReasonableProcessFailuresPerMinute", 0);

@@ -173,5 +197,2 @@ if (errors.length > 0) {

}
function map(obj, f) {
return obj != null ? f(obj) : undefined;
}
/**

@@ -193,8 +214,72 @@ * BatchCluster instances manage 0 or more homogenious child processes, and

this._procs = [];
this._pendingTasks = [];
this.tasks = [];
this.startErrorRate = new Rate_1.Rate();
this._spawnedProcs = 0;
this._ended = false;
this._internalErrorCount = 0;
this.beforeExitListener = function () { return _this.end(true); };
this.exitListener = function () { return _this.end(false); };
this.onIdle = Async_1.serial(function () { return __awaiter(_this, void 0, void 0, function () {
var beforeProcLen, minStart, readyProcs, execNextTask, proc_1;
var _this = this;
return __generator(this, function (_a) {
if (this._ended)
return [2 /*return*/];
beforeProcLen = this._procs.length;
minStart = Date.now() - this.opts.maxProcAgeMillis;
Array_1.filterInPlace(this._procs, function (proc) {
var old = proc.start < minStart && _this.opts.maxProcAgeMillis > 0;
var worn = proc.taskCount >= _this.opts.maxTasksPerProcess;
var broken = proc.exited;
if (proc.idle && (old || worn || broken)) {
proc.end(true, old ? "old" : worn ? "worn" : "broken");
return false;
}
else {
return true;
}
});
readyProcs = this._procs.filter(function (proc) { return proc.ready; });
Logger_1.logger().trace("BatchCluster.onIdle()", {
beforeProcLen: beforeProcLen,
readyProcs: readyProcs.map(function (ea) { return ea.pid; }),
pendingTasks: this.tasks.slice(0, 3).map(function (ea) { return ea.command; }),
pendingTaskCount: this.tasks.length
});
execNextTask = function () {
var idleProc = readyProcs.shift();
if (idleProc == null)
return;
var task = _this.tasks.shift();
if (task == null)
return;
if (idleProc.execTask(task)) {
Logger_1.logger().trace("BatchCluster.onIdle(): submitted " +
task.command +
" to child pid " +
idleProc.pid);
}
else {
Logger_1.logger().warn("BatchCluster.onIdle(): execTask for " +
task.command +
" to pid " +
idleProc.pid +
" returned false, re-enqueing.");
_this.enqueueTask(task);
}
return true;
};
while (!this._ended && execNextTask()) { }
if (!this._ended &&
this.tasks.length > 0 &&
this._procs.length < this.opts.maxProcs) {
proc_1 = new BatchProcess_1.BatchProcess(this.opts.processFactory(), this.opts, this.observer);
proc_1.exitedPromise.then(function () { return _this._tasksPerProc.push(proc_1.taskCount); });
this._procs.push(proc_1);
this._spawnedProcs++;
}
Logger_1.logger().trace("BatchCluster.onIdle() finished");
return [2 /*return*/];
});
}); });
this.opts = verifyOptions(opts);

@@ -206,5 +291,6 @@ if (this.opts.onIdleIntervalMillis > 0) {

this.observer = {
onIdle: this.onIdle.bind(this),
onStartError: this.onStartError.bind(this),
retryTask: this.retryTask.bind(this)
onIdle: function () { return _this.onIdle(); },
onStartError: function (err) { return _this.onStartError(err); },
onTaskError: function (err, task) { return _this.emitter.emit("taskError", err, task); },
onInternalError: function (err) { return _this.onInternalError(err); }
};

@@ -225,44 +311,47 @@ _p.once("beforeExit", this.beforeExitListener);

BatchCluster.prototype.end = function (gracefully) {
var _this = this;
if (gracefully === void 0) { gracefully = true; }
var alreadyEnded = this._ended;
this._ended = true;
if (!alreadyEnded) {
this.emitter.emit("beforeEnd");
if (this.onIdleInterval)
timers_1.clearInterval(this.onIdleInterval);
_p.removeListener("beforeExit", this.beforeExitListener);
_p.removeListener("exit", this.exitListener);
}
if (!gracefully || !alreadyEnded) {
// We don't need to wait for these promises:
this._procs.forEach(function (p) {
return p.end(gracefully).catch(function (err) { return _this.emitter.emit("endError", err); });
return __awaiter(this, void 0, void 0, function () {
var alreadyEnded;
var _this = this;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
alreadyEnded = this._ended;
this._ended = true;
if (!alreadyEnded) {
this.emitter.emit("beforeEnd");
if (this.onIdleInterval)
timers_1.clearInterval(this.onIdleInterval);
_p.removeListener("beforeExit", this.beforeExitListener);
_p.removeListener("exit", this.exitListener);
}
if (!(!gracefully || !alreadyEnded)) return [3 /*break*/, 2];
return [4 /*yield*/, Promise.all(this._procs.map(function (p) {
return p
.end(gracefully, "BatchCluster.end()")
.catch(function (err) { return _this.emitter.emit("endError", err); });
}))];
case 1:
_a.sent();
this._procs.length = 0;
_a.label = 2;
case 2: return [2 /*return*/, this.endPromise().then(function () {
if (!alreadyEnded)
_this.emitter.emit("end");
})];
}
});
}
return this.endPromise().then(function () {
if (!alreadyEnded)
_this.emitter.emit("end");
});
};
BatchCluster.prototype.enqueueTask = function (task) {
var _this = this;
if (this._ended) {
task.onError(new Error("BatchCluster has ended"));
task.reject(new Error("BatchCluster has ended"));
throw new Error("Cannot enqueue task " + task.command);
}
this._pendingTasks.push(task);
this.onIdle();
Logger_1.logger().trace("BatchCluster.enqueueTask(" + task.command + ")");
this.tasks.push(task);
setTimeout(function () { return _this.onIdle(); }, 1);
return task.promise;
};
Object.defineProperty(BatchCluster.prototype, "pids", {
    /**
     * @return the current, non-ended child process PIDs. Useful for
     * integration tests, but most likely not generally interesting.
     */
    get: function () {
        var result = [];
        // Delegate to procs(), which prunes dead/worn processes first.
        for (var _i = 0, _a = this.procs(); _i < _a.length; _i++) {
            result.push(_a[_i].pid);
        }
        return result;
    },
    enumerable: true,
    configurable: true
});
Object.defineProperty(BatchCluster.prototype, "pendingTasks", {

@@ -273,3 +362,3 @@ /**

get: function () {
return this._pendingTasks.length;
return this.tasks.length;
},

@@ -296,5 +385,8 @@ enumerable: true,

});
Object.defineProperty(BatchCluster.prototype, "pendingMaintenance", {
Object.defineProperty(BatchCluster.prototype, "internalErrorCount", {
/**
* For integration tests:
*/
get: function () {
return Promise.all(this._procs.filter(function (p) { return p.ended; }).map(function (p) { return p.end(); })).then(function () { return undefined; });
return this._internalErrorCount;
},

@@ -312,23 +404,5 @@ enumerable: true,

};
BatchCluster.prototype.retryTask = function (task, error) {
var _this = this;
if (task != null) {
Logger_1.logger().debug("BatchCluster.retryTask()", {
cmd: task.command,
error: error.stack || error,
retries: task.retries
});
if (task.retries < this.opts.taskRetries && !this.ended) {
task.retries++;
// fix for rapid-retry failure from mktags:
setTimeout(function () { return _this.enqueueTask(task); }, 20);
}
else {
if (error != null) {
// Only notify the observer if the task can't be retried:
this.emitter.emit("taskError", error, task);
}
task.onError(error);
}
}
// Records a library-internal (not task-level) error: bumps the counter that
// integration tests read via `internalErrorCount`, and logs at error level.
BatchCluster.prototype.onInternalError = function (error) {
    this._internalErrorCount++;
    Logger_1.logger().error("BatchCluster: INTERNAL ERROR: " + error);
};

@@ -348,54 +422,31 @@ BatchCluster.prototype.onStartError = function (error) {

};
BatchCluster.prototype.procs = function () {
var minStart = Date.now() - this.opts.maxProcAgeMillis;
// Iterate the array backwards, as we'll be removing _procs as we go:
for (var i = this._procs.length - 1; i >= 0; i--) {
var proc = this._procs[i];
// Don't end procs that are currently servicing requests:
if (proc.idle &&
(proc.start < minStart ||
proc.taskCount >= this.opts.maxTasksPerProcess)) {
// No need to be graceful, just shut down.
var gracefully = false;
proc.end(gracefully);
}
// Only remove exited processes from _procs:
if (!proc.running) {
proc.end(); // make sure any pending task is re-enqueued
this._tasksPerProc.push(proc.taskCount);
this._procs.splice(i, 1);
}
}
return this._procs;
};
BatchCluster.prototype.onIdle = function () {
var _this = this;
if (this._ended) {
return;
}
var procs = this.procs();
var idleProcs = procs.filter(function (proc) { return proc.idle; });
var execNextTask = function () {
return map(idleProcs.shift(), function (idleProc) {
return map(_this._pendingTasks.shift(), function (task) {
if (!idleProc.execTask(task)) {
_this.enqueueTask(task);
}
else {
Logger_1.logger().debug("BatchCluster.execNextTask(): submitted " +
task.command +
" to child pid " +
idleProc.pid);
}
return true;
});
/**
* Exposed only for unit tests
*
* @return the spawned PIDs that are still in the process table.
*/
BatchCluster.prototype.pids = function () {
return __awaiter(this, void 0, void 0, function () {
var arr, _i, _a, pid;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
arr = [];
_i = 0, _a = this._procs.map(function (p) { return p.pid; });
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3 /*break*/, 4];
pid = _a[_i];
return [4 /*yield*/, Pids_1.pidExists(pid)];
case 2:
if (_b.sent())
arr.push(pid);
_b.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/, arr];
}
});
};
while (!this._ended && execNextTask()) { }
if (this._pendingTasks.length > 0 && procs.length < this.opts.maxProcs) {
var bp = new BatchProcess_1.BatchProcess(this.opts.processFactory(), this.opts, this.observer);
this._procs.push(bp);
this._spawnedProcs++;
// onIdle() will be called by the new proc when its startup task completes.
}
});
};

@@ -402,0 +453,0 @@ return BatchCluster;

@@ -10,4 +10,5 @@ /// <reference types="node" />

onIdle(): void;
onTaskError(error: Error, task: Task<any>): void;
onStartError(error: Error): void;
retryTask(task: Task<any>, error: Error): void;
onInternalError(error: Error): void;
}

@@ -25,3 +26,5 @@ export interface InternalBatchProcessOptions extends BatchProcessOptions, BatchClusterOptions {

readonly observer: BatchProcessObserver;
readonly name: string;
readonly start: number;
private dead;
private _taskCount;

@@ -50,9 +53,11 @@ /**

readonly taskCount: number;
readonly exited: boolean;
readonly exitedPromise: Promise<void>;
readonly ready: boolean;
readonly idle: boolean;
readonly busy: boolean;
/**
* @return true if the child process is in the process table
*/
readonly running: boolean;
running(): Promise<boolean>;
notRunning(): Promise<boolean>;
/**

@@ -62,5 +67,7 @@ * @return {boolean} true if `this.end()` has been requested or the child

*/
readonly ended: boolean;
ended(): Promise<boolean>;
notEnded(): Promise<boolean>;
execTask(task: Task<any>): boolean;
end(gracefully?: boolean): Promise<void>;
end(gracefully: boolean | undefined, source: string): Promise<void>;
private _end;
private awaitNotRunning;

@@ -74,17 +81,1 @@ private onTimeout;

}
/**
* @export
* @param {number} pid process id. Required.
* @returns {boolean} true if the given process id is in the local process
* table.
*/
export declare function running(pid: number): boolean;
/**
* Send a signal to the given process id.
*
* @export
* @param {number} pid the process id. Required.
* @param {boolean} [force=false] if true, and the current user has
* permissions to send the signal, the pid will be forced to shut down.
*/
export declare function kill(pid: number, force?: boolean): void;

@@ -38,8 +38,7 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
var _cp = require("child_process");
var _os = require("os");
var _p = require("process");
var Async_1 = require("./Async");
var BatchCluster_1 = require("./BatchCluster");
var Deferred_1 = require("./Deferred");
var Delay_1 = require("./Delay");
var Object_1 = require("./Object");
var Pids_1 = require("./Pids");
var Task_1 = require("./Task");

@@ -56,2 +55,3 @@ /**

this.start = Date.now();
this.dead = false;
this._taskCount = -1; // don't count the startupTask

@@ -71,9 +71,9 @@ /**

this.buff = "";
this.name = "BatchProcess(" + proc.pid + ")";
// don't let node count the child processes as a reason to stay alive
this.proc.unref();
// forking or plumbing issues are not the task's fault, so retry:
this.proc.on("error", function (err) { return _this.onError("proc.error", err); });
this.proc.on("close", function () { return _this.onExit(); });
this.proc.on("exit", function () { return _this.onExit(); });
this.proc.on("disconnect", function () { return _this.onExit(); });
this.proc.on("close", function () { return _this.onExit("close"); });
this.proc.on("exit", function () { return _this.onExit("exit"); });
this.proc.on("disconnect", function () { return _this.onExit("disconnect"); });
this.proc.stdin.on("error", function (err) { return _this.onError("stdin.error", err); });

@@ -84,10 +84,14 @@ this.proc.stdout.on("error", function (err) { return _this.onError("stdout.error", err); });

this.proc.stderr.on("data", function (err) {
_this.onError("stderr.data", new Error(cleanError(err)));
return _this.onError("stderr.data", new Error(cleanError(err)));
});
this.startupTask = new Task_1.Task(opts.versionCommand, function (ea) { return ea; });
// Prevent unhandled startup task rejections from killing node:
this.startupTask.promise.catch(function (err) {
BatchCluster_1.logger().warn("BatchProcess startup task was rejected: " + err);
this.startupTask.promise
.then(function () { return BatchCluster_1.logger().trace(_this.name + " is ready"); })
// Prevent unhandled startup task rejections from killing node:
.catch(function (err) {
BatchCluster_1.logger().warn(_this.name + " startup task was rejected: " + err);
});
this.execTask(this.startupTask);
if (!this.execTask(this.startupTask)) {
this.observer.onInternalError(new Error(this.name + " startup task was not submitted"));
}
}

@@ -108,5 +112,5 @@ Object.defineProperty(BatchProcess.prototype, "pid", {

});
Object.defineProperty(BatchProcess.prototype, "exitedPromise", {
Object.defineProperty(BatchProcess.prototype, "exited", {
get: function () {
return this._exited.promise;
return !this._exited.pending;
},

@@ -116,5 +120,5 @@ enumerable: true,

});
Object.defineProperty(BatchProcess.prototype, "idle", {
Object.defineProperty(BatchProcess.prototype, "exitedPromise", {
get: function () {
return !this.ended && this.currentTask == null;
return this._exited.promise;
},

@@ -124,5 +128,5 @@ enumerable: true,

});
Object.defineProperty(BatchProcess.prototype, "busy", {
Object.defineProperty(BatchProcess.prototype, "ready", {
get: function () {
return !this.ended && this.currentTask != null;
return this.currentTask == null && !this._ended && !this.startupTask.pending;
},

@@ -132,8 +136,5 @@ enumerable: true,

});
Object.defineProperty(BatchProcess.prototype, "running", {
/**
* @return true if the child process is in the process table
*/
Object.defineProperty(BatchProcess.prototype, "idle", {
get: function () {
return running(this.pid);
return this.currentTask == null;
},

@@ -143,16 +144,58 @@ enumerable: true,

});
Object.defineProperty(BatchProcess.prototype, "ended", {
    /**
     * @return {boolean} true if `this.end()` has been requested or the child
     * process has exited.
     */
    get: function () {
        if (this._ended) {
            return true;
        }
        return !this.running;
    },
    enumerable: true,
    configurable: true
});
/**
* @return true if the child process is in the process table
*/
/**
 * @return a promise resolving to true if the child process is still in the
 * OS process table. Once observed absent, `this.dead` latches true so later
 * calls short-circuit without re-querying the process table.
 */
BatchProcess.prototype.running = function () {
return __awaiter(this, void 0, void 0, function () {
var _this = this;
return __generator(this, function (_a) {
if (this.dead)
return [2 /*return*/, false];
else
return [2 /*return*/, Pids_1.pidExists(this.pid).then(function (alive) {
if (!alive) {
// once a PID leaves the process table, it's gone for good:
_this.dead = true;
_this._ended = true;
}
return alive;
})];
// compiler-emitted fallthrough; unreachable (both branches return above)
return [2 /*return*/];
});
});
};
// Convenience negation of running().
BatchProcess.prototype.notRunning = function () {
    return this.running().then(function (isRunning) { return !isRunning; });
};
/**
* @return {boolean} true if `this.end()` has been requested or the child
* process has exited.
*/
/**
 * @return a promise resolving to true if `end()` has been requested or the
 * child process is no longer running.
 */
BatchProcess.prototype.ended = function () {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
// When _ended is false this yields the notRunning() promise, which the
// surrounding __awaiter resolves before completing.
return [2 /*return*/, this._ended || this.notRunning()];
});
});
};
/**
 * Convenience negation of `ended()`.
 */
BatchProcess.prototype.notEnded = function () {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, this.ended().then(function (ea) { return !ea; })];
});
});
};
// This must not be async, or new instances aren't started as busy (until the
// startup task is complete)
BatchProcess.prototype.execTask = function (task) {
var _this = this;
if (!this.idle) {
// We're not going to run this.running() here, because BatchCluster will
// already have pruned the processes that have exitted unexpectedly just
// milliseconds ago.
if (this._ended || this.currentTask != null) {
this.observer.onInternalError(new Error(this.name +
".execTask(" +
task.command +
"): already working on " +
this.currentTask));
return false;

@@ -167,3 +210,3 @@ }

if (timeoutMs > 0) {
BatchCluster_1.logger().trace("BatchProcess.execTask(): scheduling timeout", {
BatchCluster_1.logger().trace(this.name + ".execTask(): scheduling timeout", {
command: task.command,

@@ -175,13 +218,16 @@ timeoutMs: timeoutMs,

}
BatchCluster_1.logger().trace("BatchProcess.execTask(): starting", {
cmd: cmd,
retries: task.retries
});
BatchCluster_1.logger().trace(this.name + ".execTask(): starting", { cmd: cmd });
this.proc.stdin.write(cmd);
return true;
};
BatchProcess.prototype.end = function (gracefully) {
// Public shutdown entry point: latches _ended and delegates to _end(),
// telling it whether this call was the first end() request so the exit
// command is only written once.
BatchProcess.prototype.end = function (gracefully, source) {
    if (gracefully === void 0) { gracefully = true; }
    // Capture whether this is the first end() call before flipping the flag.
    var firstCall = !this._ended;
    this._ended = true;
    return this._end(gracefully, source, firstCall);
};
BatchProcess.prototype._end = function (gracefully, source, firstEnd) {
if (gracefully === void 0) { gracefully = true; }
return __awaiter(this, void 0, void 0, function () {
var cmd;
var cmd, err;
var _this = this;

@@ -191,7 +237,7 @@ return __generator(this, function (_a) {

case 0:
if (!!this._ended) return [3 /*break*/, 2];
BatchCluster_1.logger().debug(this.name + ".end()", { gracefully: gracefully, source: source });
this._ended = true;
cmd = this.opts.exitCommand
? ensureSuffix(this.opts.exitCommand, "\n")
: undefined;
if (!firstEnd) return [3 /*break*/, 2];
cmd = Object_1.map(this.opts.exitCommand, function (ea) { return ensureSuffix(ea, "\n"); });
if (!this.proc.stdin.writable) return [3 /*break*/, 2];
return [4 /*yield*/, end(this.proc.stdin, cmd)];

@@ -202,4 +248,13 @@ case 1:

case 2:
if (this.currentTask != null && this.currentTask !== this.startupTask) {
this.observer.retryTask(this.currentTask, new Error("process ended"));
if (this.currentTask != null) {
// This isn't an internal error, as this state would be expected if
// the user calls .end() when there are pending tasks.
BatchCluster_1.logger().warn(this.name + ".end(): called while not idle", {
source: source,
gracefully: gracefully,
cmd: this.currentTask.command
});
err = new Error("end() called when not idle");
this.observer.onTaskError(err, this.currentTask);
this.currentTask.reject(err);
}

@@ -213,25 +268,39 @@ this.clearCurrentTask();

]);
if (!(this.running && gracefully && this.opts.endGracefulWaitTimeMillis > 0)) return [3 /*break*/, 5];
return [4 /*yield*/, this.awaitNotRunning(this.opts.endGracefulWaitTimeMillis / 2)];
return [4 /*yield*/, this.running()];
case 3:
_a.sent();
if (this.running)
kill(this.proc.pid, false);
return [4 /*yield*/, this.awaitNotRunning(this.opts.endGracefulWaitTimeMillis / 2)];
if (!((_a.sent()) &&
gracefully &&
this.opts.endGracefulWaitTimeMillis > 0)) return [3 /*break*/, 9];
// Wait for the end command to take effect:
return [4 /*yield*/, this.awaitNotRunning(this.opts.endGracefulWaitTimeMillis / 2)
// If it's still running, send the pid a signal:
];
case 4:
// Wait for the end command to take effect:
_a.sent();
_a.label = 5;
return [4 /*yield*/, this.running()];
case 5:
if (this.running) {
BatchCluster_1.logger().info("BatchProcess.end(): killing PID " + this.pid);
kill(this.proc.pid, true);
}
return [4 /*yield*/, this.awaitNotRunning(5000)];
if (!_a.sent()) return [3 /*break*/, 7];
return [4 /*yield*/, Pids_1.kill(this.proc.pid)
// Wait for the signal handler to work:
];
case 6:
// The OS is srsly f@rked if `kill` doesn't respond within a couple ms. 5s
// should be 100x longer than necessary.
if (!(_a.sent())) {
BatchCluster_1.logger().error("BatchProcess.end(): PID " + this.pid + " did not respond to kill.");
}
return [2 /*return*/, this.exitedPromise];
_a.sent();
_a.label = 7;
case 7:
// Wait for the signal handler to work:
return [4 /*yield*/, this.awaitNotRunning(this.opts.endGracefulWaitTimeMillis / 2)];
case 8:
// Wait for the signal handler to work:
_a.sent();
_a.label = 9;
case 9: return [4 /*yield*/, this.running()];
case 10:
if (!_a.sent()) return [3 /*break*/, 12];
BatchCluster_1.logger().warn(this.name + ".end(): force-killing still-running child.");
return [4 /*yield*/, Pids_1.kill(this.proc.pid, true)];
case 11:
_a.sent();
_a.label = 12;
case 12: return [2 /*return*/, this.exitedPromise];
}

@@ -243,13 +312,10 @@ });

var _this = this;
return Delay_1.until(function () { return !_this.running; }, timeout);
return Async_1.until(function () { return _this.notRunning(); }, timeout);
};
BatchProcess.prototype.onTimeout = function (task, timeoutMs) {
if (task === this.currentTask && task.pending) {
this.onError("timeout", new Error("waited " + timeoutMs + "ms"), this.opts.retryTasksAfterTimeout, task);
BatchCluster_1.logger().warn("BatchProcess.onTimeout(): ending to prevent result pollution with other tasks.", { pid: this.pid, task: task.command });
this.end();
if (task.pending) {
this.onError("timeout", new Error("waited " + timeoutMs + "ms"), task);
}
};
BatchProcess.prototype.onError = function (source, _error, retryTask, task) {
if (retryTask === void 0) { retryTask = true; }
BatchProcess.prototype.onError = function (source, _error, task) {
return __awaiter(this, void 0, void 0, function () {

@@ -262,2 +328,7 @@ var error;

error = new Error(source + ": " + cleanError(_error.message));
BatchCluster_1.logger().warn(this.name + ".onError()", {
source: source,
task: Object_1.map(task, function (t) { return t.command; }),
error: error
});
if (_error.stack) {

@@ -269,23 +340,23 @@ // Error stacks, if set, will not be redefined from a rethrow:

this.clearCurrentTask();
this.end(false); // no need for grace, just clean up.
this.end(false, "onError(" + source + ")"); // no need for grace (there isn't a pending job)
if (task === this.startupTask) {
BatchCluster_1.logger().warn("BatchProcess.onError(): startup task failed: " + error);
BatchCluster_1.logger().warn(this.name + ".onError(): startup task failed: " + error);
this.observer.onStartError(error);
}
if (task != null) {
if (retryTask && task !== this.startupTask) {
BatchCluster_1.logger().debug("BatchProcess.onError(): task error, retrying", {
command: task.command,
pid: this.pid,
taskCount: this.taskCount
});
this.observer.retryTask(task, error);
BatchCluster_1.logger().debug(this.name + ".onError(): task failed", {
command: task.command,
pid: this.pid,
taskCount: this.taskCount
});
this.observer.onTaskError(error, task);
if (task.pending) {
task.reject(error);
}
else {
BatchCluster_1.logger().debug("BatchProcess.onError(): task failed", {
command: task.command,
pid: this.pid,
taskCount: this.taskCount
});
task.onError(error);
this.observer.onInternalError(new Error(this.name +
".onError(): cannot reject task " +
task.command +
" is it is already " +
task.state));
}

@@ -297,15 +368,4 @@ }

};
BatchProcess.prototype.onExit = function () {
if (this.running) {
BatchCluster_1.logger().error("BatchProcess.onExit() called on a running process", {
pid: this.pid,
currentTask: map(this.currentTask, function (ea) { return ea.command; })
});
}
this._ended = true;
var task = this.currentTask;
if (task != null && task !== this.startupTask) {
this.observer.retryTask(task, new Error("child process exited"));
}
this.clearCurrentTask();
BatchProcess.prototype.onExit = function (source) {
this.end(false, "onExit(" + source + ")"); // no need to be graceful, just do the bookkeeping.
this._exited.resolve();

@@ -324,3 +384,3 @@ };

var err = new Error(cleanError(fail[1]) || "command error");
this.onError("onData", err, true, this.currentTask);
this.onError("onData", err);
}

@@ -342,8 +402,21 @@ }

if (result.length > 0 && !this._ended) {
BatchCluster_1.logger().error("BatchProcess.resolveCurrentTask(): INTERNAL ERROR: no current task in resolveCurrentTask()", { result: result, pid: this.pid });
this.observer.onInternalError(new Error(this.name + ".resolveCurrentTask(): no current task"));
}
this.end();
this.end(false, "resolveCurrentTask(no current task)");
}
else {
task.onData(result);
BatchCluster_1.logger().trace(this.name + ".resolveCurrentTask()", {
task: task.command,
result: result
});
if (task.pending) {
task.resolve(result);
}
else {
this.observer.onInternalError(new Error(this.name +
".resolveCurrentTask(): cannot resolve task " +
task.command +
" as it is already " +
task.state));
}
this.observer.onIdle();

@@ -355,5 +428,2 @@ }

exports.BatchProcess = BatchProcess;
function map(obj, f) {
    // Option-style map: apply f only when obj is neither null nor undefined.
    if (obj == null) {
        return undefined;
    }
    return f(obj);
}
/**

@@ -371,47 +441,5 @@ * When we wrap errors, an Error always prefixes the toString() and stack with

}
/**
* @export
* @param {number} pid process id. Required.
* @returns {boolean} true if the given process id is in the local process
* table.
*/
function running(pid) {
    // (The prior implementation wrapped this in an immediately-invoked
    // function expression for no benefit; the try/catch returns directly.)
    try {
        // Signal 0 checks for process existence/permission without actually
        // sending a signal. Node 7 and 8 return a boolean, but types are
        // borked, so be defensive about non-boolean results.
        var result = process.kill(pid, 0);
        return typeof result === "boolean" ? result : true;
    }
    catch (e) {
        // EPERM means the pid exists but is owned by another user, so it
        // still counts as "running". Any other error (e.g. ESRCH) means no
        // such process.
        return e.code === "EPERM" || e.errno === "EPERM";
    }
}
exports.running = running;
var isWin = _os.platform().startsWith("win");
/**
* Send a signal to the given process id.
*
* @export
* @param {number} pid the process id. Required.
* @param {boolean} [force=false] if true, and the current user has
* permissions to send the signal, the pid will be forced to shut down.
*/
function kill(pid, force) {
    if (force === void 0) { force = false; }
    if (!isWin) {
        // POSIX: SIGTERM requests shutdown; SIGKILL cannot be caught or ignored.
        _p.kill(pid, force ? "SIGKILL" : "SIGTERM");
        return;
    }
    // Windows: taskkill /T terminates the child process tree; /F forces it.
    var taskkillArgs = ["/PID", pid.toString(), "/T"];
    if (force) {
        taskkillArgs.push("/F");
    }
    _cp.execFile("taskkill", taskkillArgs);
}
exports.kill = kill;
function end(endable, contents) {
return new Promise(function (resolve) {
endable.end(contents, resolve);
contents == null ? endable.end(resolve) : endable.end(contents, resolve);
});

@@ -418,0 +446,0 @@ }

// NOTE(review): this declaration appears to interleave two versions from a
// diff: the old private fields (`n`, `sum`) and the new ones (`_n`, `_min?`,
// `_max?` with a `readonly n` accessor) are both present, so `n` is declared
// twice. Only one set should survive — confirm against the source diff.
export declare class Mean {
    private n;
    private sum;
    private _n;
    private _min?;
    private _max?;
    constructor(n?: number, sum?: number);
    // Record one observation: updates the count, sum, min, and max.
    push(x: number): void;
    readonly n: number;
    readonly min: number | undefined;
    readonly max: number | undefined;
    readonly mean: number;
    clone(): Mean;
}

@@ -7,9 +7,34 @@ "use strict";

if (sum === void 0) { sum = 0; }
this.n = n;
this.sum = sum;
this._min = undefined;
this._max = undefined;
this._n = n;
}
Mean.prototype.push = function (x) {
    // NOTE(review): both `this.n++` (old field) and `this._n++` (new field)
    // appear here — this looks like old and new diff lines interleaved, so
    // only one counter should survive. Confirm against the source diff.
    this.n++;
    this._n++;
    this.sum += x;
    // Track running min/max; the `== null` checks seed them on the first push.
    this._min = this._min == null || this._min > x ? x : this._min;
    this._max = this._max == null || this._max < x ? x : this._max;
};
// Read-only accessors over the renamed private fields (_n, _min, _max).
Object.defineProperty(Mean.prototype, "n", {
    // Number of values pushed so far.
    get: function () {
        return this._n;
    },
    enumerable: true,
    configurable: true
});
Object.defineProperty(Mean.prototype, "min", {
    // Smallest value pushed, or undefined if nothing has been pushed yet.
    get: function () {
        return this._min;
    },
    enumerable: true,
    configurable: true
});
Object.defineProperty(Mean.prototype, "max", {
    // Largest value pushed, or undefined if nothing has been pushed yet.
    get: function () {
        return this._max;
    },
    enumerable: true,
    configurable: true
});
Object.defineProperty(Mean.prototype, "mean", {

@@ -16,0 +41,0 @@ get: function () {

@@ -64,2 +64,4 @@ "use strict";

var minTime = Date.now() - this.ttlMs;
// If nothing's expired, findIndex should return index 0, so this should
// normally be quite cheap:
var firstGoodIndex = this.store.findIndex(function (ea) { return ea > minTime; });

@@ -66,0 +68,0 @@ if (firstGoodIndex == -1) {

@@ -15,3 +15,2 @@ /**

readonly parser: Parser<T>;
retries: number;
private readonly d;

@@ -30,2 +29,3 @@ /**

readonly pending: boolean;
readonly state: string;
toString(): string;

@@ -36,3 +36,3 @@ /**

*/
onData(data: string): void;
resolve(data: string): void;
/**

@@ -42,3 +42,3 @@ * This is for use by `BatchProcess` only, and will only be called when the

*/
onError(error: Error): void;
reject(error: Error): void;
}

@@ -20,3 +20,2 @@ "use strict";

this.parser = parser;
this.retries = 0;
this.d = new Deferred_1.Deferred();

@@ -41,2 +40,13 @@ }

});
Object.defineProperty(Task.prototype, "state", {
    // Human-readable lifecycle label derived from the backing Deferred:
    // "pending" until settled, then "resolved" or "rejected".
    get: function () {
        return this.d.pending
            ? "pending"
            : this.d.fulfilled
                ? "resolved"
                : "rejected";
    },
    enumerable: true,
    configurable: true
});
Task.prototype.toString = function () {

@@ -49,3 +59,3 @@ return this.constructor.name + "(" + this.command + ")";

*/
Task.prototype.onData = function (data) {
Task.prototype.resolve = function (data) {
try {

@@ -60,3 +70,6 @@ var result = this.parser(data);

catch (error) {
Logger_1.logger().warn("Task.onData(): rejected", { command: this.command, error: error });
Logger_1.logger().warn("Task.onData(): rejected", {
command: this.command,
error: error
});
this.d.reject(error);

@@ -69,4 +82,7 @@ }

*/
Task.prototype.onError = function (error) {
Logger_1.logger().warn("Task.onError(): rejected", { command: this.command, error: error });
Task.prototype.reject = function (error) {
Logger_1.logger().warn("Task.reject()", {
cmd: this.command,
error: error
});
this.d.reject(error);

@@ -73,0 +89,0 @@ };

{
"name": "batch-cluster",
"version": "3.2.0",
"version": "4.0.0",
"description": "Manage a cluster of child processes",

@@ -34,4 +34,4 @@ "main": "dist/BatchCluster.js",

"@types/chai-string": "^1.4.1",
"@types/mocha": "^5.2.2",
"@types/node": "^10.3.3",
"@types/mocha": "^5.2.5",
"@types/node": "^10.7.0",
"chai": "^4.1.2",

@@ -42,12 +42,12 @@ "chai-as-promised": "^7.1.1",

"mocha": "^5.2.0",
"prettier": "^1.13.5",
"prettier": "^1.14.2",
"rimraf": "^2.6.2",
"seedrandom": "^2.4.3",
"serve": "^9.0.0",
"source-map-support": "^0.5.6",
"seedrandom": "^2.4.4",
"serve": "^9.6.0",
"source-map-support": "^0.5.8",
"timekeeper": "^2.1.2",
"typedoc": "^0.11.1",
"typescript": "^2.9.2",
"typescript": "^3.0.1",
"wtfnode": "^0.7.0"
}
}

@@ -8,2 +8,3 @@ # batch-cluster

[![Build status](https://ci.appveyor.com/api/projects/status/4564x6lvc8s6a55l/branch/master?svg=true)](https://ci.appveyor.com/project/mceachen/batch-cluster-js/branch/master)
![no dependencies](https://img.shields.io/badge/dependencies-0-brightgreen.svg)

@@ -13,11 +14,15 @@ Many command line tools, like

[GraphicsMagick](http://www.graphicsmagick.org/), support running in a "batch
mode" that accept commands provided through stdin and results through stdout. As
these tools can be fairly large, spinning them up can be expensive (especially
on Windows).
mode" that accept a series of discrete commands provided through stdin and
results through stdout. As these tools can be fairly large, spinning them up can
be expensive (especially on Windows).
This module expedites these commands, or "Tasks," by managing a cluster of these
"batch" processes, feeding tasks to idle processes, retrying tasks when the tool
crashes, and preventing memory leaks by restarting tasks after performing a
given number of tasks or after a given set of time has elapsed.
"batch" processes and queue of pending tasks, feeding processes pending tasks
when they are idle. Tasks are monitored for errors and have timeouts, which
cause the host process to be recycled. Batch processes are also recycled after
processing N tasks or running for N seconds, in an effort to minimize the impact
of memory leaks.
As of version 4, retry logic for tasks is a separate concern from this module.
This package powers

@@ -37,2 +42,6 @@ [exiftool-vendored](https://github.com/mceachen/exiftool-vendored.js), whose

## Changelog
See [CHANGELOG.md](CHANGELOG.md).
## Usage

@@ -74,202 +83,1 @@

order to test BatchCluster's retry and error handling code.
## Versioning
### The `MAJOR` or `API` version is incremented for
- 💔 Non-backwards-compatible API changes
### The `MINOR` or `UPDATE` version is incremented for
- ✨ Backwards-compatible features
### The `PATCH` version is incremented for
- 🐞 Backwards-compatible bug fixes
- 📦 Minor packaging changes
## Changelog
### v3.2.0
- 📦 New `Logger` methods, `withLevels`, `withTimestamps`, and `filterLevels`
were shoved into a new `Logger` namespace.
### v3.1.0
- ✨ Added simple timestamp and levels logger prefixer for tests
- 🐞 Errors rethrown via BatchProcess now strip extraneous `Error:` prefixes
- 🐞 For a couple internal errors (versionCommand startup errors and internal
state inconsistencies on `onExit` that aren't fatal), we now log `.error`
rather than throw Error() or ignore.
### v3.0.0
- ✨/💔 **`Task` promises are only rejected with `Error` instances now.** Note
that also means that `BatchProcessObserver` types are more strict. It could be
argued that this isn't an API breaking change as it only makes rejection
values more strict, but people may need to change their error handling, so I'm
bumping the major version to highlight that. Resolves
[#3](https://github.com/mceachen/batch-cluster.js/issues/3). Thanks for the
issue, [Nils Knappmeier](https://github.com/nknapp)!
### v2.2.0
- 🐞 Windows taskkill `/PID` option seemed to work downcased, but the docs say
to use uppercase, so I've updated it.
- 📦 Upgrade all deps including TypeScript to 2.9
(v2.1.2 is the same contents, but `np` had a crashbug during publish)
### v2.1.1
- 📦 More robust `end` for `BatchProcess`, which may prevent very long-lived
consumers from sporadically leaking child processes on Mac and linux.
- 📦 Added Node 10 to the build matrix.
### v2.1.0
- 📦 Introduced `Logger.trace` and moved logging related to per-task items down
to `trace`, as heavy load and large request or response payloads could
overwhelm loggers. If you really want to see on-the-wire requests and results,
enable `trace` in your debugger implementation. By default, the
`ConsoleLogger` omits log messages with this level.
### v2.0.0
- 💔 Replaced `BatchClusterObserver` with a simple EventEmitter API on
`BatchCluster` to be more idiomatic with node's API
- 💔 v1.11.0 added "process reuse" after errors, but that turned out to be
problematic in recovery, so that change was reverted (and with it, the
`maxTaskErrorsPerProcess` parameter was removed)
- ✨ `Rate` is simpler and more accurate now.
### v1.11.0
- ✨ Added new `BatchClusterObserver` for error and lifecycle monitoring
- 📦 Added a number of additional logging calls
### v1.10.0
- 🐞 Explicitly use `timers.setInterval`. May address [this
issue](https://stackoverflow.com/questions/48961238/electron-setinterval-implementation-difference-between-chrome-and-node).
Thanks for the PR, [Tim Fish](https://github.com/timfish)!
### v1.9.1
- 📦 Changed `BatchProcess.end()` to use `until()` rather than `Promise.race`,
and always use `kill(pid, forced)` after waiting the shutdown grace period
to prevent child process leaks.
### v1.9.0
- ✨ New `Logger.setLogger()` for debug, info, warning, and errors. `debug` and
`info` defaults to Node's
[debuglog](https://nodejs.org/api/util.html#util_util_debuglog_section),
`warn` and `error` default to `console.warn` and `console.error`,
respectively.
- 📦 docs generated by [typedoc](http://typedoc.org/)
- 📦 Upgraded dependencies (including TypeScript 2.7, which has more strict
verifications)
- 📦 Removed tslint, as `tsc` provides good lint coverage now
- 📦 The code is now [prettier](https://github.com/prettier/prettier)
- 🐞 `delay` now allows
[unref](https://nodejs.org/api/timers.html#timers_timeout_unref)ing the
timer, which, in certain circumstances, could prevent node processes from
exiting gracefully until their timeouts expired
### v1.8.0
- ✨ onIdle now runs as many tasks as it can, rather than just one. This should
provide higher throughput.
- 🐞 Removed stderr emit on race condition between onIdle and execTask. The
error condition was already handled appropriately--no need to console.error.
### v1.7.0
- 📦 Exported `kill()` and `running()` from `BatchProcess`
### v1.6.1
- 📦 De-flaked some tests on mac, and added Node 8 to the build matrix.
### v1.6.0
- ✨ Processes are forcefully shut down with `taskkill` on windows and `kill -9`
on other unix-like platforms if they don't terminate after sending the
`exitCommand`, closing `stdin`, and sending the proc a `SIGTERM`. Added a test
harness to exercise.
- 📦 Upgrade to TypeScript 2.6.1
- 🐞 `mocha` tests don't require the `--exit` hack anymore 🎉
### v1.5.0
- ✨ `.running()` works correctly for PIDs with different owners now.
- 📦 `yarn upgrade --latest`
### v1.4.2
- 📦 Ran code through `prettier` and delinted
- 📦 Massaged test assertions to pass through slower CI systems
### v1.4.1
- 📦 Replaced an errant `console.log` with a call to `log`.
### v1.4.0
- 🐞 Discovered `maxProcs` wasn't always utilized by `onIdle`, which meant in
certain circumstances, only 1 child process would be servicing pending
requests. Added breaking tests and fixed impl.
### v1.3.0
- 📦 Added tests to verify that the `kill(0)` calls to verify the child
processes are still running work across different node version and OSes
- 📦 Removed unused methods in `BatchProcess` (whose API should not be accessed
directly by consumers, so the major version remains at 1)
- 📦 Switched to yarn and upgraded dependencies
### v1.2.0
- ✨ Added a configurable cleanup signal to ensure child processes shut down on
`.end()`
- 📦 Moved child process management from `BatchCluster` to `BatchProcess`
- ✨ More test coverage around batch process concurrency, reuse, flaky task
retries, and proper process shutdown
### v1.1.0
- ✨ `BatchCluster` now has a force-shutdown `exit` handler to accompany the
graceful-shutdown `beforeExit` handler. For reference, from the
[Node docs](https://nodejs.org/api/process.html#process_event_beforeexit):
> The 'beforeExit' event is not emitted for conditions causing explicit
> termination, such as calling process.exit() or uncaught exceptions.
- ✨ Remove `Rate`'s time decay in the interests of simplicity
### v1.0.0
- ✨ Integration tests now throw deterministically random errors to simulate
flaky child procs, and ensure retries and disaster recovery work as expected.
- ✨ If the `processFactory` or `versionCommand` fails more often than a given
rate, `BatchCluster` will shut down and raise exceptions to subsequent
`enqueueTask` callers, rather than try forever to spin up processes that are
most likely misconfigured.
- ✨ Given the proliferation of construction options, those options are now
sanity-checked at construction time, and an error will be raised whose message
contains all incorrect option values.
### v0.0.2
- ✨ Added support and explicit tests for
[CR LF, CR, and LF](https://en.wikipedia.org/wiki/Newline) encoded streams
from spawned processes
- ✨ child processes are ended after `maxProcAgeMillis`, and restarted as needed
- 🐞 `BatchCluster` now practices good listener hygiene for `process.beforeExit`
### v0.0.1
- ✨ Extracted implementation and tests from
[exiftool-vendored](https://github.com/mceachen/exiftool-vendored.js)

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc