jest-worker
Advanced tools
Comparing version 29.7.0 to 30.0.0-alpha.1
1685
build/index.js
@@ -1,7 +0,1615 @@ | ||
'use strict'; | ||
/*! | ||
* /** | ||
* * Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* * | ||
* * This source code is licensed under the MIT license found in the | ||
* * LICENSE file in the root directory of this source tree. | ||
* * / | ||
*/ | ||
/******/ (() => { // webpackBootstrap | ||
/******/ "use strict"; | ||
/******/ var __webpack_modules__ = ({ | ||
Object.defineProperty(exports, '__esModule', { | ||
/***/ "./src/Farm.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
var _FifoQueue = _interopRequireDefault(__webpack_require__("./src/FifoQueue.ts")); | ||
var _types = __webpack_require__("./src/types.ts"); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
class Farm { | ||
_computeWorkerKey; | ||
_workerSchedulingPolicy; | ||
_cacheKeys = Object.create(null); | ||
_locks = []; | ||
_offset = 0; | ||
_taskQueue; | ||
constructor(_numOfWorkers, _callback, options = {}) { | ||
this._numOfWorkers = _numOfWorkers; | ||
this._callback = _callback; | ||
this._computeWorkerKey = options.computeWorkerKey; | ||
this._workerSchedulingPolicy = options.workerSchedulingPolicy ?? 'round-robin'; | ||
this._taskQueue = options.taskQueue ?? new _FifoQueue.default(); | ||
} | ||
doWork(method, ...args) { | ||
const customMessageListeners = new Set(); | ||
const addCustomMessageListener = listener => { | ||
customMessageListeners.add(listener); | ||
return () => { | ||
customMessageListeners.delete(listener); | ||
}; | ||
}; | ||
const onCustomMessage = message => { | ||
for (const listener of customMessageListeners) listener(message); | ||
}; | ||
const promise = new Promise( | ||
// Bind args to this function so it won't reference the parent scope.
// This prevents a memory leak in v8, because otherwise the function will | ||
// retain args for the closure. | ||
((args, resolve, reject) => { | ||
const computeWorkerKey = this._computeWorkerKey; | ||
const request = [_types.CHILD_MESSAGE_CALL, false, method, args]; | ||
let worker = null; | ||
let hash = null; | ||
if (computeWorkerKey) { | ||
hash = computeWorkerKey.call(this, method, ...args); | ||
worker = hash == null ? null : this._cacheKeys[hash]; | ||
} | ||
const onStart = worker => { | ||
if (hash != null) { | ||
this._cacheKeys[hash] = worker; | ||
} | ||
}; | ||
const onEnd = (error, result) => { | ||
customMessageListeners.clear(); | ||
if (error) { | ||
reject(error); | ||
} else { | ||
resolve(result); | ||
} | ||
}; | ||
const task = { | ||
onCustomMessage, | ||
onEnd, | ||
onStart, | ||
request | ||
}; | ||
if (worker) { | ||
this._taskQueue.enqueue(task, worker.getWorkerId()); | ||
this._process(worker.getWorkerId()); | ||
} else { | ||
this._push(task); | ||
} | ||
}).bind(null, args)); | ||
promise.UNSTABLE_onCustomMessage = addCustomMessageListener; | ||
return promise; | ||
} | ||
_process(workerId) { | ||
if (this._isLocked(workerId)) { | ||
return this; | ||
} | ||
const task = this._taskQueue.dequeue(workerId); | ||
if (!task) { | ||
return this; | ||
} | ||
if (task.request[1]) { | ||
throw new Error('Queue implementation returned processed task'); | ||
} | ||
// Keep only task.onEnd in a local variable so the closure below doesn't retain
// the whole task object, letting its other properties, such as task.request,
// be garbage collected.
let taskOnEnd = task.onEnd; | ||
const onEnd = (error, result) => { | ||
if (taskOnEnd) { | ||
taskOnEnd(error, result); | ||
} | ||
taskOnEnd = null; | ||
this._unlock(workerId); | ||
this._process(workerId); | ||
}; | ||
task.request[1] = true; | ||
this._lock(workerId); | ||
this._callback(workerId, task.request, task.onStart, onEnd, task.onCustomMessage); | ||
return this; | ||
} | ||
_push(task) { | ||
this._taskQueue.enqueue(task); | ||
const offset = this._getNextWorkerOffset(); | ||
for (let i = 0; i < this._numOfWorkers; i++) { | ||
this._process((offset + i) % this._numOfWorkers); | ||
if (task.request[1]) { | ||
break; | ||
} | ||
} | ||
return this; | ||
} | ||
_getNextWorkerOffset() { | ||
switch (this._workerSchedulingPolicy) { | ||
case 'in-order': | ||
return 0; | ||
case 'round-robin': | ||
return this._offset++; | ||
} | ||
} | ||
_lock(workerId) { | ||
this._locks[workerId] = true; | ||
} | ||
_unlock(workerId) { | ||
this._locks[workerId] = false; | ||
} | ||
_isLocked(workerId) { | ||
return this._locks[workerId]; | ||
} | ||
} | ||
exports["default"] = Farm; | ||
/***/ }), | ||
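// Editor's note: a minimal usage sketch (not part of the built bundle) showing how
// the computeWorkerKey cache implemented by Farm above is exercised through the
// public API. The './transform-module' path and its `transform` export are
// hypothetical.
const {Worker} = require('jest-worker');

async function stickyWorkerExample() {
  const worker = new Worker(require.resolve('./transform-module'), {
    // The key is derived from the call arguments; calls that yield the same key
    // are routed to the worker cached for that key in Farm._cacheKeys.
    computeWorkerKey: (method, filename) => filename,
  });
  await worker.transform('./a.js'); // picks a worker and caches it under './a.js'
  await worker.transform('./a.js'); // queued on that same worker
  await worker.end();
}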
/***/ "./src/FifoQueue.ts": | ||
/***/ ((__unused_webpack_module, exports) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
/** | ||
* First-in, First-out task queue that manages a dedicated pool | ||
* for each worker as well as a shared queue. The FIFO ordering is guaranteed | ||
* across the worker specific and shared queue. | ||
*/ | ||
class FifoQueue { | ||
_workerQueues = []; | ||
_sharedQueue = new InternalQueue(); | ||
enqueue(task, workerId) { | ||
if (workerId == null) { | ||
this._sharedQueue.enqueue(task); | ||
return; | ||
} | ||
let workerQueue = this._workerQueues[workerId]; | ||
if (workerQueue == null) { | ||
workerQueue = this._workerQueues[workerId] = new InternalQueue(); | ||
} | ||
const sharedTop = this._sharedQueue.peekLast(); | ||
const item = { | ||
previousSharedTask: sharedTop, | ||
task | ||
}; | ||
workerQueue.enqueue(item); | ||
} | ||
dequeue(workerId) { | ||
const workerTop = this._workerQueues[workerId]?.peek(); | ||
const sharedTaskIsProcessed = workerTop?.previousSharedTask?.request[1] ?? true; | ||
// Dequeue from the worker specific queue only if it has a task and the shared
// task that was at the tail of the shared queue when it was enqueued has already
// been processed; otherwise hand out the top of the shared queue so FIFO ordering
// holds across both queues.
if (workerTop != null && sharedTaskIsProcessed) { | ||
return this._workerQueues[workerId]?.dequeue()?.task ?? null; | ||
} | ||
return this._sharedQueue.dequeue(); | ||
} | ||
} | ||
exports["default"] = FifoQueue; | ||
/** | ||
* FIFO queue for a single worker / shared queue. | ||
*/ | ||
class InternalQueue { | ||
_head = null; | ||
_last = null; | ||
enqueue(value) { | ||
const item = { | ||
next: null, | ||
value | ||
}; | ||
if (this._last == null) { | ||
this._head = item; | ||
} else { | ||
this._last.next = item; | ||
} | ||
this._last = item; | ||
} | ||
dequeue() { | ||
if (this._head == null) { | ||
return null; | ||
} | ||
const item = this._head; | ||
this._head = item.next; | ||
if (this._head == null) { | ||
this._last = null; | ||
} | ||
return item.value; | ||
} | ||
peek() { | ||
return this._head?.value ?? null; | ||
} | ||
peekLast() { | ||
return this._last?.value ?? null; | ||
} | ||
} | ||
/***/ }), | ||
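// Editor's note: a small standalone sketch (not part of the bundle) of the ordering
// rule FifoQueue guarantees above: a worker specific task is only dequeued once the
// shared task that was at the tail when it was enqueued has been processed. The
// method names are hypothetical.
const {FifoQueue} = require('jest-worker');

const noop = () => {};
// Shape matches the CHILD_MESSAGE_CALL request tuple: [type, processed, method, args].
const makeTask = method => ({
  request: [1, false, method, []],
  onStart: noop,
  onEnd: noop,
  onCustomMessage: noop,
});

const fifo = new FifoQueue();
const sharedTask = makeTask('sharedTask');
const workerOnlyTask = makeTask('workerOnlyTask');

fifo.enqueue(sharedTask);         // goes to the shared queue
fifo.enqueue(workerOnlyTask, 1);  // worker 1's queue, remembers sharedTask as previousSharedTask

console.log(fifo.dequeue(1).request[2]); // 'sharedTask' — the earlier shared task comes first
sharedTask.request[1] = true;            // mark it processed, as Farm._process does
console.log(fifo.dequeue(1).request[2]); // 'workerOnlyTask'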
/***/ "./src/PriorityQueue.ts": | ||
/***/ ((__unused_webpack_module, exports) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
/** | ||
* Priority queue that processes tasks in natural ordering (lower priority first) | ||
* according to the priority computed by the function passed in the constructor. | ||
* | ||
* FIFO ordering isn't guaranteed for tasks with the same priority. | ||
* | ||
* Worker specific tasks with the same priority as a non-worker specific task | ||
* are always processed first. | ||
*/ | ||
class PriorityQueue { | ||
_queue = []; | ||
_sharedQueue = new MinHeap(); | ||
constructor(_computePriority) { | ||
this._computePriority = _computePriority; | ||
} | ||
enqueue(task, workerId) { | ||
if (workerId == null) { | ||
this._enqueue(task, this._sharedQueue); | ||
} else { | ||
const queue = this._getWorkerQueue(workerId); | ||
this._enqueue(task, queue); | ||
} | ||
} | ||
_enqueue(task, queue) { | ||
const item = { | ||
priority: this._computePriority(task.request[2], ...task.request[3]), | ||
task | ||
}; | ||
queue.add(item); | ||
} | ||
dequeue(workerId) { | ||
const workerQueue = this._getWorkerQueue(workerId); | ||
const workerTop = workerQueue.peek(); | ||
const sharedTop = this._sharedQueue.peek(); | ||
// Use the task from the worker queue if there's no task in the shared queue,
// or if the priority of the worker queue's top task is smaller than or equal to
// the priority of the shared queue's top task. Worker specific tasks are
// preferred because no other worker can pick them up.
if (sharedTop == null || workerTop != null && workerTop.priority <= sharedTop.priority) { | ||
return workerQueue.poll()?.task ?? null; | ||
} | ||
return this._sharedQueue.poll().task; | ||
} | ||
_getWorkerQueue(workerId) { | ||
let queue = this._queue[workerId]; | ||
if (queue == null) { | ||
queue = this._queue[workerId] = new MinHeap(); | ||
} | ||
return queue; | ||
} | ||
} | ||
exports["default"] = PriorityQueue; | ||
class MinHeap { | ||
_heap = []; | ||
peek() { | ||
return this._heap[0] ?? null; | ||
} | ||
add(item) { | ||
const nodes = this._heap; | ||
nodes.push(item); | ||
if (nodes.length === 1) { | ||
return; | ||
} | ||
let currentIndex = nodes.length - 1; | ||
// Bubble up the added node as long as the parent is bigger | ||
while (currentIndex > 0) { | ||
const parentIndex = Math.floor((currentIndex + 1) / 2) - 1; | ||
const parent = nodes[parentIndex]; | ||
if (parent.priority <= item.priority) { | ||
break; | ||
} | ||
nodes[currentIndex] = parent; | ||
nodes[parentIndex] = item; | ||
currentIndex = parentIndex; | ||
} | ||
} | ||
poll() { | ||
const nodes = this._heap; | ||
const result = nodes[0]; | ||
const lastElement = nodes.pop(); | ||
// heap was empty or removed the last element | ||
if (result == null || nodes.length === 0) { | ||
return result ?? null; | ||
} | ||
let index = 0; | ||
nodes[0] = lastElement ?? null; | ||
const element = nodes[0]; | ||
while (true) { | ||
let swapIndex = null; | ||
const rightChildIndex = (index + 1) * 2; | ||
const leftChildIndex = rightChildIndex - 1; | ||
const rightChild = nodes[rightChildIndex]; | ||
const leftChild = nodes[leftChildIndex]; | ||
// if the left child is smaller, swap with the left | ||
if (leftChild != null && leftChild.priority < element.priority) { | ||
swapIndex = leftChildIndex; | ||
} | ||
// If the right child is smaller than the current swap candidate (the element
// itself, or the left child if it was already chosen), swap with the right child
if (rightChild != null && rightChild.priority < (swapIndex == null ? element : leftChild).priority) { | ||
swapIndex = rightChildIndex; | ||
} | ||
if (swapIndex == null) { | ||
break; | ||
} | ||
nodes[index] = nodes[swapIndex]; | ||
nodes[swapIndex] = element; | ||
index = swapIndex; | ||
} | ||
return result; | ||
} | ||
} | ||
/***/ }), | ||
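// Editor's note: a standalone sketch (not part of the bundle) of the PriorityQueue
// above: computePriority receives the method name and the call arguments, and lower
// values are dequeued first. FIFO order is not guaranteed within one priority. The
// method names are hypothetical.
const {PriorityQueue} = require('jest-worker');

const noop = () => {};
const makeCall = (method, ...args) => ({
  request: [1, false, method, args],
  onStart: noop,
  onEnd: noop,
  onCustomMessage: noop,
});

// Here the priority is simply the first call argument: smaller = more urgent.
const priorityQueue = new PriorityQueue((method, priority) => priority);

priorityQueue.enqueue(makeCall('low', 10));
priorityQueue.enqueue(makeCall('high', 1));
priorityQueue.enqueue(makeCall('medium', 5));

console.log(priorityQueue.dequeue(0).request[2]); // 'high'
console.log(priorityQueue.dequeue(0).request[2]); // 'medium'
console.log(priorityQueue.dequeue(0).request[2]); // 'low'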
/***/ "./src/WorkerPool.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
var _BaseWorkerPool = _interopRequireDefault(__webpack_require__("./src/base/BaseWorkerPool.ts")); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
class WorkerPool extends _BaseWorkerPool.default { | ||
send(workerId, request, onStart, onEnd, onCustomMessage) { | ||
this.restartWorkerIfShutDown(workerId); | ||
this.getWorkerById(workerId).send(request, onStart, onEnd, onCustomMessage); | ||
} | ||
createWorker(workerOptions) { | ||
let Worker; | ||
if (this._options.enableWorkerThreads) { | ||
Worker = (__webpack_require__("./src/workers/NodeThreadsWorker.ts")/* ["default"] */ .Z); | ||
} else { | ||
Worker = (__webpack_require__("./src/workers/ChildProcessWorker.ts")/* ["default"] */ .ZP); | ||
} | ||
return new Worker(workerOptions); | ||
} | ||
} | ||
var _default = exports["default"] = WorkerPool; | ||
/***/ }), | ||
/***/ "./src/base/BaseWorkerPool.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
function _mergeStream() { | ||
const data = _interopRequireDefault(require("merge-stream")); | ||
_mergeStream = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
var _types = __webpack_require__("./src/types.ts"); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
// How long to wait for the child process to terminate
// after CHILD_MESSAGE_END before forcing it to exit.
const FORCE_EXIT_DELAY = 500; | ||
/* istanbul ignore next */ | ||
// eslint-disable-next-line @typescript-eslint/no-empty-function | ||
const emptyMethod = () => {}; | ||
class BaseWorkerPool { | ||
_stderr; | ||
_stdout; | ||
_options; | ||
_workers; | ||
_workerPath; | ||
constructor(workerPath, options) { | ||
this._options = options; | ||
this._workerPath = workerPath; | ||
this._workers = new Array(options.numWorkers); | ||
const stdout = (0, _mergeStream().default)(); | ||
const stderr = (0, _mergeStream().default)(); | ||
const { | ||
forkOptions, | ||
maxRetries, | ||
resourceLimits, | ||
setupArgs | ||
} = options; | ||
for (let i = 0; i < options.numWorkers; i++) { | ||
const workerOptions = { | ||
forkOptions, | ||
idleMemoryLimit: this._options.idleMemoryLimit, | ||
maxRetries, | ||
resourceLimits, | ||
setupArgs, | ||
workerId: i, | ||
workerPath | ||
}; | ||
const worker = this.createWorker(workerOptions); | ||
const workerStdout = worker.getStdout(); | ||
const workerStderr = worker.getStderr(); | ||
if (workerStdout) { | ||
stdout.add(workerStdout); | ||
} | ||
if (workerStderr) { | ||
stderr.add(workerStderr); | ||
} | ||
this._workers[i] = worker; | ||
} | ||
this._stdout = stdout; | ||
this._stderr = stderr; | ||
} | ||
getStderr() { | ||
return this._stderr; | ||
} | ||
getStdout() { | ||
return this._stdout; | ||
} | ||
getWorkers() { | ||
return this._workers; | ||
} | ||
getWorkerById(workerId) { | ||
return this._workers[workerId]; | ||
} | ||
restartWorkerIfShutDown(workerId) { | ||
if (this._workers[workerId].state === _types.WorkerStates.SHUT_DOWN) { | ||
const { | ||
forkOptions, | ||
maxRetries, | ||
resourceLimits, | ||
setupArgs | ||
} = this._options; | ||
const workerOptions = { | ||
forkOptions, | ||
idleMemoryLimit: this._options.idleMemoryLimit, | ||
maxRetries, | ||
resourceLimits, | ||
setupArgs, | ||
workerId, | ||
workerPath: this._workerPath | ||
}; | ||
const worker = this.createWorker(workerOptions); | ||
this._workers[workerId] = worker; | ||
} | ||
} | ||
createWorker(_workerOptions) { | ||
throw Error('Missing method createWorker in WorkerPool'); | ||
} | ||
async start() { | ||
await Promise.all(this._workers.map(async worker => { | ||
await worker.waitForWorkerReady(); | ||
await new Promise((resolve, reject) => { | ||
worker.send([_types.CHILD_MESSAGE_CALL_SETUP], emptyMethod, error => { | ||
if (error) { | ||
reject(error); | ||
} else { | ||
resolve(); | ||
} | ||
}, emptyMethod); | ||
}); | ||
})); | ||
} | ||
async end() { | ||
// We do not cache the request object here. If we did, it would only be
// processed by one of the workers, and we want them all to close.
const workerExitPromises = this._workers.map(async worker => { | ||
worker.send([_types.CHILD_MESSAGE_END, false], emptyMethod, emptyMethod, emptyMethod); | ||
// Schedule a force exit in case worker fails to exit gracefully so | ||
// await worker.waitForExit() never takes longer than FORCE_EXIT_DELAY | ||
let forceExited = false; | ||
const forceExitTimeout = setTimeout(() => { | ||
worker.forceExit(); | ||
forceExited = true; | ||
}, FORCE_EXIT_DELAY); | ||
await worker.waitForExit(); | ||
// The worker exited before the timeout fired, so cancel the scheduled force exit
clearTimeout(forceExitTimeout); | ||
return forceExited; | ||
}); | ||
const workerExits = await Promise.all(workerExitPromises); | ||
return workerExits.reduce((result, forceExited) => ({ | ||
forceExited: result.forceExited || forceExited | ||
}), { | ||
forceExited: false | ||
}); | ||
} | ||
} | ||
exports["default"] = BaseWorkerPool; | ||
/***/ }), | ||
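// Editor's note: a usage sketch (not part of the bundle) of the shutdown contract
// BaseWorkerPool.end implements above: every worker receives CHILD_MESSAGE_END and
// has FORCE_EXIT_DELAY (500ms) to exit gracefully before being force exited. The
// './worker-module' path is hypothetical.
const {Worker} = require('jest-worker');

async function shutdownExample() {
  const worker = new Worker(require.resolve('./worker-module'));
  const {forceExited} = await worker.end();
  if (forceExited) {
    // At least one child did not exit within 500ms and was killed.
    console.error('A worker had to be force exited');
  }
}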
/***/ "./src/types.ts": | ||
/***/ ((__unused_webpack_module, exports) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports.WorkerStates = exports.WorkerEvents = exports.PARENT_MESSAGE_SETUP_ERROR = exports.PARENT_MESSAGE_OK = exports.PARENT_MESSAGE_MEM_USAGE = exports.PARENT_MESSAGE_CUSTOM = exports.PARENT_MESSAGE_CLIENT_ERROR = exports.CHILD_MESSAGE_MEM_USAGE = exports.CHILD_MESSAGE_INITIALIZE = exports.CHILD_MESSAGE_END = exports.CHILD_MESSAGE_CALL_SETUP = exports.CHILD_MESSAGE_CALL = void 0; | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
// Because of the dynamic nature of a worker communication process, all messages | ||
// coming from any of the other processes cannot be typed. Thus, many types | ||
// include "unknown" as a TS type, which is (unfortunately) correct here. | ||
const CHILD_MESSAGE_INITIALIZE = exports.CHILD_MESSAGE_INITIALIZE = 0; | ||
const CHILD_MESSAGE_CALL = exports.CHILD_MESSAGE_CALL = 1; | ||
const CHILD_MESSAGE_END = exports.CHILD_MESSAGE_END = 2; | ||
const CHILD_MESSAGE_MEM_USAGE = exports.CHILD_MESSAGE_MEM_USAGE = 3; | ||
const CHILD_MESSAGE_CALL_SETUP = exports.CHILD_MESSAGE_CALL_SETUP = 4; | ||
const PARENT_MESSAGE_OK = exports.PARENT_MESSAGE_OK = 0; | ||
const PARENT_MESSAGE_CLIENT_ERROR = exports.PARENT_MESSAGE_CLIENT_ERROR = 1; | ||
const PARENT_MESSAGE_SETUP_ERROR = exports.PARENT_MESSAGE_SETUP_ERROR = 2; | ||
const PARENT_MESSAGE_CUSTOM = exports.PARENT_MESSAGE_CUSTOM = 3; | ||
const PARENT_MESSAGE_MEM_USAGE = exports.PARENT_MESSAGE_MEM_USAGE = 4; | ||
// Option objects. | ||
// Messages passed from the parent to the children. | ||
// Messages passed from the children to the parent. | ||
// Queue types. | ||
let WorkerStates = exports.WorkerStates = /*#__PURE__*/function (WorkerStates) { | ||
WorkerStates["STARTING"] = "starting"; | ||
WorkerStates["OK"] = "ok"; | ||
WorkerStates["OUT_OF_MEMORY"] = "oom"; | ||
WorkerStates["RESTARTING"] = "restarting"; | ||
WorkerStates["SHUTTING_DOWN"] = "shutting-down"; | ||
WorkerStates["SHUT_DOWN"] = "shut-down"; | ||
return WorkerStates; | ||
}({}); | ||
let WorkerEvents = exports.WorkerEvents = /*#__PURE__*/function (WorkerEvents) { | ||
WorkerEvents["STATE_CHANGE"] = "state-change"; | ||
return WorkerEvents; | ||
}({}); | ||
/***/ }), | ||
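// Editor's note: an illustrative sketch (not part of the bundle) of the message
// tuples that the constants above tag. Index 0 is always the message type; the
// method name and arguments below are hypothetical.
const exampleCallRequest = [
  1,           // CHILD_MESSAGE_CALL
  false,       // "processed" flag, flipped to true once a worker picks the task up
  'transform', // method exported by the worker module
  ['./a.js'],  // arguments passed to that method
];
const exampleOkResponse = [
  0,                                  // PARENT_MESSAGE_OK
  {code: '/* transformed source */'}, // whatever the worker method returned
];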
/***/ "./src/workers/ChildProcessWorker.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
var __webpack_unused_export__; | ||
__webpack_unused_export__ = ({ | ||
value: true | ||
}); | ||
Object.defineProperty(exports, 'FifoQueue', { | ||
exports.ZP = __webpack_unused_export__ = void 0; | ||
function _child_process() { | ||
const data = require("child_process"); | ||
_child_process = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
function _os() { | ||
const data = require("os"); | ||
_os = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
function _mergeStream() { | ||
const data = _interopRequireDefault(require("merge-stream")); | ||
_mergeStream = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
function _supportsColor() { | ||
const data = require("supports-color"); | ||
_supportsColor = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
var _types = __webpack_require__("./src/types.ts"); | ||
var _WorkerAbstract = _interopRequireDefault(__webpack_require__("./src/workers/WorkerAbstract.ts")); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
const SIGNAL_BASE_EXIT_CODE = 128; | ||
const SIGKILL_EXIT_CODE = SIGNAL_BASE_EXIT_CODE + 9; | ||
const SIGTERM_EXIT_CODE = SIGNAL_BASE_EXIT_CODE + 15; | ||
// How long to wait after SIGTERM before sending SIGKILL | ||
const SIGKILL_DELAY = __webpack_unused_export__ = 500; | ||
/** | ||
* This class wraps the child process and provides a nice interface to | ||
* communicate with. It takes care of: | ||
* | ||
 * - Re-spawning the process if it dies.
 * - Queueing calls while the worker is busy.
 * - Re-sending the request if the worker blew up.
* | ||
 * The reason for queueing them here (even though childProcess.send also has an
 * internal queue) is that the worker could be doing asynchronous work, which
 * would lead the child process to read its receiving buffer and start a second
 * call. By queueing calls here, we don't send the next call to the child until
 * we receive the result of the previous one.
* | ||
* As soon as a request starts to be processed by a worker, its "processed" | ||
* field is changed to "true", so that other workers which might encounter the | ||
* same call skip it. | ||
*/ | ||
class ChildProcessWorker extends _WorkerAbstract.default { | ||
_child; | ||
_options; | ||
_request; | ||
_retries; | ||
_onProcessEnd; | ||
_onCustomMessage; | ||
_stdout; | ||
_stderr; | ||
_stderrBuffer = []; | ||
_memoryUsagePromise; | ||
_resolveMemoryUsage; | ||
_childIdleMemoryUsage; | ||
_childIdleMemoryUsageLimit; | ||
_memoryUsageCheck = false; | ||
_childWorkerPath; | ||
constructor(options) { | ||
super(options); | ||
this._options = options; | ||
this._request = null; | ||
this._stdout = null; | ||
this._stderr = null; | ||
this._childIdleMemoryUsage = null; | ||
this._childIdleMemoryUsageLimit = options.idleMemoryLimit || null; | ||
this._childWorkerPath = options.childWorkerPath || require.resolve('./processChild'); | ||
this.state = _types.WorkerStates.STARTING; | ||
this.initialize(); | ||
} | ||
initialize() { | ||
if (this.state === _types.WorkerStates.OUT_OF_MEMORY || this.state === _types.WorkerStates.SHUTTING_DOWN || this.state === _types.WorkerStates.SHUT_DOWN) { | ||
return; | ||
} | ||
if (this._child && this._child.connected) { | ||
this._child.kill('SIGKILL'); | ||
} | ||
this.state = _types.WorkerStates.STARTING; | ||
const forceColor = _supportsColor().stdout ? { | ||
FORCE_COLOR: '1' | ||
} : {}; | ||
const silent = this._options.silent ?? true; | ||
if (!silent) { | ||
// NOTE: Detecting an out of memory crash is independent of idle memory usage monitoring. We want to | ||
// monitor for a crash occurring so that it can be handled as required and so we can tell the difference | ||
// between an OOM crash and another kind of crash. We need to do this because if a worker crashes due to | ||
// an OOM event sometimes it isn't seen by the worker pool and it just sits there waiting for the worker | ||
// to respond and it never will. | ||
console.warn('Unable to detect out of memory event if silent === false'); | ||
} | ||
this._stderrBuffer = []; | ||
const options = { | ||
cwd: process.cwd(), | ||
env: { | ||
...process.env, | ||
JEST_WORKER_ID: String(this._options.workerId + 1), | ||
// 0-indexed workerId, 1-indexed JEST_WORKER_ID | ||
...forceColor | ||
}, | ||
// Suppress --debug / --inspect flags while preserving others (like --harmony). | ||
execArgv: process.execArgv.filter(v => !/^--(debug|inspect)/.test(v)), | ||
// default to advanced serialization in order to match worker threads | ||
serialization: 'advanced', | ||
silent, | ||
...this._options.forkOptions | ||
}; | ||
this._child = (0, _child_process().fork)(this._childWorkerPath, [], options); | ||
if (this._child.stdout) { | ||
if (!this._stdout) { | ||
// We need to add a permanent stream to the merged stream to prevent it | ||
// from ending when the subprocess stream ends | ||
this._stdout = (0, _mergeStream().default)(this._getFakeStream()); | ||
} | ||
this._stdout.add(this._child.stdout); | ||
} | ||
if (this._child.stderr) { | ||
if (!this._stderr) { | ||
// We need to add a permanent stream to the merged stream to prevent it | ||
// from ending when the subprocess stream ends | ||
this._stderr = (0, _mergeStream().default)(this._getFakeStream()); | ||
} | ||
this._stderr.add(this._child.stderr); | ||
this._child.stderr.on('data', this.stderrDataHandler.bind(this)); | ||
} | ||
this._child.on('message', this._onMessage.bind(this)); | ||
this._child.on('exit', this._onExit.bind(this)); | ||
this._child.on('disconnect', this._onDisconnect.bind(this)); | ||
this._child.send([_types.CHILD_MESSAGE_INITIALIZE, false, this._options.workerPath, this._options.setupArgs]); | ||
this._retries++; | ||
// If we exceeded the number of retries, we emulate an error reply coming from
// the child. This avoids duplicating the code that cleans the queue and
// schedules the next call.
if (this._retries > this._options.maxRetries) { | ||
const error = new Error(`Jest worker encountered ${this._retries} child process exceptions, exceeding retry limit`); | ||
this._onMessage([_types.PARENT_MESSAGE_CLIENT_ERROR, error.name, error.message, error.stack, { | ||
type: 'WorkerError' | ||
}]); | ||
// Clear the request so we don't keep executing it. | ||
this._request = null; | ||
} | ||
this.state = _types.WorkerStates.OK; | ||
if (this._resolveWorkerReady) { | ||
this._resolveWorkerReady(); | ||
} | ||
} | ||
stderrDataHandler(chunk) { | ||
if (chunk) { | ||
this._stderrBuffer.push(Buffer.from(chunk)); | ||
} | ||
this._detectOutOfMemoryCrash(); | ||
if (this.state === _types.WorkerStates.OUT_OF_MEMORY) { | ||
this._workerReadyPromise = undefined; | ||
this._resolveWorkerReady = undefined; | ||
this.killChild(); | ||
this._shutdown(); | ||
} | ||
} | ||
_detectOutOfMemoryCrash() { | ||
try { | ||
const bufferStr = Buffer.concat(this._stderrBuffer).toString('utf8'); | ||
if (bufferStr.includes('heap out of memory') || bufferStr.includes('allocation failure;') || bufferStr.includes('Last few GCs')) { | ||
if (this.state === _types.WorkerStates.OK || this.state === _types.WorkerStates.STARTING) { | ||
this.state = _types.WorkerStates.OUT_OF_MEMORY; | ||
} | ||
} | ||
} catch (err) { | ||
console.error('Error looking for out of memory crash', err); | ||
} | ||
} | ||
_onDisconnect() { | ||
this._workerReadyPromise = undefined; | ||
this._resolveWorkerReady = undefined; | ||
this._detectOutOfMemoryCrash(); | ||
if (this.state === _types.WorkerStates.OUT_OF_MEMORY) { | ||
this.killChild(); | ||
this._shutdown(); | ||
} | ||
} | ||
_onMessage(response) { | ||
// Ignore messages not intended for us | ||
if (!Array.isArray(response)) return; | ||
// TODO: Add appropriate type check | ||
let error; | ||
switch (response[0]) { | ||
case _types.PARENT_MESSAGE_OK: | ||
this._onProcessEnd(null, response[1]); | ||
break; | ||
case _types.PARENT_MESSAGE_CLIENT_ERROR: | ||
error = response[4]; | ||
if (error != null && typeof error === 'object') { | ||
const extra = error; | ||
// @ts-expect-error: no index | ||
const NativeCtor = globalThis[response[1]]; | ||
const Ctor = typeof NativeCtor === 'function' ? NativeCtor : Error; | ||
error = new Ctor(response[2]); | ||
error.type = response[1]; | ||
error.stack = response[3]; | ||
for (const key in extra) { | ||
error[key] = extra[key]; | ||
} | ||
} | ||
this._onProcessEnd(error, null); | ||
break; | ||
case _types.PARENT_MESSAGE_SETUP_ERROR: | ||
error = new Error(`Error when calling setup: ${response[2]}`); | ||
error.type = response[1]; | ||
error.stack = response[3]; | ||
this._onProcessEnd(error, null); | ||
break; | ||
case _types.PARENT_MESSAGE_CUSTOM: | ||
this._onCustomMessage(response[1]); | ||
break; | ||
case _types.PARENT_MESSAGE_MEM_USAGE: | ||
this._childIdleMemoryUsage = response[1]; | ||
if (this._resolveMemoryUsage) { | ||
this._resolveMemoryUsage(response[1]); | ||
this._resolveMemoryUsage = undefined; | ||
this._memoryUsagePromise = undefined; | ||
} | ||
this._performRestartIfRequired(); | ||
break; | ||
default: | ||
// Ignore messages not intended for us | ||
break; | ||
} | ||
} | ||
_performRestartIfRequired() { | ||
if (this._memoryUsageCheck) { | ||
this._memoryUsageCheck = false; | ||
let limit = this._childIdleMemoryUsageLimit; | ||
// TODO: At some point it would make sense to use stringToBytes from
// jest-config; however, since this package does not depend on any other
// Jest packages, that can wait until some other time.
if (limit && limit > 0 && limit <= 1) { | ||
limit = Math.floor((0, _os().totalmem)() * limit); | ||
} else if (limit) { | ||
limit = Math.floor(limit); | ||
} | ||
if (limit && this._childIdleMemoryUsage && this._childIdleMemoryUsage > limit) { | ||
this.state = _types.WorkerStates.RESTARTING; | ||
this.killChild(); | ||
} | ||
} | ||
} | ||
_onExit(exitCode, signal) { | ||
this._workerReadyPromise = undefined; | ||
this._resolveWorkerReady = undefined; | ||
this._detectOutOfMemoryCrash(); | ||
if (exitCode !== 0 && this.state === _types.WorkerStates.OUT_OF_MEMORY) { | ||
this._onProcessEnd(new Error('Jest worker ran out of memory and crashed'), null); | ||
this._shutdown(); | ||
} else if (exitCode !== 0 && exitCode !== null && exitCode !== SIGTERM_EXIT_CODE && exitCode !== SIGKILL_EXIT_CODE && this.state !== _types.WorkerStates.SHUTTING_DOWN || this.state === _types.WorkerStates.RESTARTING) { | ||
this.state = _types.WorkerStates.RESTARTING; | ||
this.initialize(); | ||
if (this._request) { | ||
this._child.send(this._request); | ||
} | ||
} else { | ||
// At this point, it's not clear why the child process exited. There could | ||
// be several reasons: | ||
// | ||
// 1. The child process exited successfully after finishing its work. | ||
// This is the most likely case. | ||
// 2. The child process crashed in a manner that wasn't caught through | ||
// any of the heuristic-based checks above. | ||
// 3. The child process was killed by another process or daemon unrelated | ||
// to Jest. For example, oom-killer on Linux may have picked the child | ||
// process to kill because overall system memory is constrained. | ||
// | ||
// If there's a pending request to the child process in any of those | ||
// situations, the request still needs to be handled in some manner before | ||
// entering the shutdown phase. Otherwise the caller expecting a response | ||
// from the worker will never receive indication that something unexpected | ||
// happened and hang forever. | ||
// | ||
// In normal operation, the request is handled and cleared before the | ||
// child process exits. If it's still present, it's not clear what | ||
// happened and probably best to throw an error. In practice, this usually | ||
// happens when the child process is killed externally. | ||
// | ||
// There's a reasonable argument that the child process should be retried | ||
// with request re-sent in this scenario. However, if the problem was due | ||
// to situations such as oom-killer attempting to free up system | ||
// resources, retrying would exacerbate the problem. | ||
const isRequestStillPending = !!this._request; | ||
if (isRequestStillPending) { | ||
// If a signal is present, we can be reasonably confident the process | ||
// was killed externally. Log this fact so it's more clear to users that | ||
// something went wrong externally, rather than a bug in Jest itself. | ||
const error = new Error(signal == null ? `A jest worker process (pid=${this._child.pid}) crashed for an unknown reason: exitCode=${exitCode}` : `A jest worker process (pid=${this._child.pid}) was terminated by another process: signal=${signal}, exitCode=${exitCode}. Operating system logs may contain more information on why this occurred.`); | ||
this._onProcessEnd(error, null); | ||
} | ||
this._shutdown(); | ||
} | ||
} | ||
send(request, onProcessStart, onProcessEnd, onCustomMessage) { | ||
this._stderrBuffer = []; | ||
onProcessStart(this); | ||
this._onProcessEnd = (...args) => { | ||
const hasRequest = !!this._request; | ||
// Clean the request to avoid sending past requests to workers that fail | ||
// while waiting for a new request (timers, unhandled rejections...) | ||
this._request = null; | ||
if (this._childIdleMemoryUsageLimit && this._child.connected && hasRequest) { | ||
this.checkMemoryUsage(); | ||
} | ||
return onProcessEnd(...args); | ||
}; | ||
this._onCustomMessage = (...arg) => onCustomMessage(...arg); | ||
this._request = request; | ||
this._retries = 0; | ||
// eslint-disable-next-line @typescript-eslint/no-empty-function | ||
this._child.send(request, () => {}); | ||
} | ||
waitForExit() { | ||
return this._exitPromise; | ||
} | ||
killChild() { | ||
// We store a reference so that there's no way we can accidentally | ||
// kill a new worker that has been spawned. | ||
const childToKill = this._child; | ||
childToKill.kill('SIGTERM'); | ||
return setTimeout(() => childToKill.kill('SIGKILL'), SIGKILL_DELAY); | ||
} | ||
forceExit() { | ||
this.state = _types.WorkerStates.SHUTTING_DOWN; | ||
const sigkillTimeout = this.killChild(); | ||
this._exitPromise.then(() => clearTimeout(sigkillTimeout)); | ||
} | ||
getWorkerId() { | ||
return this._options.workerId; | ||
} | ||
/** | ||
* Gets the process id of the worker. | ||
* | ||
* @returns Process id. | ||
*/ | ||
getWorkerSystemId() { | ||
return this._child.pid; | ||
} | ||
getStdout() { | ||
return this._stdout; | ||
} | ||
getStderr() { | ||
return this._stderr; | ||
} | ||
/** | ||
* Gets the last reported memory usage. | ||
* | ||
* @returns Memory usage in bytes. | ||
*/ | ||
getMemoryUsage() { | ||
if (!this._memoryUsagePromise) { | ||
let rejectCallback; | ||
const promise = new Promise((resolve, reject) => { | ||
this._resolveMemoryUsage = resolve; | ||
rejectCallback = reject; | ||
}); | ||
this._memoryUsagePromise = promise; | ||
if (!this._child.connected && rejectCallback) { | ||
rejectCallback(new Error('Child process is not running.')); | ||
this._memoryUsagePromise = undefined; | ||
this._resolveMemoryUsage = undefined; | ||
return promise; | ||
} | ||
this._child.send([_types.CHILD_MESSAGE_MEM_USAGE], err => { | ||
if (err && rejectCallback) { | ||
this._memoryUsagePromise = undefined; | ||
this._resolveMemoryUsage = undefined; | ||
rejectCallback(err); | ||
} | ||
}); | ||
return promise; | ||
} | ||
return this._memoryUsagePromise; | ||
} | ||
/** | ||
* Gets updated memory usage and restarts if required | ||
*/ | ||
checkMemoryUsage() { | ||
if (this._childIdleMemoryUsageLimit) { | ||
this._memoryUsageCheck = true; | ||
this._child.send([_types.CHILD_MESSAGE_MEM_USAGE], err => { | ||
if (err) { | ||
console.error('Unable to check memory usage', err); | ||
} | ||
}); | ||
} else { | ||
console.warn('Memory usage of workers can only be checked if a limit is set'); | ||
} | ||
} | ||
isWorkerRunning() { | ||
return this._child.connected && !this._child.killed; | ||
} | ||
} | ||
exports.ZP = ChildProcessWorker; | ||
/***/ }), | ||
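// Editor's note: a sketch (not part of the bundle) of how idleMemoryLimit is
// interpreted by _performRestartIfRequired above: a value in (0, 1] is treated as a
// fraction of total system memory, anything larger as an absolute byte count.
const os = require('os');

function resolveIdleMemoryLimit(limit) {
  if (limit && limit > 0 && limit <= 1) {
    return Math.floor(os.totalmem() * limit); // e.g. 0.2 -> 20% of total memory
  }
  return limit ? Math.floor(limit) : null; // e.g. 512 * 1024 * 1024 -> 512 MiB
}

// Through the public API this is simply an option, e.g.:
//   new Worker(require.resolve('./worker-module'), {idleMemoryLimit: 0.2});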
/***/ "./src/workers/NodeThreadsWorker.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
var __webpack_unused_export__; | ||
__webpack_unused_export__ = ({ | ||
value: true | ||
}); | ||
exports.Z = void 0; | ||
function _os() { | ||
const data = require("os"); | ||
_os = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
function _worker_threads() { | ||
const data = require("worker_threads"); | ||
_worker_threads = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
function _mergeStream() { | ||
const data = _interopRequireDefault(require("merge-stream")); | ||
_mergeStream = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
var _types = __webpack_require__("./src/types.ts"); | ||
var _WorkerAbstract = _interopRequireDefault(__webpack_require__("./src/workers/WorkerAbstract.ts")); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
class ExperimentalWorker extends _WorkerAbstract.default { | ||
_worker; | ||
_options; | ||
_request; | ||
_retries; | ||
_onProcessEnd; | ||
_onCustomMessage; | ||
_stdout; | ||
_stderr; | ||
_memoryUsagePromise; | ||
_resolveMemoryUsage; | ||
_childWorkerPath; | ||
_childIdleMemoryUsage; | ||
_childIdleMemoryUsageLimit; | ||
_memoryUsageCheck = false; | ||
constructor(options) { | ||
super(options); | ||
this._options = options; | ||
this._request = null; | ||
this._stdout = null; | ||
this._stderr = null; | ||
this._childWorkerPath = options.childWorkerPath || require.resolve('./threadChild'); | ||
this._childIdleMemoryUsage = null; | ||
this._childIdleMemoryUsageLimit = options.idleMemoryLimit || null; | ||
this.initialize(); | ||
} | ||
initialize() { | ||
if (this.state === _types.WorkerStates.OUT_OF_MEMORY || this.state === _types.WorkerStates.SHUTTING_DOWN || this.state === _types.WorkerStates.SHUT_DOWN) { | ||
return; | ||
} | ||
if (this._worker) { | ||
this._worker.terminate(); | ||
} | ||
this.state = _types.WorkerStates.STARTING; | ||
this._worker = new (_worker_threads().Worker)(this._childWorkerPath, { | ||
eval: false, | ||
resourceLimits: this._options.resourceLimits, | ||
stderr: true, | ||
stdout: true, | ||
workerData: this._options.workerData, | ||
...this._options.forkOptions | ||
}); | ||
if (this._worker.stdout) { | ||
if (!this._stdout) { | ||
// We need to add a permanent stream to the merged stream to prevent it | ||
// from ending when the subprocess stream ends | ||
this._stdout = (0, _mergeStream().default)(this._getFakeStream()); | ||
} | ||
this._stdout.add(this._worker.stdout); | ||
} | ||
if (this._worker.stderr) { | ||
if (!this._stderr) { | ||
// We need to add a permanent stream to the merged stream to prevent it | ||
// from ending when the subprocess stream ends | ||
this._stderr = (0, _mergeStream().default)(this._getFakeStream()); | ||
} | ||
this._stderr.add(this._worker.stderr); | ||
} | ||
// This can be useful for debugging. | ||
if (!(this._options.silent ?? true)) { | ||
this._worker.stdout.setEncoding('utf8'); | ||
// eslint-disable-next-line no-console | ||
this._worker.stdout.on('data', console.log); | ||
this._worker.stderr.setEncoding('utf8'); | ||
this._worker.stderr.on('data', console.error); | ||
} | ||
this._worker.on('message', this._onMessage.bind(this)); | ||
this._worker.on('exit', this._onExit.bind(this)); | ||
this._worker.on('error', this._onError.bind(this)); | ||
this._worker.postMessage([_types.CHILD_MESSAGE_INITIALIZE, false, this._options.workerPath, this._options.setupArgs, String(this._options.workerId + 1) // 0-indexed workerId, 1-indexed JEST_WORKER_ID | ||
]); | ||
this._retries++; | ||
// If we exceeded the number of retries, we emulate an error reply coming from
// the child. This avoids duplicating the code that cleans the queue and
// schedules the next call.
if (this._retries > this._options.maxRetries) { | ||
const error = new Error('Call retries were exceeded'); | ||
this._onMessage([_types.PARENT_MESSAGE_CLIENT_ERROR, error.name, error.message, error.stack, { | ||
type: 'WorkerError' | ||
}]); | ||
} | ||
this.state = _types.WorkerStates.OK; | ||
if (this._resolveWorkerReady) { | ||
this._resolveWorkerReady(); | ||
} | ||
} | ||
_onError(error) { | ||
if (error.message.includes('heap out of memory')) { | ||
this.state = _types.WorkerStates.OUT_OF_MEMORY; | ||
// Threads don't behave like processes: they don't crash when they run out of
// memory. For consistency we want them to behave like processes, so we call
// terminate to simulate an unplanned crash.
this._worker.terminate(); | ||
} | ||
} | ||
_onMessage(response) { | ||
// Ignore messages not intended for us | ||
if (!Array.isArray(response)) return; | ||
let error; | ||
switch (response[0]) { | ||
case _types.PARENT_MESSAGE_OK: | ||
this._onProcessEnd(null, response[1]); | ||
break; | ||
case _types.PARENT_MESSAGE_CLIENT_ERROR: | ||
error = response[4]; | ||
if (error != null && typeof error === 'object') { | ||
const extra = error; | ||
// @ts-expect-error: no index | ||
const NativeCtor = globalThis[response[1]]; | ||
const Ctor = typeof NativeCtor === 'function' ? NativeCtor : Error; | ||
error = new Ctor(response[2]); | ||
error.type = response[1]; | ||
error.stack = response[3]; | ||
for (const key in extra) { | ||
// @ts-expect-error: no index | ||
error[key] = extra[key]; | ||
} | ||
} | ||
this._onProcessEnd(error, null); | ||
break; | ||
case _types.PARENT_MESSAGE_SETUP_ERROR: | ||
error = new Error(`Error when calling setup: ${response[2]}`); | ||
// @ts-expect-error: adding custom properties to errors. | ||
error.type = response[1]; | ||
error.stack = response[3]; | ||
this._onProcessEnd(error, null); | ||
break; | ||
case _types.PARENT_MESSAGE_CUSTOM: | ||
this._onCustomMessage(response[1]); | ||
break; | ||
case _types.PARENT_MESSAGE_MEM_USAGE: | ||
this._childIdleMemoryUsage = response[1]; | ||
if (this._resolveMemoryUsage) { | ||
this._resolveMemoryUsage(response[1]); | ||
this._resolveMemoryUsage = undefined; | ||
this._memoryUsagePromise = undefined; | ||
} | ||
this._performRestartIfRequired(); | ||
break; | ||
default: | ||
// Ignore messages not intended for us | ||
break; | ||
} | ||
} | ||
_onExit(exitCode) { | ||
this._workerReadyPromise = undefined; | ||
this._resolveWorkerReady = undefined; | ||
if (exitCode !== 0 && this.state === _types.WorkerStates.OUT_OF_MEMORY) { | ||
this._onProcessEnd(new Error('Jest worker ran out of memory and crashed'), null); | ||
this._shutdown(); | ||
} else if (exitCode !== 0 && this.state !== _types.WorkerStates.SHUTTING_DOWN && this.state !== _types.WorkerStates.SHUT_DOWN || this.state === _types.WorkerStates.RESTARTING) { | ||
this.initialize(); | ||
if (this._request) { | ||
this._worker.postMessage(this._request); | ||
} | ||
} else { | ||
// If the worker thread exits while a request is still pending, throw an | ||
// error. This is unexpected and tests may not have run to completion. | ||
const isRequestStillPending = !!this._request; | ||
if (isRequestStillPending) { | ||
this._onProcessEnd(new Error('A Jest worker thread exited unexpectedly before finishing tests for an unknown reason. One of the ways this can happen is if process.exit() was called in testing code.'), null); | ||
} | ||
this._shutdown(); | ||
} | ||
} | ||
waitForExit() { | ||
return this._exitPromise; | ||
} | ||
forceExit() { | ||
this.state = _types.WorkerStates.SHUTTING_DOWN; | ||
this._worker.terminate(); | ||
} | ||
send(request, onProcessStart, onProcessEnd, onCustomMessage) { | ||
onProcessStart(this); | ||
this._onProcessEnd = (...args) => { | ||
const hasRequest = !!this._request; | ||
// Clean the request to avoid sending past requests to workers that fail | ||
// while waiting for a new request (timers, unhandled rejections...) | ||
this._request = null; | ||
if (this._childIdleMemoryUsageLimit && hasRequest) { | ||
this.checkMemoryUsage(); | ||
} | ||
const res = onProcessEnd?.(...args); | ||
// Clean up the reference so related closures can be garbage collected. | ||
onProcessEnd = null; | ||
return res; | ||
}; | ||
this._onCustomMessage = (...arg) => onCustomMessage(...arg); | ||
this._request = request; | ||
this._retries = 0; | ||
this._worker.postMessage(request); | ||
} | ||
getWorkerId() { | ||
return this._options.workerId; | ||
} | ||
getStdout() { | ||
return this._stdout; | ||
} | ||
getStderr() { | ||
return this._stderr; | ||
} | ||
_performRestartIfRequired() { | ||
if (this._memoryUsageCheck) { | ||
this._memoryUsageCheck = false; | ||
let limit = this._childIdleMemoryUsageLimit; | ||
// TODO: At some point it would make sense to use stringToBytes from
// jest-config; however, since this package does not depend on any other
// Jest packages, that can wait until some other time.
if (limit && limit > 0 && limit <= 1) { | ||
limit = Math.floor((0, _os().totalmem)() * limit); | ||
} else if (limit) { | ||
limit = Math.floor(limit); | ||
} | ||
if (limit && this._childIdleMemoryUsage && this._childIdleMemoryUsage > limit) { | ||
this.state = _types.WorkerStates.RESTARTING; | ||
this._worker.terminate(); | ||
} | ||
} | ||
} | ||
/** | ||
* Gets the last reported memory usage. | ||
* | ||
* @returns Memory usage in bytes. | ||
*/ | ||
getMemoryUsage() { | ||
if (!this._memoryUsagePromise) { | ||
let rejectCallback; | ||
const promise = new Promise((resolve, reject) => { | ||
this._resolveMemoryUsage = resolve; | ||
rejectCallback = reject; | ||
}); | ||
this._memoryUsagePromise = promise; | ||
if (!this._worker.threadId) { | ||
rejectCallback(new Error('Child process is not running.')); | ||
this._memoryUsagePromise = undefined; | ||
this._resolveMemoryUsage = undefined; | ||
return promise; | ||
} | ||
try { | ||
this._worker.postMessage([_types.CHILD_MESSAGE_MEM_USAGE]); | ||
} catch (err) { | ||
this._memoryUsagePromise = undefined; | ||
this._resolveMemoryUsage = undefined; | ||
rejectCallback(err); | ||
} | ||
return promise; | ||
} | ||
return this._memoryUsagePromise; | ||
} | ||
/** | ||
* Gets updated memory usage and restarts if required | ||
*/ | ||
checkMemoryUsage() { | ||
if (this._childIdleMemoryUsageLimit) { | ||
this._memoryUsageCheck = true; | ||
this._worker.postMessage([_types.CHILD_MESSAGE_MEM_USAGE]); | ||
} else { | ||
console.warn('Memory usage of workers can only be checked if a limit is set'); | ||
} | ||
} | ||
/** | ||
* Gets the thread id of the worker. | ||
* | ||
* @returns Thread id. | ||
*/ | ||
getWorkerSystemId() { | ||
return this._worker.threadId; | ||
} | ||
isWorkerRunning() { | ||
return this._worker.threadId >= 0; | ||
} | ||
} | ||
exports.Z = ExperimentalWorker; | ||
/***/ }), | ||
/***/ "./src/workers/WorkerAbstract.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = void 0; | ||
function _stream() { | ||
const data = require("stream"); | ||
_stream = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
var _types = __webpack_require__("./src/types.ts"); | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
class WorkerAbstract extends _stream().EventEmitter { | ||
/** | ||
* DO NOT WRITE TO THIS DIRECTLY. | ||
* Use this.state getter/setters so events are emitted correctly. | ||
*/ | ||
#state = _types.WorkerStates.STARTING; | ||
_fakeStream = null; | ||
_exitPromise; | ||
_resolveExitPromise; | ||
_workerReadyPromise; | ||
_resolveWorkerReady; | ||
get state() { | ||
return this.#state; | ||
} | ||
set state(value) { | ||
if (this.#state !== value) { | ||
const oldState = this.#state; | ||
this.#state = value; | ||
this.emit(_types.WorkerEvents.STATE_CHANGE, value, oldState); | ||
} | ||
} | ||
constructor(options) { | ||
super(); | ||
if (typeof options.on === 'object') { | ||
for (const [event, handlers] of Object.entries(options.on)) { | ||
// Can't do Array.isArray on a ReadonlyArray<T>. | ||
// https://github.com/microsoft/TypeScript/issues/17002 | ||
if (typeof handlers === 'function') { | ||
super.on(event, handlers); | ||
} else { | ||
for (const handler of handlers) { | ||
super.on(event, handler); | ||
} | ||
} | ||
} | ||
} | ||
this._exitPromise = new Promise(resolve => { | ||
this._resolveExitPromise = resolve; | ||
}); | ||
this._exitPromise.then(() => { | ||
this.state = _types.WorkerStates.SHUT_DOWN; | ||
}); | ||
} | ||
/** | ||
* Wait for the worker child process to be ready to handle requests. | ||
* | ||
* @returns Promise which resolves when ready. | ||
*/ | ||
waitForWorkerReady() { | ||
if (!this._workerReadyPromise) { | ||
this._workerReadyPromise = new Promise((resolve, reject) => { | ||
let settled = false; | ||
let to; | ||
switch (this.state) { | ||
case _types.WorkerStates.OUT_OF_MEMORY: | ||
case _types.WorkerStates.SHUTTING_DOWN: | ||
case _types.WorkerStates.SHUT_DOWN: | ||
settled = true; | ||
reject(new Error(`Worker state means it will never be ready: ${this.state}`)); | ||
break; | ||
case _types.WorkerStates.STARTING: | ||
case _types.WorkerStates.RESTARTING: | ||
this._resolveWorkerReady = () => { | ||
settled = true; | ||
resolve(); | ||
if (to) { | ||
clearTimeout(to); | ||
} | ||
}; | ||
break; | ||
case _types.WorkerStates.OK: | ||
settled = true; | ||
resolve(); | ||
break; | ||
} | ||
if (!settled) { | ||
to = setTimeout(() => { | ||
if (!settled) { | ||
reject(new Error('Timeout starting worker')); | ||
} | ||
}, 500); | ||
} | ||
}); | ||
} | ||
return this._workerReadyPromise; | ||
} | ||
/** | ||
* Used to shut down the current working instance once the children have been | ||
* killed off. | ||
*/ | ||
_shutdown() { | ||
this.state = _types.WorkerStates.SHUT_DOWN;
// End the permanent stream so the merged stream ends too
if (this._fakeStream) { | ||
this._fakeStream.end(); | ||
this._fakeStream = null; | ||
} | ||
this._resolveExitPromise(); | ||
} | ||
_getFakeStream() { | ||
if (!this._fakeStream) { | ||
this._fakeStream = new (_stream().PassThrough)(); | ||
} | ||
return this._fakeStream; | ||
} | ||
} | ||
exports["default"] = WorkerAbstract; | ||
/***/ }), | ||
/***/ "./src/workers/messageParent.ts": | ||
/***/ ((__unused_webpack_module, exports, __webpack_require__) => { | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
exports["default"] = messageParent; | ||
function _worker_threads() { | ||
const data = require("worker_threads"); | ||
_worker_threads = function () { | ||
return data; | ||
}; | ||
return data; | ||
} | ||
var _types = __webpack_require__("./src/types.ts"); | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
*/ | ||
function messageParent(message, parentProcess = process) { | ||
if (!_worker_threads().isMainThread && _worker_threads().parentPort != null) { | ||
_worker_threads().parentPort.postMessage([_types.PARENT_MESSAGE_CUSTOM, message]); | ||
} else if (typeof parentProcess.send === 'function') { | ||
parentProcess.send([_types.PARENT_MESSAGE_CUSTOM, message]); | ||
} else { | ||
throw new Error('"messageParent" can only be used inside a worker'); | ||
} | ||
} | ||
/***/ }) | ||
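// Editor's note: a usage sketch (not part of the bundle) pairing messageParent in a
// worker module with the UNSTABLE_onCustomMessage hook that Farm.doWork attaches to
// the returned promise. The './progress-worker' module and its `heavyTask` export
// are hypothetical and would contain something like:
//   const {messageParent} = require('jest-worker');
//   exports.heavyTask = async () => {
//     messageParent({progress: 50}); // forwarded as PARENT_MESSAGE_CUSTOM
//     return 'done';
//   };
const {Worker} = require('jest-worker');

async function customMessageExample() {
  const worker = new Worker(require.resolve('./progress-worker'));
  const promise = worker.heavyTask();
  // The listener receives anything the child sends via messageParent.
  promise.UNSTABLE_onCustomMessage(message => console.log('progress:', message));
  const result = await promise;
  await worker.end();
  return result;
}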
/******/ }); | ||
/************************************************************************/ | ||
/******/ // The module cache | ||
/******/ var __webpack_module_cache__ = {}; | ||
/******/ | ||
/******/ // The require function | ||
/******/ function __webpack_require__(moduleId) { | ||
/******/ // Check if module is in cache | ||
/******/ var cachedModule = __webpack_module_cache__[moduleId]; | ||
/******/ if (cachedModule !== undefined) { | ||
/******/ return cachedModule.exports; | ||
/******/ } | ||
/******/ // Create a new module (and put it into the cache) | ||
/******/ var module = __webpack_module_cache__[moduleId] = { | ||
/******/ // no module.id needed | ||
/******/ // no module.loaded needed | ||
/******/ exports: {} | ||
/******/ }; | ||
/******/ | ||
/******/ // Execute the module function | ||
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__); | ||
/******/ | ||
/******/ // Return the exports of the module | ||
/******/ return module.exports; | ||
/******/ } | ||
/******/ | ||
/************************************************************************/ | ||
var __webpack_exports__ = {}; | ||
// This entry needs to be wrapped in an IIFE because it needs to be isolated against other modules in the chunk.
(() => { | ||
var exports = __webpack_exports__; | ||
Object.defineProperty(exports, "__esModule", ({ | ||
value: true | ||
})); | ||
Object.defineProperty(exports, "FifoQueue", ({ | ||
enumerable: true, | ||
@@ -11,4 +1619,4 @@ get: function () { | ||
} | ||
}); | ||
Object.defineProperty(exports, 'PriorityQueue', { | ||
})); | ||
Object.defineProperty(exports, "PriorityQueue", ({ | ||
enumerable: true, | ||
@@ -18,5 +1626,5 @@ get: function () { | ||
} | ||
}); | ||
})); | ||
exports.Worker = void 0; | ||
Object.defineProperty(exports, 'messageParent', { | ||
Object.defineProperty(exports, "messageParent", ({ | ||
enumerable: true, | ||
@@ -26,5 +1634,5 @@ get: function () { | ||
} | ||
}); | ||
})); | ||
function _os() { | ||
const data = require('os'); | ||
const data = require("os"); | ||
_os = function () { | ||
@@ -36,3 +1644,3 @@ return data; | ||
function _path() { | ||
const data = require('path'); | ||
const data = require("path"); | ||
_path = function () { | ||
@@ -44,3 +1652,3 @@ return data; | ||
function _url() { | ||
const data = require('url'); | ||
const data = require("url"); | ||
_url = function () { | ||
@@ -51,10 +1659,8 @@ return data; | ||
} | ||
var _Farm = _interopRequireDefault(require('./Farm')); | ||
var _WorkerPool = _interopRequireDefault(require('./WorkerPool')); | ||
var _PriorityQueue = _interopRequireDefault(require('./PriorityQueue')); | ||
var _FifoQueue = _interopRequireDefault(require('./FifoQueue')); | ||
var _messageParent = _interopRequireDefault(require('./workers/messageParent')); | ||
function _interopRequireDefault(obj) { | ||
return obj && obj.__esModule ? obj : {default: obj}; | ||
} | ||
var _Farm = _interopRequireDefault(__webpack_require__("./src/Farm.ts")); | ||
var _WorkerPool = _interopRequireDefault(__webpack_require__("./src/WorkerPool.ts")); | ||
var _PriorityQueue = _interopRequireDefault(__webpack_require__("./src/PriorityQueue.ts")); | ||
var _FifoQueue = _interopRequireDefault(__webpack_require__("./src/FifoQueue.ts")); | ||
var _messageParent = _interopRequireDefault(__webpack_require__("./src/workers/messageParent.ts")); | ||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } | ||
/** | ||
@@ -73,5 +1679,3 @@ * Copyright (c) Meta Platforms, Inc. and affiliates. | ||
const module = require(workerPath); | ||
exposedMethods = Object.keys(module).filter( | ||
name => typeof module[name] === 'function' | ||
); | ||
exposedMethods = Object.keys(module).filter(name => typeof module[name] === 'function'); | ||
if (typeof module === 'function') { | ||
@@ -84,5 +1688,3 @@ exposedMethods = [...exposedMethods, 'default']; | ||
function getNumberOfCpus() { | ||
return typeof _os().availableParallelism === 'function' | ||
? (0, _os().availableParallelism)() | ||
: (0, _os().cpus)().length; | ||
return typeof _os().availableParallelism === 'function' ? (0, _os().availableParallelism)() : (0, _os().cpus)().length; | ||
} | ||
@@ -138,4 +1740,3 @@ | ||
maxRetries: this._options.maxRetries ?? 3, | ||
numWorkers: | ||
this._options.numWorkers ?? Math.max(getNumberOfCpus() - 1, 1), | ||
numWorkers: this._options.numWorkers ?? Math.max(getNumberOfCpus() - 1, 1), | ||
resourceLimits: this._options.resourceLimits ?? {}, | ||
@@ -145,24 +1746,17 @@ setupArgs: this._options.setupArgs ?? [] | ||
if (this._options.WorkerPool) { | ||
this._workerPool = new this._options.WorkerPool( | ||
workerPath, | ||
workerPoolOptions | ||
); | ||
this._workerPool = new this._options.WorkerPool(workerPath, workerPoolOptions); | ||
} else { | ||
this._workerPool = new _WorkerPool.default(workerPath, workerPoolOptions); | ||
} | ||
this._farm = new _Farm.default( | ||
workerPoolOptions.numWorkers, | ||
this._workerPool.send.bind(this._workerPool), | ||
{ | ||
computeWorkerKey: this._options.computeWorkerKey, | ||
taskQueue: this._options.taskQueue, | ||
workerSchedulingPolicy: this._options.workerSchedulingPolicy | ||
} | ||
); | ||
this._farm = new _Farm.default(workerPoolOptions.numWorkers, this._workerPool.send.bind(this._workerPool), { | ||
computeWorkerKey: this._options.computeWorkerKey, | ||
taskQueue: this._options.taskQueue, | ||
workerSchedulingPolicy: this._options.workerSchedulingPolicy | ||
}); | ||
this._bindExposedWorkerMethods(workerPath, this._options); | ||
} | ||
_bindExposedWorkerMethods(workerPath, options) { | ||
getExposedMethods(workerPath, options).forEach(name => { | ||
for (const name of getExposedMethods(workerPath, options)) { | ||
if (name.startsWith('_')) { | ||
return; | ||
continue; | ||
} | ||
@@ -177,3 +1771,3 @@ | ||
this[name] = this._callFunctionWithArgs.bind(this, name); | ||
}); | ||
} | ||
} | ||
@@ -204,1 +1798,6 @@ _callFunctionWithArgs(method, ...args) { | ||
exports.Worker = Worker; | ||
})(); | ||
module.exports = __webpack_exports__; | ||
/******/ })() | ||
; |
{ | ||
"name": "jest-worker", | ||
"version": "29.7.0", | ||
"version": "30.0.0-alpha.1", | ||
"repository": { | ||
@@ -15,2 +15,4 @@ "type": "git", | ||
"types": "./build/index.d.ts", | ||
"require": "./build/index.js", | ||
"import": "./build/index.mjs", | ||
"default": "./build/index.js" | ||
@@ -22,3 +24,3 @@ }, | ||
"@types/node": "*", | ||
"jest-util": "^29.7.0", | ||
"jest-util": "30.0.0-alpha.1", | ||
"merge-stream": "^2.0.0", | ||
@@ -33,8 +35,8 @@ "supports-color": "^8.0.0" | ||
"get-stream": "^6.0.0", | ||
"jest-leak-detector": "^29.7.0", | ||
"tsd-lite": "^0.7.0", | ||
"jest-leak-detector": "30.0.0-alpha.1", | ||
"tsd-lite": "^0.8.0", | ||
"worker-farm": "^1.6.0" | ||
}, | ||
"engines": { | ||
"node": "^14.15.0 || ^16.10.0 || >=18.0.0" | ||
"node": "^16.10.0 || ^18.12.0 || >=20.0.0" | ||
}, | ||
@@ -44,3 +46,3 @@ "publishConfig": { | ||
}, | ||
"gitHead": "4e56991693da7cd4c3730dc3579a1dd1403ee630" | ||
"gitHead": "d005cb2505c041583e0c5636d006e08666a54b63" | ||
} |
@@ -236,3 +236,3 @@ # jest-worker | ||
// Wait a bit. | ||
await sleep(10000); | ||
await sleep(10_000); | ||
@@ -239,0 +239,0 @@ // Transform the same file again. Will immediately return because the |
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
No v1
Quality: Package is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package
+ Added @jest/schemas@30.0.0-alpha.1 (transitive)
+ Added @jest/types@30.0.0-alpha.1 (transitive)
+ Added @sinclair/typebox@0.31.28 (transitive)
+ Added ci-info@4.0.0 (transitive)
+ Added jest-util@30.0.0-alpha.1 (transitive)
- Removed @jest/schemas@29.6.3 (transitive)
- Removed @jest/types@29.6.3 (transitive)
- Removed @sinclair/typebox@0.27.8 (transitive)
- Removed ci-info@3.9.0 (transitive)
- Removed jest-util@29.7.0 (transitive)
Updated jest-util@30.0.0-alpha.1