@d-fischer/rate-limiter
Advanced tools
Comparing version 0.7.5 to 1.0.0
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.CustomError = void 0;
/**
 * Base class for custom errors that keeps `instanceof` checks and stack
 * traces working when `Error` is subclassed in transpiled code.
 *
 * @private
 */
class CustomError extends Error {
    constructor(...params) {
        var _a;
        // @ts-ignore
        super(...params);
        // restore prototype chain so `instanceof Subclass` works after super()
        Object.setPrototypeOf(this, new.target.prototype);
        // Error.captureStackTrace is V8-only; the guarded call keeps other engines working
        // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
        (_a = Error.captureStackTrace) === null || _a === void 0 ? void 0 : _a.call(Error, this, new.target.constructor);
    }
    /** The name of the concrete error subclass. */
    get name() {
        return this.constructor.name;
    }
}
exports.CustomError = CustomError;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RateLimitReachedError = void 0;
const CustomError_1 = require("./CustomError");
/**
 * Error used to reject a request that was dropped because the rate limit
 * was reached (see the `throw` limit-reached behavior in the limiters).
 */
class RateLimitReachedError extends CustomError_1.CustomError {
}
exports.RateLimitReachedError = RateLimitReachedError;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RetryAfterError = void 0;
const CustomError_1 = require("./CustomError");
/**
 * Signals that a request must be retried after a given delay; thrown by
 * ResponseBasedRateLimiter when a response indicates a retry is needed.
 */
class RetryAfterError extends CustomError_1.CustomError {
    /**
     * @param after Delay in milliseconds until a retry may be attempted.
     */
    constructor(after) {
        super(`Need to retry after ${after} ms`);
        this._retryAt = Date.now() + after;
    }
    /** The timestamp (ms since epoch) at which the request may be retried. */
    get retryAt() {
        return this._retryAt;
    }
}
exports.RetryAfterError = RetryAfterError;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.NullRateLimiter = void 0;
/**
 * A pass-through rate limiter that applies no limiting at all:
 * every request is executed immediately via the given callback.
 */
class NullRateLimiter {
    /**
     * @param _callback The function that performs the actual request.
     */
    constructor(_callback) {
        this._callback = _callback;
    }
    /** Executes the request immediately and returns its result. */
    async request(req) {
        return await this._callback(req);
    }
    clear() {
        // noop
    }
    pause() {
        // noop
    }
    resume() {
        // noop
    }
}
exports.NullRateLimiter = NullRateLimiter;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.PartitionedRateLimiter = void 0;
const ResponseBasedRateLimiter_1 = require("./ResponseBasedRateLimiter");
/**
 * A rate limiter that partitions requests by a key derived from each request
 * and delegates every partition to its own child rate limiter, which is
 * created lazily on first use.
 */
class PartitionedRateLimiter {
    constructor(options) {
        this._children = new Map();
        // NOTE(review): the middle of this constructor was elided by the diff
        // (hunk @@ -13,71 +12,29 @@). Usage below shows a partition-key
        // callback and a child-factory callback being stored; the exact
        // option names must be confirmed against the published 1.0.0 sources.
        this._partitionKeyCallback = options.getPartitionKey;
        this._createChildCallback = options.createChild;
        this._paused = false;
    }
    /** Routes the request to the child limiter for its partition. */
    async request(req, options) {
        const partitionKey = this._partitionKeyCallback(req);
        const partitionChild = this._getChild(partitionKey);
        return await partitionChild.request(req, options);
    }
    /** Clears the queues of all child limiters. */
    clear() {
        for (const child of this._children.values()) {
            child.clear();
        }
    }
    /** Pauses this limiter and all existing children. */
    pause() {
        this._paused = true;
        for (const child of this._children.values()) {
            child.pause();
        }
    }
    /** Resumes this limiter and all existing children. */
    resume() {
        this._paused = false;
        for (const child of this._children.values()) {
            child.resume();
        }
    }
    /**
     * Returns the stats of the child for the given partition, or `null` if
     * the child does not exist or does not expose stats.
     */
    getChildStats(partitionKey) {
        if (!this._children.has(partitionKey)) {
            return null;
        }
        const child = this._children.get(partitionKey);
        if (!(child instanceof ResponseBasedRateLimiter_1.ResponseBasedRateLimiter)) {
            // only response-based limiters expose stats
            return null;
        }
        return child.stats;
    }
    /** Returns the cached child for a partition, creating it if necessary. */
    _getChild(partitionKey) {
        if (this._children.has(partitionKey)) {
            return this._children.get(partitionKey);
        }
        const result = this._createChildCallback(partitionKey);
        if (this._paused) {
            // NOTE(review): lines elided by the diff here (hunk @@ -99,5 +56,4 @@);
            // pausing the freshly created child is the visible behavior — confirm
            // nothing else was lost.
            result.pause();
        }
        // cache the child so subsequent requests for this partition reuse it
        // (implied by the has/get pattern above — confirm against 1.0.0)
        this._children.set(partitionKey, result);
        return result;
    }
}
exports.PartitionedRateLimiter = PartitionedRateLimiter;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.PartitionedTimeBasedRateLimiter = void 0;
const logger_1 = require("@d-fischer/logger");
const RateLimitReachedError_1 = require("../errors/RateLimitReachedError");
/**
 * A time-based rate limiter that keeps a separate token bucket and queue per
 * partition key: each partition may do at most `bucketSize` requests per
 * `timeFrame` milliseconds.
 */
class PartitionedTimeBasedRateLimiter {
    constructor({ logger, bucketSize, timeFrame, doRequest, getPartitionKey }) {
        this._partitionedQueue = new Map();
        this._usedFromBucket = new Map();
        this._paused = false;
        this._logger = (0, logger_1.createLogger)({ name: 'rate-limiter', emoji: true, ...logger });
        this._bucketSize = bucketSize;
        this._timeFrame = timeFrame;
        // NOTE(review): the tail of this constructor was elided by the diff
        // (hunk @@ -19,141 +17,105 @@). Usage below reads this._callback and
        // this._partitionKeyCallback, so these assignments are implied —
        // confirm against the published 1.0.0 sources.
        this._callback = doRequest;
        this._partitionKeyCallback = getPartitionKey;
    }
    /**
     * Performs (or enqueues) a request for its partition. When the bucket is
     * exhausted or the limiter is paused, `options.limitReachedBehavior`
     * decides between enqueueing (default), resolving `null`, or rejecting.
     */
    async request(req, options) {
        return await new Promise((resolve, reject) => {
            var _a, _b;
            const reqSpec = {
                req,
                resolve,
                reject,
                limitReachedBehavior: (_a = options === null || options === void 0 ? void 0 : options.limitReachedBehavior) !== null && _a !== void 0 ? _a : 'enqueue'
            };
            const partitionKey = this._partitionKeyCallback(req);
            const usedFromBucket = (_b = this._usedFromBucket.get(partitionKey)) !== null && _b !== void 0 ? _b : 0;
            if (usedFromBucket >= this._bucketSize || this._paused) {
                switch (reqSpec.limitReachedBehavior) {
                    case 'enqueue': {
                        const queue = this._getPartitionedQueue(partitionKey);
                        queue.push(reqSpec);
                        if (usedFromBucket + queue.length >= this._bucketSize) {
                            this._logger.warn(`Rate limit of ${this._bucketSize} for ${partitionKey ? `partition ${partitionKey}` : 'default partition'} was reached, waiting for ${this._paused ? 'the limiter to be unpaused' : 'a free bucket entry'}; queue size is ${queue.length}`);
                        }
                        else {
                            this._logger.info(`Enqueueing request for ${partitionKey ? `partition ${partitionKey}` : 'default partition'} because the rate limiter is paused; queue size is ${queue.length}`);
                        }
                        break;
                    }
                    case 'null': {
                        reqSpec.resolve(null);
                        if (this._paused) {
                            this._logger.info(`Returning null for request for ${partitionKey ? `partition ${partitionKey}` : 'default partition'} because the rate limiter is paused`);
                        }
                        else {
                            this._logger.warn(`Rate limit of ${this._bucketSize} for ${partitionKey ? `partition ${partitionKey}` : 'default partition'} was reached, dropping request and returning null`);
                        }
                        break;
                    }
                    case 'throw': {
                        reqSpec.reject(new RateLimitReachedError_1.RateLimitReachedError(`Request dropped because ${this._paused
                            ? 'the rate limiter is paused'
                            : `the rate limit for ${partitionKey ? `partition ${partitionKey}` : 'default partition'} was reached`}`));
                        break;
                    }
                    default: {
                        throw new Error('this should never happen');
                    }
                }
            }
            else {
                void this._runRequest(reqSpec, partitionKey);
            }
        });
    }
    /** Drops all queued requests of all partitions. */
    clear() {
        this._partitionedQueue.clear();
    }
    pause() {
        this._paused = true;
    }
    /** Resumes and kicks off the next queued request of every partition. */
    resume() {
        this._paused = false;
        for (const partitionKey of this._partitionedQueue.keys()) {
            this._runNextRequest(partitionKey);
        }
    }
    /** Returns the queue for a partition, creating an empty one if needed. */
    _getPartitionedQueue(partitionKey) {
        if (this._partitionedQueue.has(partitionKey)) {
            return this._partitionedQueue.get(partitionKey);
        }
        const newQueue = [];
        this._partitionedQueue.set(partitionKey, newQueue);
        return newQueue;
    }
    /**
     * Executes one request, charging the partition's bucket now and releasing
     * the bucket slot after `timeFrame` ms (which may trigger the next
     * queued request).
     */
    async _runRequest(reqSpec, partitionKey) {
        var _a;
        const queue = this._getPartitionedQueue(partitionKey);
        this._logger.debug(`doing a request for ${partitionKey ? `partition ${partitionKey}` : 'default partition'}, new queue length is ${queue.length}`);
        this._usedFromBucket.set(partitionKey, ((_a = this._usedFromBucket.get(partitionKey)) !== null && _a !== void 0 ? _a : 0) + 1);
        const { req, resolve, reject } = reqSpec;
        try {
            resolve(await this._callback(req));
        }
        catch (e) {
            reject(e);
        }
        finally {
            // free the bucket slot only after the time frame has elapsed
            setTimeout(() => {
                const newUsed = this._usedFromBucket.get(partitionKey) - 1;
                this._usedFromBucket.set(partitionKey, newUsed);
                if (queue.length && newUsed < this._bucketSize) {
                    this._runNextRequest(partitionKey);
                }
            }, this._timeFrame);
        }
    }
    /** Dequeues and runs the next request of a partition, unless paused. */
    _runNextRequest(partitionKey) {
        if (this._paused) {
            return;
        }
        const queue = this._getPartitionedQueue(partitionKey);
        const reqSpec = queue.shift();
        if (reqSpec) {
            void this._runRequest(reqSpec, partitionKey);
        }
    }
}
exports.PartitionedTimeBasedRateLimiter = PartitionedTimeBasedRateLimiter;
@@ -1,2 +0,2 @@ | ||
import type { LoggerOptions } from '@d-fischer/logger'; | ||
import { type LoggerOptions } from '@d-fischer/logger'; | ||
import type { RateLimiter, RateLimiterRequestOptions } from '../RateLimiter'; | ||
@@ -3,0 +3,0 @@ import { type RateLimiterStats } from '../RateLimiterStats'; |
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ResponseBasedRateLimiter = void 0;
const logger_1 = require("@d-fischer/logger");
const shared_utils_1 = require("@d-fischer/shared-utils");
const RateLimitReachedError_1 = require("../errors/RateLimitReachedError");
const RetryAfterError_1 = require("../errors/RetryAfterError");
/**
 * A rate limiter that derives its limits from the responses it receives.
 * Requests run in batches; the rate-limit parameters extracted from each
 * batch's responses decide when the next batch may start.
 *
 * Subclasses provide `doRequest`, `needsToRetryAfter` and
 * `getParametersFromResponse` (abstract in the TypeScript source — they are
 * called on `this` but not defined in this compiled output).
 */
class ResponseBasedRateLimiter {
    constructor({ logger }) {
        this._queue = [];
        this._batchRunning = false;
        this._paused = false;
        this._logger = (0, logger_1.createLogger)({ name: 'rate-limiter', emoji: true, ...logger });
    }
    /**
     * Performs (or enqueues) a request. It is queued whenever a batch is
     * running, a next-batch timer is pending, or the limiter is paused.
     */
    async request(req, options) {
        this._logger.trace('request start');
        return await new Promise((resolve, reject) => {
            var _a;
            const reqSpec = {
                req,
                resolve,
                reject,
                limitReachedBehavior: (_a = options === null || options === void 0 ? void 0 : options.limitReachedBehavior) !== null && _a !== void 0 ? _a : 'enqueue'
            };
            if (this._batchRunning || !!this._nextBatchTimer || this._paused) {
                this._logger.trace(`request queued batchRunning:${this._batchRunning.toString()} hasNextBatchTimer:${(!!this
                    ._nextBatchTimer).toString()} paused:${this._paused.toString()}`);
                this._queue.push(reqSpec);
            }
            else {
                void this._runRequestBatch([reqSpec]);
            }
        });
    }
    /** Drops all queued requests. */
    clear() {
        this._queue = [];
    }
    pause() {
        this._paused = true;
    }
    /** Resumes and immediately tries to run the next batch. */
    resume() {
        this._paused = false;
        this._runNextBatch();
    }
    /** The last rate-limit parameters observed from responses, if any. */
    get stats() {
        var _a, _b, _c, _d, _e;
        return {
            lastKnownLimit: (_b = (_a = this._parameters) === null || _a === void 0 ? void 0 : _a.limit) !== null && _b !== void 0 ? _b : null,
            lastKnownRemainingRequests: (_d = (_c = this._parameters) === null || _c === void 0 ? void 0 : _c.remaining) !== null && _d !== void 0 ? _d : null,
            lastKnownResetDate: (0, shared_utils_1.mapNullable)((_e = this._parameters) === null || _e === void 0 ? void 0 : _e.resetsAt, v => new Date(v))
        };
    }
    /**
     * Runs a batch of requests in parallel. Requests that signal "retry
     * later" are re-queued and their RetryAfterError rejections schedule the
     * next batch; otherwise the most restrictive response parameters decide
     * whether to continue immediately or wait for the reset.
     */
    async _runRequestBatch(reqSpecs) {
        this._logger.trace(`runRequestBatch start specs:${reqSpecs.length}`);
        this._batchRunning = true;
        if (this._parameters) {
            this._logger.debug(`Remaining requests: ${this._parameters.remaining}`);
        }
        this._logger.debug(`Doing ${reqSpecs.length} requests, new queue length is ${this._queue.length}`);
        const promises = reqSpecs.map(async (reqSpec) => {
            const { req, resolve, reject } = reqSpec;
            try {
                const result = await this.doRequest(req);
                const retry = this.needsToRetryAfter(result);
                if (retry !== null) {
                    // put the request back at the front; the thrown error marks
                    // this batch entry as rejected so the retry timer is scheduled
                    this._queue.unshift(reqSpec);
                    this._logger.info(`Retrying after ${retry} ms`);
                    throw new RetryAfterError_1.RetryAfterError(retry);
                }
                const params = this.getParametersFromResponse(result);
                resolve(result);
                return params;
            }
            catch (e) {
                if (e instanceof RetryAfterError_1.RetryAfterError) {
                    throw e;
                }
                reject(e);
                return undefined;
            }
        });
        // downleveling problem hack, see https://github.com/es-shims/Promise.allSettled/issues/5
        const settledPromises = await Promise.allSettled(promises);
        const rejectedPromises = settledPromises.filter((p) => p.status === 'rejected');
        const now = Date.now();
        if (rejectedPromises.length) {
            this._logger.trace('runRequestBatch some rejected');
            // wait until the latest retryAt among all rejected requests
            const retryAt = Math.max(now, ...rejectedPromises.map((p) => p.reason.retryAt));
            const retryAfter = retryAt - now;
            this._logger.warn(`Waiting for ${retryAfter} ms because the rate limit was exceeded`);
            this._nextBatchTimer = setTimeout(() => {
                this._parameters = undefined;
                this._runNextBatch();
            }, retryAfter);
        }
        else {
            this._logger.trace('runRequestBatch none rejected');
            // pick the most restrictive (lowest remaining) parameters of the batch
            const params = settledPromises
                .filter((p) => p.status === 'fulfilled' && p.value !== undefined)
                .map(p => p.value)
                .reduce((carry, v) => {
                if (!carry) {
                    return v;
                }
                // return v.resetsAt > carry.resetsAt ? v : carry;
                return v.remaining < carry.remaining ? v : carry;
            }, undefined);
            this._batchRunning = false;
            if (params) {
                this._parameters = params;
                if (params.resetsAt < now || params.remaining > 0) {
                    this._logger.trace('runRequestBatch canRunMore');
                    this._runNextBatch();
                }
                else {
                    const delay = params.resetsAt - now;
                    this._logger.trace(`runRequestBatch delay:${delay}`);
                    this._logger.warn(`Waiting for ${delay} ms because the rate limit was reached`);
                    // apply each queued request's limit-reached behavior now
                    this._queue = this._queue.filter(entry => {
                        switch (entry.limitReachedBehavior) {
                            case 'enqueue': {
                                return true;
                            }
                            case 'null': {
                                entry.resolve(null);
                                return false;
                            }
                            case 'throw': {
                                entry.reject(new RateLimitReachedError_1.RateLimitReachedError('Request removed from queue because the rate limit was reached'));
                                return false;
                            }
                            default: {
                                throw new Error('this should never happen');
                            }
                        }
                    });
                    this._nextBatchTimer = setTimeout(() => {
                        this._parameters = undefined;
                        this._runNextBatch();
                    }, delay);
                }
            }
        }
        this._logger.trace('runRequestBatch end');
    }
    /** Splices the next batch off the queue and runs it, unless paused. */
    _runNextBatch() {
        if (this._paused) {
            // NOTE(review): a few lines were elided by the diff here
            // (hunk @@ -194,4 +155,4 @@), likely a trace log before the early
            // return — confirm against the published 1.0.0 sources.
            return;
        }
        const amount = this._parameters ? Math.min(this._parameters.remaining, this._parameters.limit / 10) : 1;
        const reqSpecs = this._queue.splice(0, amount);
        if (reqSpecs.length) {
            // NOTE(review): lines elided by the diff here
            // (hunk @@ -201,5 +162,4 @@); the visible behavior is starting the
            // batch without awaiting it — confirm nothing else was lost.
            void this._runRequestBatch(reqSpecs);
        }
        this._logger.trace('runNextBatch end');
    }
}
exports.ResponseBasedRateLimiter = ResponseBasedRateLimiter;
@@ -1,2 +0,2 @@ | ||
import type { LoggerOptions } from '@d-fischer/logger'; | ||
import { type LoggerOptions } from '@d-fischer/logger'; | ||
import type { RateLimiter, RateLimiterRequestOptions } from '../RateLimiter'; | ||
@@ -3,0 +3,0 @@ export interface TimeBasedRateLimiterConfig<Req, Res> { |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.TimeBasedRateLimiter = void 0; | ||
var tslib_1 = require("tslib"); | ||
var logger_1 = require("@d-fischer/logger"); | ||
var RateLimitReachedError_1 = require("../errors/RateLimitReachedError"); | ||
var TimeBasedRateLimiter = /** @class */ (function () { | ||
function TimeBasedRateLimiter(_a) { | ||
var logger = _a.logger, bucketSize = _a.bucketSize, timeFrame = _a.timeFrame, doRequest = _a.doRequest; | ||
const logger_1 = require("@d-fischer/logger"); | ||
const RateLimitReachedError_1 = require("../errors/RateLimitReachedError"); | ||
class TimeBasedRateLimiter { | ||
constructor({ logger, bucketSize, timeFrame, doRequest }) { | ||
this._queue = []; | ||
this._usedFromBucket = 0; | ||
this._paused = false; | ||
this._logger = (0, logger_1.createLogger)(tslib_1.__assign({ name: 'rate-limiter', emoji: true }, logger)); | ||
this._logger = (0, logger_1.createLogger)({ name: 'rate-limiter', emoji: true, ...logger }); | ||
this._bucketSize = bucketSize; | ||
@@ -18,112 +16,87 @@ this._timeFrame = timeFrame; | ||
} | ||
TimeBasedRateLimiter.prototype.request = function (req, options) { | ||
return tslib_1.__awaiter(this, void 0, void 0, function () { | ||
var _this = this; | ||
return tslib_1.__generator(this, function (_a) { | ||
switch (_a.label) { | ||
case 0: return [4 /*yield*/, new Promise(function (resolve, reject) { | ||
var _a; | ||
var reqSpec = { | ||
req: req, | ||
resolve: resolve, | ||
reject: reject, | ||
limitReachedBehavior: (_a = options === null || options === void 0 ? void 0 : options.limitReachedBehavior) !== null && _a !== void 0 ? _a : 'enqueue' | ||
}; | ||
if (_this._usedFromBucket >= _this._bucketSize || _this._paused) { | ||
switch (reqSpec.limitReachedBehavior) { | ||
case 'enqueue': { | ||
_this._queue.push(reqSpec); | ||
if (_this._usedFromBucket + _this._queue.length >= _this._bucketSize) { | ||
_this._logger.warn("Rate limit of ".concat(_this._bucketSize, " was reached, waiting for ").concat(_this._paused ? 'the limiter to be unpaused' : 'a free bucket entry', "; queue size is ").concat(_this._queue.length)); | ||
} | ||
else { | ||
_this._logger.info("Enqueueing request because the rate limiter is paused; queue size is ".concat(_this._queue.length)); | ||
} | ||
break; | ||
} | ||
case 'null': { | ||
reqSpec.resolve(null); | ||
_this._logger.warn("Rate limit of ".concat(_this._bucketSize, " was reached, dropping request and returning null")); | ||
if (_this._paused) { | ||
_this._logger.info('Returning null for request because the rate limiter is paused'); | ||
} | ||
else { | ||
_this._logger.warn("Rate limit of ".concat(_this._bucketSize, " was reached, dropping request and returning null")); | ||
} | ||
break; | ||
} | ||
case 'throw': { | ||
reqSpec.reject(new RateLimitReachedError_1.RateLimitReachedError("Request dropped because ".concat(_this._paused ? 'the rate limiter is paused' : 'the rate limit was reached'))); | ||
break; | ||
} | ||
default: { | ||
throw new Error('this should never happen'); | ||
} | ||
} | ||
} | ||
else { | ||
void _this._runRequest(reqSpec); | ||
} | ||
})]; | ||
case 1: return [2 /*return*/, _a.sent()]; | ||
async request(req, options) { | ||
return await new Promise((resolve, reject) => { | ||
var _a; | ||
const reqSpec = { | ||
req, | ||
resolve, | ||
reject, | ||
limitReachedBehavior: (_a = options === null || options === void 0 ? void 0 : options.limitReachedBehavior) !== null && _a !== void 0 ? _a : 'enqueue' | ||
}; | ||
if (this._usedFromBucket >= this._bucketSize || this._paused) { | ||
switch (reqSpec.limitReachedBehavior) { | ||
case 'enqueue': { | ||
this._queue.push(reqSpec); | ||
if (this._usedFromBucket + this._queue.length >= this._bucketSize) { | ||
this._logger.warn(`Rate limit of ${this._bucketSize} was reached, waiting for ${this._paused ? 'the limiter to be unpaused' : 'a free bucket entry'}; queue size is ${this._queue.length}`); | ||
} | ||
else { | ||
this._logger.info(`Enqueueing request because the rate limiter is paused; queue size is ${this._queue.length}`); | ||
} | ||
break; | ||
} | ||
case 'null': { | ||
reqSpec.resolve(null); | ||
this._logger.warn(`Rate limit of ${this._bucketSize} was reached, dropping request and returning null`); | ||
if (this._paused) { | ||
this._logger.info('Returning null for request because the rate limiter is paused'); | ||
} | ||
else { | ||
this._logger.warn(`Rate limit of ${this._bucketSize} was reached, dropping request and returning null`); | ||
} | ||
break; | ||
} | ||
case 'throw': { | ||
reqSpec.reject(new RateLimitReachedError_1.RateLimitReachedError(`Request dropped because ${this._paused ? 'the rate limiter is paused' : 'the rate limit was reached'}`)); | ||
break; | ||
} | ||
default: { | ||
throw new Error('this should never happen'); | ||
} | ||
} | ||
}); | ||
} | ||
else { | ||
void this._runRequest(reqSpec); | ||
} | ||
}); | ||
}; | ||
TimeBasedRateLimiter.prototype.clear = function () { | ||
} | ||
clear() { | ||
this._queue = []; | ||
}; | ||
TimeBasedRateLimiter.prototype.pause = function () { | ||
} | ||
pause() { | ||
this._paused = true; | ||
}; | ||
TimeBasedRateLimiter.prototype.resume = function () { | ||
} | ||
resume() { | ||
this._paused = false; | ||
this._runNextRequest(); | ||
}; | ||
TimeBasedRateLimiter.prototype._runRequest = function (reqSpec) { | ||
return tslib_1.__awaiter(this, void 0, void 0, function () { | ||
var req, resolve, reject, _a, e_1; | ||
var _this = this; | ||
return tslib_1.__generator(this, function (_b) { | ||
switch (_b.label) { | ||
case 0: | ||
this._logger.debug("doing a request, new queue length is ".concat(this._queue.length)); | ||
this._usedFromBucket += 1; | ||
req = reqSpec.req, resolve = reqSpec.resolve, reject = reqSpec.reject; | ||
_b.label = 1; | ||
case 1: | ||
_b.trys.push([1, 3, 4, 5]); | ||
_a = resolve; | ||
return [4 /*yield*/, this._callback(req)]; | ||
case 2: | ||
_a.apply(void 0, [_b.sent()]); | ||
return [3 /*break*/, 5]; | ||
case 3: | ||
e_1 = _b.sent(); | ||
reject(e_1); | ||
return [3 /*break*/, 5]; | ||
case 4: | ||
setTimeout(function () { | ||
_this._usedFromBucket -= 1; | ||
if (_this._queue.length && _this._usedFromBucket < _this._bucketSize) { | ||
_this._runNextRequest(); | ||
} | ||
}, this._timeFrame); | ||
return [7 /*endfinally*/]; | ||
case 5: return [2 /*return*/]; | ||
} | ||
async _runRequest(reqSpec) { | ||
this._logger.debug(`doing a request, new queue length is ${this._queue.length}`); | ||
this._usedFromBucket += 1; | ||
const { req, resolve, reject } = reqSpec; | ||
try { | ||
resolve(await this._callback(req)); | ||
} | ||
catch (e) { | ||
reject(e); | ||
} | ||
finally { | ||
setTimeout(() => { | ||
this._usedFromBucket -= 1; | ||
if (this._queue.length && this._usedFromBucket < this._bucketSize) { | ||
this._runNextRequest(); | ||
} | ||
}); | ||
}); | ||
}; | ||
TimeBasedRateLimiter.prototype._runNextRequest = function () { | ||
}, this._timeFrame); | ||
} | ||
} | ||
_runNextRequest() { | ||
if (this._paused) { | ||
return; | ||
} | ||
var reqSpec = this._queue.shift(); | ||
const reqSpec = this._queue.shift(); | ||
if (reqSpec) { | ||
void this._runRequest(reqSpec); | ||
} | ||
}; | ||
return TimeBasedRateLimiter; | ||
}()); | ||
} | ||
} | ||
exports.TimeBasedRateLimiter = TimeBasedRateLimiter; |
import type { RateLimiter } from '../RateLimiter'; | ||
import type { TimeBasedRateLimiterConfig } from './TimeBasedRateLimiter'; | ||
import { TimeBasedRateLimiter } from './TimeBasedRateLimiter'; | ||
import { TimeBasedRateLimiter, type TimeBasedRateLimiterConfig } from './TimeBasedRateLimiter'; | ||
export declare class TimedPassthruRateLimiter<Req, Res> extends TimeBasedRateLimiter<Req, Res> { | ||
constructor(child: RateLimiter<Req, Res>, config: Omit<TimeBasedRateLimiterConfig<Req, Res>, 'doRequest'>); | ||
} |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.TimedPassthruRateLimiter = void 0; | ||
var tslib_1 = require("tslib"); | ||
var TimeBasedRateLimiter_1 = require("./TimeBasedRateLimiter"); | ||
var TimedPassthruRateLimiter = /** @class */ (function (_super) { | ||
tslib_1.__extends(TimedPassthruRateLimiter, _super); | ||
function TimedPassthruRateLimiter(child, config) { | ||
return _super.call(this, tslib_1.__assign(tslib_1.__assign({}, config), { doRequest: function (req, options) { | ||
return tslib_1.__awaiter(this, void 0, void 0, function () { | ||
return tslib_1.__generator(this, function (_a) { | ||
switch (_a.label) { | ||
case 0: return [4 /*yield*/, child.request(req, options)]; | ||
case 1: return [2 /*return*/, _a.sent()]; | ||
} | ||
}); | ||
}); | ||
} })) || this; | ||
const TimeBasedRateLimiter_1 = require("./TimeBasedRateLimiter"); | ||
class TimedPassthruRateLimiter extends TimeBasedRateLimiter_1.TimeBasedRateLimiter { | ||
constructor(child, config) { | ||
super({ | ||
...config, | ||
async doRequest(req, options) { | ||
return await child.request(req, options); | ||
} | ||
}); | ||
} | ||
return TimedPassthruRateLimiter; | ||
}(TimeBasedRateLimiter_1.TimeBasedRateLimiter)); | ||
} | ||
exports.TimedPassthruRateLimiter = TimedPassthruRateLimiter; |
{ | ||
"name": "@d-fischer/rate-limiter", | ||
"version": "0.7.5", | ||
"version": "1.0.0", | ||
"description": "Rate limit your requests.", | ||
@@ -25,17 +25,17 @@ "keywords": [ | ||
"author": "Daniel Fischer <daniel@d-fischer.dev>", | ||
"funding": "https://github.com/sponsors/d-fischer", | ||
"license": "MIT", | ||
"dependencies": { | ||
"@d-fischer/logger": "^4.2.1", | ||
"@d-fischer/promise.allsettled": "^2.0.2", | ||
"@d-fischer/shared-utils": "^3.2.0", | ||
"tslib": "^2.0.3" | ||
"@d-fischer/logger": "^4.2.3", | ||
"@d-fischer/shared-utils": "^3.6.3", | ||
"tslib": "^2.6.2" | ||
}, | ||
"devDependencies": { | ||
"@d-fischer/eslint-config": "^6.1.8", | ||
"eslint": "^8.30.0", | ||
"@d-fischer/eslint-config": "^6.2.2", | ||
"eslint": "^8.51.0", | ||
"husky": "^4.3.6", | ||
"lint-staged": "^13.1.0", | ||
"prettier": "^2.2.1", | ||
"tsukuru": "^0.8.0-pre.11", | ||
"typescript": "~4.9.4" | ||
"lint-staged": "^14.0.1", | ||
"prettier": "^3.0.3", | ||
"tsukuru": "^0.8.0", | ||
"typescript": "~5.2.2" | ||
}, | ||
@@ -42,0 +42,0 @@ "files": [ |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
No v1
QualityPackage is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package
3
1
52693
1189
- Removed@d-fischer/promise.allsettled@^2.0.2
- Removed@d-fischer/promise.allsettled@2.0.2(transitive)
- Removedarray-buffer-byte-length@1.0.1(transitive)
- Removedarray.prototype.map@1.0.7(transitive)
- Removedarraybuffer.prototype.slice@1.0.3(transitive)
- Removedavailable-typed-arrays@1.0.7(transitive)
- Removedcall-bind@1.0.7(transitive)
- Removeddata-view-buffer@1.0.1(transitive)
- Removeddata-view-byte-length@1.0.1(transitive)
- Removeddata-view-byte-offset@1.0.0(transitive)
- Removeddefine-data-property@1.1.4(transitive)
- Removeddefine-properties@1.2.1(transitive)
- Removedes-abstract@1.23.5(transitive)
- Removedes-array-method-boxes-properly@1.0.0(transitive)
- Removedes-define-property@1.0.0(transitive)
- Removedes-errors@1.3.0(transitive)
- Removedes-get-iterator@1.1.3(transitive)
- Removedes-object-atoms@1.0.0(transitive)
- Removedes-set-tostringtag@2.0.3(transitive)
- Removedes-to-primitive@1.2.1(transitive)
- Removedfor-each@0.3.3(transitive)
- Removedfunction-bind@1.1.2(transitive)
- Removedfunction.prototype.name@1.1.6(transitive)
- Removedfunctions-have-names@1.2.3(transitive)
- Removedget-intrinsic@1.2.4(transitive)
- Removedget-symbol-description@1.0.2(transitive)
- Removedglobalthis@1.0.4(transitive)
- Removedgopd@1.0.1(transitive)
- Removedhas-bigints@1.0.2(transitive)
- Removedhas-property-descriptors@1.0.2(transitive)
- Removedhas-proto@1.0.3(transitive)
- Removedhas-symbols@1.0.3(transitive)
- Removedhas-tostringtag@1.0.2(transitive)
- Removedhasown@2.0.2(transitive)
- Removedinternal-slot@1.0.7(transitive)
- Removedis-arguments@1.1.1(transitive)
- Removedis-array-buffer@3.0.4(transitive)
- Removedis-bigint@1.0.4(transitive)
- Removedis-boolean-object@1.1.2(transitive)
- Removedis-callable@1.2.7(transitive)
- Removedis-data-view@1.0.1(transitive)
- Removedis-date-object@1.0.5(transitive)
- Removedis-map@2.0.3(transitive)
- Removedis-negative-zero@2.0.3(transitive)
- Removedis-number-object@1.0.7(transitive)
- Removedis-regex@1.1.4(transitive)
- Removedis-set@2.0.3(transitive)
- Removedis-shared-array-buffer@1.0.3(transitive)
- Removedis-string@1.0.7(transitive)
- Removedis-symbol@1.0.4(transitive)
- Removedis-typed-array@1.1.13(transitive)
- Removedis-weakref@1.0.2(transitive)
- Removedisarray@2.0.5(transitive)
- Removediterate-iterator@1.0.2(transitive)
- Removediterate-value@1.0.2(transitive)
- Removedobject-inspect@1.13.3(transitive)
- Removedobject-keys@1.1.1(transitive)
- Removedobject.assign@4.1.5(transitive)
- Removedpossible-typed-array-names@1.0.0(transitive)
- Removedregexp.prototype.flags@1.5.3(transitive)
- Removedsafe-array-concat@1.1.2(transitive)
- Removedsafe-regex-test@1.0.3(transitive)
- Removedset-function-length@1.2.2(transitive)
- Removedset-function-name@2.0.2(transitive)
- Removedside-channel@1.0.6(transitive)
- Removedstop-iteration-iterator@1.0.0(transitive)
- Removedstring.prototype.trim@1.2.9(transitive)
- Removedstring.prototype.trimend@1.0.8(transitive)
- Removedstring.prototype.trimstart@1.0.8(transitive)
- Removedtyped-array-buffer@1.0.2(transitive)
- Removedtyped-array-byte-length@1.0.1(transitive)
- Removedtyped-array-byte-offset@1.0.2(transitive)
- Removedtyped-array-length@1.0.6(transitive)
- Removedunbox-primitive@1.0.2(transitive)
- Removedwhich-boxed-primitive@1.0.2(transitive)
- Removedwhich-typed-array@1.1.15(transitive)
Updated@d-fischer/logger@^4.2.3
Updatedtslib@^2.6.2