express-throttle - npm package version comparison

Comparing version 1.1.1 to 1.2.0


lib/throttle.js

@@ -11,3 +11,25 @@ "use strict";
     var opts = parse_options(options);
-    var rate = opts.rate.amount / opts.rate.interval;
+    var refill;
+
+    if (opts.rate.fixed) {
+        var period = opts.rate.period;
+        refill = function(client, dt) {
+            // Accumulated delta times
+            var t = (client.t || 0) + dt;
+            if (t >= period) {
+                client.t = t % period;
+                return burst;
+            } else {
+                client.t = t;
+                return 0;
+            }
+        };
+    } else {
+        var rate = opts.rate.amount / opts.rate.period;
+        refill = function(client, dt) {
+            return rate * dt;
+        };
+    }
+
     var burst = opts.burst || opts.rate.amount;
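The effect of this change is easiest to see in isolation. Below is a small standalone sketch (not code from the package, just the two refill shapes shown in the hunk above) for a rate of 10 requests per minute:

```js
// Illustration only: the two refill strategies from the diff above, at "10/minute".
var period = 60 * 1000;     // one minute, in milliseconds
var burst = 10;
var rate = burst / period;  // tokens per millisecond

// Default ("sliding window") refill: tokens trickle back continuously.
function refill_continuous(client, dt) {
    return rate * dt;       // e.g. dt = 6000 ms refills exactly 1 token
}

// ":fixed" refill: nothing comes back until a full period has accumulated,
// then the bucket is topped up by a full burst in one step.
function refill_fixed(client, dt) {
    var t = (client.t || 0) + dt;  // accumulated delta times
    if (t >= period) {
        client.t = t % period;
        return burst;
    } else {
        client.t = t;
        return 0;
    }
}

console.log(refill_continuous({}, 6000));       // 1
console.log(refill_fixed({}, 6000));            // 0
console.log(refill_fixed({ t: 55000 }, 6000));  // 10
```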

@@ -21,9 +43,11 @@ var store = opts.store || new MemoryStore(10000);
+    var cost_func;
+    // cost function, calculates the number of tokens to be subtracted per request
     if (typeof(opts.cost) == "number") {
-        var cost_func = function() { return opts.cost; };
+        cost_func = function() { return opts.cost; };
     } else if (typeof(opts.cost) == "function") {
-        var cost_func = opts.cost;
+        cost_func = opts.cost;
     } else {
-        var cost_func = function() { return 1; };
+        cost_func = function() { return 1; };
     }
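This hunk only hoists the declaration; `cost` may be a number, a function, or omitted. The same normalization as a runnable sketch (the helper name `normalize_cost` is made up for illustration):

```js
// Turn any accepted "cost" option into a function returning the number of
// tokens to subtract per request, mirroring the branches in the hunk above.
function normalize_cost(cost) {
    if (typeof(cost) == "number") {
        return function() { return cost; };
    } else if (typeof(cost) == "function") {
        return cost;
    } else {
        return function() { return 1; };
    }
}

console.log(normalize_cost(2)());                         // 2
console.log(normalize_cost(function() { return 5; })());  // 5
console.log(normalize_cost(undefined)());                 // 1 (the default)
```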

@@ -39,3 +63,3 @@
-    store.get(key, function(err, entry) {
+    store.get(key, function(err, client) {
         if (err) {

@@ -45,5 +69,5 @@ return next(err);
-        entry = entry || { "tokens": burst };
-        var passthrough = consume_tokens(entry, rate, burst, cost);
-        store.set(key, entry, function(err) {
+        client = client || { "tokens": burst };
+        var passthrough = update_tokens(client, refill, burst, cost);
+        store.set(key, client, function(err) {
            if (err) {

@@ -109,3 +133,3 @@ return next(err);
-var RATE_PATTERN = /^(\d+)\/(\d+)?(s|sec|second|m|min|minute|h|hour|d|day)$/;
+var RATE_PATTERN = /^(\d+)\/(\d+)?(ms|s|sec|second|m|min|minute|h|hour|d|day)(:fixed)?$/;

@@ -122,6 +146,8 @@ function parse_rate(rate) {
     var time_unit = parsed_rate[3];
+    var fixed = parsed_rate[4] == ":fixed";
     return {
         "amount": numerator,
-        "interval": denominator * time_unit_to_ms(time_unit)
+        "period": denominator * time_unit_to_ms(time_unit),
+        "fixed": fixed
     };
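For reference, here is a sketch (not from the package) of what the extended pattern accepts and what `parse_rate` would return for it, assuming the denominator (not shown in this hunk) defaults to 1 when the *Y* part is omitted:

```js
var RATE_PATTERN = /^(\d+)\/(\d+)?(ms|s|sec|second|m|min|minute|h|hour|d|day)(:fixed)?$/;

// "5/s:fixed" -> groups ["5", undefined, "s", ":fixed"],
// which parse_rate would turn into { amount: 5, period: 1000, fixed: true }
console.log(RATE_PATTERN.exec("5/s:fixed").slice(1));

// "1/200ms" -> groups ["1", "200", "ms", undefined],
// i.e. { amount: 1, period: 200, fixed: false }
console.log(RATE_PATTERN.exec("1/200ms").slice(1));

// "10/m:test" does not match, which the new test further down relies on.
console.log(RATE_PATTERN.test("10/m:test"));  // false
```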

@@ -132,28 +158,26 @@ }
     switch (time_unit) {
-        case "s":
-        case "sec":
-        case "second": return 1000;
-        case "m":
-        case "min":
-        case "minute": return 60 * 1000;
-        case "h":
-        case "hour": return 60 * 60 * 1000;
-        case "d":
-        case "day": return 24 * 60 * 60 * 1000;
+        case "ms":
+            return 1;
+        case "s": case "sec": case "second":
+            return 1000;
+        case "m": case "min": case "minute":
+            return 60 * 1000;
+        case "h": case "hour":
+            return 60 * 60 * 1000;
+        case "d": case "day":
+            return 24 * 60 * 60 * 1000;
     }
 }
 
-function consume_tokens(entry, rate, burst, cost) {
-    var now = Date.now();
-    // The number of tokens we have refilled since last access
-    var new_tokens = rate * (now - (entry.accessed || now));
-    entry.accessed = now;
+function update_tokens(client, refill, burst, cost) {
+    var t = Date.now();
+    var dt = t - (client.accessed || t);
     // Apply the refill first so it doesn't cancel out with the tokens we are
     // about to consume
-    entry.tokens = clamp_max(entry.tokens + new_tokens, burst);
+    client.tokens = clamp_max(client.tokens + refill(client, dt), burst);
+    client.accessed = t;
 
-    if (entry.tokens >= cost) {
-        entry.tokens -= cost;
+    if (client.tokens >= cost) {
+        client.tokens -= cost;
         return true;

@@ -160,0 +184,0 @@ } else {
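Putting the pieces together, here is a short trace (a sketch only, assuming `clamp_max` is the obvious `Math.min` helper, which is not shown in this comparison) of what `update_tokens` does for one request under the default continuous refill:

```js
function clamp_max(value, max) { return Math.min(value, max); }

// A client at rate "5/s", burst 5, cost 1, last seen 400 ms ago with 3 tokens left.
var rate = 5 / 1000;  // tokens per millisecond
var client = { tokens: 3, accessed: Date.now() - 400 };

var t = Date.now();
var dt = t - client.accessed;                             // ~400 ms
client.tokens = clamp_max(client.tokens + rate * dt, 5);  // 3 + ~2, clamped to 5
client.accessed = t;

var passthrough = client.tokens >= 1;  // true: the request is allowed
if (passthrough) client.tokens -= 1;   // ~4 tokens remain for later requests
console.log(passthrough, client.tokens);
```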

package.json

 {
   "name": "express-throttle",
-  "version": "1.1.1",
+  "version": "1.2.0",
   "description": "Request throttling middleware for Express",

@@ -5,0 +5,0 @@ "main": "index.js",

README.md

@@ -12,7 +12,7 @@ # express-throttle
-The throttling is done using the canonical [token bucket](https://en.wikipedia.org/wiki/Token_bucket) algorithm, where tokens are "refilled" in a sliding window manner (as opposed to a fixed time interval). This means that if we a set maximum rate of 10 requests / minute, a client will not be able to send 10 requests 0:59, and 10 more 1:01. However, if the client sends 10 requests at 0:30, he will be able to send a new request at 0:36 (since tokens are refilled continuously 1 every 6 seconds).
+The throttling is done using the canonical [token bucket](https://en.wikipedia.org/wiki/Token_bucket) algorithm, where tokens are "refilled" in a sliding window manner by default (can be configured to fixed time windows). This means that if we a set maximum rate of 10 requests / minute, a client will not be able to send 10 requests 0:59, and 10 more 1:01. However, if the client sends 10 requests at 0:30, he will be able to send a new request at 0:36 (since tokens are refilled continuously 1 every 6 seconds).
 
 ## Limitations
 
-By default, throttling data is stored in memory and is thus not shared between multiple processes. If your application is behind a load balancer which distributes traffic amoung several node processes, then throttling will be applied per process, which is generally not what you want (unless you can ensure that a client always hits the same process). It is possible to customize the storage so that the throttling data gets saved to a shared backend (e.g Redis). However, the current implementation contains a race-condition and will likely fail (erroneously allow/block certain requests) under high load. My plan is to address this shortcoming in future versions.
+By default, throttling data is stored in memory and is thus not shared between multiple processes. If your application is behind a load balancer which distributes traffic among several node processes, then throttling will be applied per process, which is generally not what you want (unless you can ensure that a client always hits the same process). It is possible to customize the storage so that the throttling data gets saved to a shared backend (e.g Redis). However, the current implementation contains a race-condition and will likely fail (erroneously allow/block certain requests) under high load. My plan is to address this shortcoming in future versions.
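To make the README's 10 requests/minute example concrete for both modes, here is a rough timeline (a reading of the refill code shown earlier, with hypothetical timestamps):

```js
// Sliding (default): tokens refill continuously, 1 token every 6 seconds.
//   0:30  10 requests drain the bucket
//   0:36  1 token has refilled, so 1 more request passes
//   0:42  another token, and so on
//
// Fixed (":fixed"): nothing refills until a full minute has accumulated,
// then the bucket is topped up to the full burst at once.
//   0:30  10 requests drain the bucket
//   1:29  still 0 tokens, every request is throttled
//   1:30  a full period has elapsed, all 10 tokens are restored in one go
```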

@@ -36,2 +36,7 @@ **TL;DR** - Use this package in production at your own risk, beware of the limitations.
 });
+
+// ...using fixed time windows instead
+app.post("/search", throttle("5/s:fixed"), function(req, res, next) {
+    // ...
+})
 ```

@@ -145,8 +150,10 @@ Combine it with a burst capacity of 10, meaning that the client can make 10 requests at any rate. The capacity is "refilled" with the specified rate (in this case 5/s).
-`rate`: Determines the number of requests allowed within the specified time unit before subsequent requests get throttled. Must be specified according to the following format: *X/Yt*
+`rate`: Determines the number of requests allowed within the specified time unit before subsequent requests get throttled. Must be specified according to the following format: *X/Yt(:fixed)*
 
-where *X* and *Y* are integers and *t* is the time unit which can be any of the following: `s, sec, second, m, min, minute, h, hour, d, day`
+where *X* and *Y* are integers and *t* is the time unit which can be any of the following: `ms, s, sec, second, m, min, minute, h, hour, d, day`
 
-`burst`: The number of requests that can be made at any rate. The burst quota is refilled with the specified `rate`.
+If you prefer tokens to be refilled in fixed intervals, append `:fixed`. E.g `5/min:fixed`.
+
+`burst`: The number of requests that can be made at any rate. Defaults to *X* as defined above.
 
 `store`: Custom storage class. Must implement a `get` and `set` method with the following signatures:

@@ -153,0 +160,0 @@ ```js
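The signature block itself is unchanged and therefore not shown in this comparison, but from the calls visible in throttle.js above (`store.get(key, callback)` with `callback(err, entry)`, and `store.set(key, value, callback)` with `callback(err)`), a minimal custom store could look roughly like this (a sketch, not the package's bundled MemoryStore):

```js
// Minimal in-memory store sketch matching the get/set calls seen in throttle.js.
function MapStore() {
    this.map = new Map();
}

// callback(err, entry) -- entry may be undefined for a client not seen before
MapStore.prototype.get = function(key, callback) {
    callback(null, this.map.get(key));
};

// callback(err)
MapStore.prototype.set = function(key, value, callback) {
    this.map.set(key, value);
    callback(null);
};

// Hypothetical usage, mirroring the README example above:
// app.post("/search", throttle({ "rate": "5/s", "store": new MapStore() }), handler);
```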

@@ -45,2 +45,7 @@ "use strict";
 
+    t.test("...with invalid rate option", st => {
+        st.throws(() => throttle("10/m:test"), new Error);
+        st.end();
+    });
+
     t.test("...with empty option object", st => {

@@ -74,2 +79,3 @@ st.throws(() => throttle({}), new Error);
     t.test("...rate", st => {
+        st.doesNotThrow(() => throttle("1/200ms"));
         st.doesNotThrow(() => throttle("1/s"));

@@ -85,2 +91,3 @@ st.doesNotThrow(() => throttle("1/2sec"));
         st.doesNotThrow(() => throttle("1/5day"));
+        st.doesNotThrow(() => throttle("1/m:fixed"));
         st.end();

@@ -128,2 +135,10 @@ });
     });
+
+    t.test("...2 requests with enough gap @ rate 5/s:fixed", st => {
+        var app = create_app({ "rate": "5/s:fixed", "burst": 1 });
+        request(app).get("/").end(verify(st));
+        setTimeout(() => {
+            request(app).get("/").end(verify(st, true));
+        }, 1050);
+    });
 });

@@ -157,2 +172,10 @@
     });
+
+    t.test("...2 requests without enough gap @ rate 5/s:fixed", st => {
+        var app = create_app({ "rate": "5/s:fixed", "burst": 1 });
+        request(app).get("/").end(() => true);
+        setTimeout(() => {
+            request(app).get("/").end(verify(st, true));
+        }, 950);
+    });
 });

@@ -159,0 +182,0 @@
