You're Invited: Meet the Socket team at BSidesSF and RSAC - April 27 - May 1.RSVP
Socket
Sign inDemoInstall
Socket

@upstash/ratelimit

Package Overview
Dependencies
Maintainers
5
Versions
248
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@upstash/ratelimit - npm Package Compare versions

Comparing version

to
0.3.3

295

./dist/index.js

@@ -50,9 +50,21 @@ "use strict";

// src/analytics.ts
var import_core_analytics = require("@upstash/core-analytics");
var Analytics = class {
redis;
prefix;
analytics;
table = "events";
constructor(config) {
this.redis = config.redis;
this.prefix = config.prefix ?? "@upstash/ratelimit";
this.analytics = new import_core_analytics.Analytics({
redis: config.redis,
window: "1h",
prefix: config.prefix ?? "@upstash/ratelimit",
retention: "90d"
});
}
/**
* Try to extract the geo information from the request
*
 * This handles Vercel's `req.geo` and Cloudflare's `request.cf` properties
* @param req
* @returns
*/
extractGeo(req) {

@@ -68,112 +80,26 @@ if (typeof req.geo !== "undefined") {

async record(event) {
const bucket = new Date().setUTCHours(0, 0, 0, 0).toFixed(0);
const key = [this.prefix, "events", bucket].join(":");
await this.redis.hincrby(
key,
JSON.stringify({
...event,
time: void 0
}),
1
);
await this.analytics.ingest(this.table, event);
}
async aggregate(aggregateBy, cutoff = 0) {
const keys = [];
let cursor = 0;
do {
const [nextCursor, found] = await this.redis.scan(cursor, {
match: [this.prefix, "events", "*"].join(":"),
count: 1e3
});
cursor = nextCursor;
for (const key of found) {
const timestamp = parseInt(key.split(":").pop());
if (timestamp >= cutoff) {
keys.push(key);
}
}
} while (cursor !== 0);
const days = {};
await Promise.all(
keys.sort().map(async (key) => {
const fields = await this.redis.hgetall(key);
if (!fields) {
return;
}
const day = {};
for (const [field, count] of Object.entries(fields)) {
const r = JSON.parse(field);
for (const [k, v] of Object.entries(r)) {
if (k !== aggregateBy) {
continue;
}
if (!day[v]) {
day[v] = {
success: 0,
blocked: 0
};
}
if (r.success) {
day[v].success += count;
} else {
day[v].blocked += count;
}
}
}
days[key.split(":")[2]] = day;
})
);
return days;
async series(filter, cutoff = 0) {
const records = await this.analytics.query(this.table, {
filter: [filter],
range: cutoff ? [cutoff] : void 0
});
return records;
}
async series(aggregateBy, cutoff = 0) {
const keys = [];
let cursor = 0;
do {
const [nextCursor, found] = await this.redis.scan(cursor, {
match: [this.prefix, "events", "*"].join(":"),
count: 1e3
});
cursor = nextCursor;
for (const key of found) {
const timestamp = parseInt(key.split(":").pop());
if (timestamp >= cutoff) {
keys.push(key);
}
}
} while (cursor !== 0);
const days = await Promise.all(
keys.sort().map(async (key) => {
const fields = await this.redis.hgetall(key);
const day = { time: parseInt(key.split(":")[2]) };
if (!fields) {
return day;
}
for (const [field, count] of Object.entries(fields)) {
const r = JSON.parse(field);
for (const [k, v] of Object.entries(r)) {
console.log({ k, v });
if (k !== aggregateBy) {
continue;
}
if (!day[v]) {
day[v] = 0;
}
day[v] += count;
}
}
return day;
})
);
return days;
}
async getUsage(cutoff = 0) {
const records = await this.aggregate("identifier", cutoff);
const records = await this.analytics.aggregateBy(this.table, "identifier", {
range: cutoff ? [cutoff] : void 0
});
const usage = {};
for (const day of Object.values(records)) {
for (const [k, v] of Object.entries(day)) {
for (const bucket of records) {
for (const [k, v] of Object.entries(bucket)) {
if (k === "time") {
continue;
}
if (!usage[k]) {
usage[k] = { success: 0, blocked: 0 };
}
usage[k].success += v.success;
usage[k].blocked += v.blocked;
usage[k].success += v;
usage[k].blocked += v;
}

@@ -187,2 +113,5 @@ }

var Cache = class {
/**
* Stores identifier -> reset (in milliseconds)
*/
cache;

@@ -242,2 +171,21 @@ constructor(cache) {

}
/**
* Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
*
* Use this if you want to reject all requests that you can not handle right now.
*
* @example
* ```ts
* const ratelimit = new Ratelimit({
* redis: Redis.fromEnv(),
* limiter: Ratelimit.slidingWindow(10, "10 s")
* })
*
* const { success } = await ratelimit.limit(id)
* if (!success){
* return "Nope"
* }
* return "Yes"
* ```
*/
limit = async (identifier, req) => {

@@ -281,2 +229,24 @@ const key = [this.prefix, identifier].join(":");

};
/**
* Block until the request may pass or timeout is reached.
*
 * This method returns a promise that resolves as soon as the request may be processed
 * or after the timeout has been reached.
*
* Use this if you want to delay the request until it is ready to get processed.
*
* @example
* ```ts
* const ratelimit = new Ratelimit({
* redis: Redis.fromEnv(),
* limiter: Ratelimit.slidingWindow(10, "10 s")
* })
*
* const { success } = await ratelimit.blockUntilReady(id, 60_000)
* if (!success){
* return "Nope"
* }
* return "Yes"
* ```
*/
blockUntilReady = async (identifier, timeout) => {

@@ -308,2 +278,5 @@ if (timeout <= 0) {

var RegionRatelimit = class extends Ratelimit {
/**
 * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
*/
constructor(config) {

@@ -321,2 +294,20 @@ super({

}
/**
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static fixedWindow(tokens, window) {

@@ -367,2 +358,18 @@ const windowDuration = ms(window);

}
/**
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
 * costs than `slidingLogs` and improved boundary behavior by calculating a
* weighted score between two windows.
*
* **Pro:**
*
* Good performance allows this to scale to very high loads.
*
* **Con:**
*
* Nothing major.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - The duration in which the user can max X requests.
*/
static slidingWindow(tokens, window) {

@@ -433,2 +440,15 @@ const script = `

}
/**
* You have a bucket filled with `{maxTokens}` tokens that refills constantly
* at `{refillRate}` per `{interval}`.
* Every request will remove one token from the bucket and if there is no
* token to take, the request is rejected.
*
* **Pro:**
*
* - Bursts of requests are smoothed out and you can process them at a constant
* rate.
 * - Allows you to set a higher initial burst limit by setting `maxTokens` higher
 *   than `refillRate`
*/
static tokenBucket(refillRate, interval, maxTokens) {

@@ -508,2 +528,26 @@ const script = `

}
/**
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
* it asynchronously.
* This is experimental and not yet recommended for production use.
*
* @experimental
*
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static cachedFixedWindow(tokens, window) {

@@ -571,2 +615,5 @@ const windowDuration = ms(window);

var MultiRegionRatelimit = class extends Ratelimit {
/**
 * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
*/
constructor(config) {

@@ -584,2 +631,20 @@ super({

}
/**
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static fixedWindow(tokens, window) {

@@ -654,2 +719,18 @@ const windowDuration = ms(window);

}
/**
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
 * costs than `slidingLogs` and improved boundary behavior by calculating a
* weighted score between two windows.
*
* **Pro:**
*
* Good performance allows this to scale to very high loads.
*
* **Con:**
*
* Nothing major.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - The duration in which the user can max X requests.
*/
static slidingWindow(tokens, window) {

@@ -656,0 +737,0 @@ const windowSize = ms(window);

@@ -92,4 +92,4 @@ import { Redis } from '@upstash/redis';

declare class Analytics {
private readonly redis;
private readonly prefix;
private readonly analytics;
private readonly table;
constructor(config: AnalyticsConfig);

@@ -108,21 +108,3 @@ /**

record(event: Event): Promise<void>;
/**
* Aggregates the events by the given field and returns the number of successes and failures per value
*
* @param aggregateBy - The field to aggregate by
* @param cutoff - Timestamp in milliseconds to limit the aggregation to `cutoff` until now
* @returns
*/
aggregate<TAggregateBy extends keyof Omit<Event, "time">>(aggregateBy: TAggregateBy, cutoff?: number): Promise<Record<string, Record<string, {
success: number;
blocked: number;
}>>>;
/**
* Builds a timeseries of the aggreagated value
*
* @param aggregateBy - The field to aggregate by
* @param cutoff - Timestamp in milliseconds to limit the aggregation to `cutoff` until now
* @returns
*/
series<TAggregateBy extends keyof Omit<Event, "time">>(aggregateBy: TAggregateBy, cutoff?: number): Promise<({
series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff?: number): Promise<({
time: number;

@@ -129,0 +111,0 @@ } & Record<string, number>)[]>;

@@ -50,9 +50,21 @@ "use strict";

// src/analytics.ts
var import_core_analytics = require("@upstash/core-analytics");
var Analytics = class {
redis;
prefix;
analytics;
table = "events";
constructor(config) {
this.redis = config.redis;
this.prefix = config.prefix ?? "@upstash/ratelimit";
this.analytics = new import_core_analytics.Analytics({
redis: config.redis,
window: "1h",
prefix: config.prefix ?? "@upstash/ratelimit",
retention: "90d"
});
}
/**
* Try to extract the geo information from the request
*
 * This handles Vercel's `req.geo` and Cloudflare's `request.cf` properties
* @param req
* @returns
*/
extractGeo(req) {

@@ -68,112 +80,26 @@ if (typeof req.geo !== "undefined") {

async record(event) {
const bucket = new Date().setUTCHours(0, 0, 0, 0).toFixed(0);
const key = [this.prefix, "events", bucket].join(":");
await this.redis.hincrby(
key,
JSON.stringify({
...event,
time: void 0
}),
1
);
await this.analytics.ingest(this.table, event);
}
async aggregate(aggregateBy, cutoff = 0) {
const keys = [];
let cursor = 0;
do {
const [nextCursor, found] = await this.redis.scan(cursor, {
match: [this.prefix, "events", "*"].join(":"),
count: 1e3
});
cursor = nextCursor;
for (const key of found) {
const timestamp = parseInt(key.split(":").pop());
if (timestamp >= cutoff) {
keys.push(key);
}
}
} while (cursor !== 0);
const days = {};
await Promise.all(
keys.sort().map(async (key) => {
const fields = await this.redis.hgetall(key);
if (!fields) {
return;
}
const day = {};
for (const [field, count] of Object.entries(fields)) {
const r = JSON.parse(field);
for (const [k, v] of Object.entries(r)) {
if (k !== aggregateBy) {
continue;
}
if (!day[v]) {
day[v] = {
success: 0,
blocked: 0
};
}
if (r.success) {
day[v].success += count;
} else {
day[v].blocked += count;
}
}
}
days[key.split(":")[2]] = day;
})
);
return days;
async series(filter, cutoff = 0) {
const records = await this.analytics.query(this.table, {
filter: [filter],
range: cutoff ? [cutoff] : void 0
});
return records;
}
async series(aggregateBy, cutoff = 0) {
const keys = [];
let cursor = 0;
do {
const [nextCursor, found] = await this.redis.scan(cursor, {
match: [this.prefix, "events", "*"].join(":"),
count: 1e3
});
cursor = nextCursor;
for (const key of found) {
const timestamp = parseInt(key.split(":").pop());
if (timestamp >= cutoff) {
keys.push(key);
}
}
} while (cursor !== 0);
const days = await Promise.all(
keys.sort().map(async (key) => {
const fields = await this.redis.hgetall(key);
const day = { time: parseInt(key.split(":")[2]) };
if (!fields) {
return day;
}
for (const [field, count] of Object.entries(fields)) {
const r = JSON.parse(field);
for (const [k, v] of Object.entries(r)) {
console.log({ k, v });
if (k !== aggregateBy) {
continue;
}
if (!day[v]) {
day[v] = 0;
}
day[v] += count;
}
}
return day;
})
);
return days;
}
async getUsage(cutoff = 0) {
const records = await this.aggregate("identifier", cutoff);
const records = await this.analytics.aggregateBy(this.table, "identifier", {
range: cutoff ? [cutoff] : void 0
});
const usage = {};
for (const day of Object.values(records)) {
for (const [k, v] of Object.entries(day)) {
for (const bucket of records) {
for (const [k, v] of Object.entries(bucket)) {
if (k === "time") {
continue;
}
if (!usage[k]) {
usage[k] = { success: 0, blocked: 0 };
}
usage[k].success += v.success;
usage[k].blocked += v.blocked;
usage[k].success += v;
usage[k].blocked += v;
}

@@ -187,2 +113,5 @@ }

var Cache = class {
/**
* Stores identifier -> reset (in milliseconds)
*/
cache;

@@ -242,2 +171,21 @@ constructor(cache) {

}
/**
* Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
*
* Use this if you want to reject all requests that you can not handle right now.
*
* @example
* ```ts
* const ratelimit = new Ratelimit({
* redis: Redis.fromEnv(),
* limiter: Ratelimit.slidingWindow(10, "10 s")
* })
*
* const { success } = await ratelimit.limit(id)
* if (!success){
* return "Nope"
* }
* return "Yes"
* ```
*/
limit = async (identifier, req) => {

@@ -281,2 +229,24 @@ const key = [this.prefix, identifier].join(":");

};
/**
* Block until the request may pass or timeout is reached.
*
 * This method returns a promise that resolves as soon as the request may be processed
 * or after the timeout has been reached.
*
* Use this if you want to delay the request until it is ready to get processed.
*
* @example
* ```ts
* const ratelimit = new Ratelimit({
* redis: Redis.fromEnv(),
* limiter: Ratelimit.slidingWindow(10, "10 s")
* })
*
* const { success } = await ratelimit.blockUntilReady(id, 60_000)
* if (!success){
* return "Nope"
* }
* return "Yes"
* ```
*/
blockUntilReady = async (identifier, timeout) => {

@@ -308,2 +278,5 @@ if (timeout <= 0) {

var RegionRatelimit = class extends Ratelimit {
/**
 * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
*/
constructor(config) {

@@ -321,2 +294,20 @@ super({

}
/**
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static fixedWindow(tokens, window) {

@@ -367,2 +358,18 @@ const windowDuration = ms(window);

}
/**
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
 * costs than `slidingLogs` and improved boundary behavior by calculating a
* weighted score between two windows.
*
* **Pro:**
*
* Good performance allows this to scale to very high loads.
*
* **Con:**
*
* Nothing major.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - The duration in which the user can max X requests.
*/
static slidingWindow(tokens, window) {

@@ -433,2 +440,15 @@ const script = `

}
/**
* You have a bucket filled with `{maxTokens}` tokens that refills constantly
* at `{refillRate}` per `{interval}`.
* Every request will remove one token from the bucket and if there is no
* token to take, the request is rejected.
*
* **Pro:**
*
* - Bursts of requests are smoothed out and you can process them at a constant
* rate.
 * - Allows you to set a higher initial burst limit by setting `maxTokens` higher
 *   than `refillRate`
*/
static tokenBucket(refillRate, interval, maxTokens) {

@@ -508,2 +528,26 @@ const script = `

}
/**
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
* it asynchronously.
* This is experimental and not yet recommended for production use.
*
* @experimental
*
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static cachedFixedWindow(tokens, window) {

@@ -571,2 +615,5 @@ const windowDuration = ms(window);

var MultiRegionRatelimit = class extends Ratelimit {
/**
 * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
*/
constructor(config) {

@@ -584,2 +631,20 @@ super({

}
/**
 * Each request inside a fixed time window increases a counter.
 * Once the counter reaches the maximum allowed number, all further requests are
* rejected.
*
* **Pro:**
*
* - Newer requests are not starved by old ones.
* - Low storage cost.
*
* **Con:**
*
* A burst of requests near the boundary of a window can result in a very
* high request rate because two windows will be filled with requests quickly.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - A fixed timeframe
*/
static fixedWindow(tokens, window) {

@@ -654,2 +719,18 @@ const windowDuration = ms(window);

}
/**
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
* costs than `slidingLogs` and improved boundary behavior by calcualting a
* weighted score between two windows.
*
* **Pro:**
*
* Good performance allows this to scale to very high loads.
*
* **Con:**
*
* Nothing major.
*
* @param tokens - How many requests a user can make in each time window.
* @param window - The duration in which the user can max X requests.
*/
static slidingWindow(tokens, window) {

@@ -656,0 +737,0 @@ const windowSize = ms(window);

{
"name": "@upstash/ratelimit",
"version": "0.3.2",
"version": "0.3.3",
"main": "./dist/index.js",

@@ -27,3 +27,3 @@ "types": "./dist/index.d.ts",

"@types/jest": "^29.2.5",
"@types/node": "^18.11.18",
"@types/node": "^18.13.0",
"@upstash/redis": "^1.19.3",

@@ -37,2 +37,5 @@ "dotenv-cli": "^7.0.0",

},
"dependencies": {
"@upstash/core-analytics": "0.0.0-canary.5"
},
"scripts": {

@@ -39,0 +42,0 @@ "build": "tsup",

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet