@upstash/ratelimit
Advanced tools
Comparing version 0.4.1 to 0.4.2
@@ -29,26 +29,2 @@ "use strict"; | ||
// src/duration.ts | ||
/**
 * Convert a human-readable duration string (e.g. "10 s", "5m", "100ms")
 * into its length in milliseconds.
 *
 * @param {string} d - A positive integer followed by a unit: ms, s, m, h or d
 *                     (an optional single space may separate them).
 * @returns {number} The duration in milliseconds.
 * @throws {Error} If the string does not match `<number><unit>`.
 */
function ms(d) {
  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
  if (!match) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  // Always pass the radix so the numeric part is parsed as base 10.
  const time = Number.parseInt(match[1], 10);
  const unit = match[2];
  switch (unit) {
    case "ms":
      return time;
    case "s":
      return time * 1e3;
    case "m":
      return time * 1e3 * 60;
    case "h":
      return time * 1e3 * 60 * 60;
    case "d":
      return time * 1e3 * 60 * 60 * 24;
    default:
      // Unreachable: the regex restricts `unit`, but keep a guard for safety.
      throw new Error(`Unable to parse window size: ${d}`);
  }
}
// src/analytics.ts | ||
@@ -151,2 +127,26 @@ var import_core_analytics = require("@upstash/core-analytics"); | ||
// src/duration.ts | ||
/**
 * Parse a duration string such as "2 s" or "500ms" into milliseconds.
 * Throws if the input is not a positive integer followed by a known unit.
 */
function ms(d) {
  const parsed = /^(\d+)\s?(ms|s|m|h|d)$/.exec(d);
  if (parsed === null) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  const [, amount, unit] = parsed;
  const value = parseInt(amount);
  if (unit === "ms") return value;
  if (unit === "s") return value * 1e3;
  if (unit === "m") return value * 1e3 * 60;
  if (unit === "h") return value * 1e3 * 60 * 60;
  if (unit === "d") return value * 1e3 * 60 * 60 * 24;
  // Unreachable: the regex above restricts `unit` to the handled cases.
  throw new Error(`Unable to parse window size: ${d}`);
}
// src/ratelimit.ts | ||
@@ -164,3 +164,3 @@ var Ratelimit = class { | ||
this.prefix = config.prefix ?? "@upstash/ratelimit"; | ||
this.analytics = config.analytics !== false ? new Analytics({ | ||
this.analytics = config.analytics ? new Analytics({ | ||
redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis, | ||
@@ -241,3 +241,3 @@ prefix: this.prefix | ||
* | ||
* This method returns a promsie that resolves as soon as the request may be processed | ||
* This method returns a promise that resolves as soon as the request may be processed | ||
* or after the timeout has been reached. | ||
@@ -285,2 +285,220 @@ * | ||
// src/multi.ts | ||
/**
 * Generate a 16-character pseudo-random alphanumeric identifier.
 * Uses Math.random, so it is NOT cryptographically secure; it only needs to
 * be unique enough to tag individual requests.
 */
function randomId() {
  const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  return Array.from(
    { length: 16 },
    () => alphabet.charAt(Math.floor(Math.random() * alphabet.length))
  ).join("");
}
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing `@upstash/redis` instances
   * (one per region) and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis,
        // Only build the ephemeral in-memory cache when the caller opted in.
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Each request inside a fixed time window increases a counter.
   * Once the counter reaches a maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
    local key = KEYS[1]
    local id = ARGV[1]
    local window = ARGV[2]
    redis.call("SADD", key, id)
    local members = redis.call("SMEMBERS", key)
    if #members == 1 then
    -- The first time this key is set, the value will be 1.
    -- So we only need the expire command once
      redis.call("PEXPIRE", key, window)
    end
    return members
    `;
    return async function (ctx, identifier) {
      if (ctx.cache) {
        // Short-circuit while the identifier is known locally to be blocked.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      // Fire the script against every region in parallel.
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [key], [requestID, windowDuration])
      }));
      // The fastest region decides; the others are reconciled in sync().
      const firstResponse = await Promise.any(dbs.map((s) => s.request));
      const usedTokens = firstResponse.length;
      // NOTE(review): `- 1` reserves headroom beyond the returned member
      // count (which already includes this request) — confirm intended.
      const remaining = tokens - usedTokens - 1;
      // Propagate request ids that individual regions are missing, so all
      // regions eventually converge on the same set of seen requests.
      async function sync() {
        const individualIDs = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const ids = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          // Only write the ids this region is actually missing.
          await db.redis.sadd(key, ...diff);
        }
      }
      const success = remaining > 0;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    // A single parse is enough; the original computed ms(window) twice.
    const windowDuration = ms(window);
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds
      local requestID = ARGV[4] -- uuid for this request
      local currentMembers = redis.call("SMEMBERS", currentKey)
      local requestsInCurrentWindow = #currentMembers
      local previousMembers = redis.call("SMEMBERS", previousKey)
      local requestsInPreviousWindow = #previousMembers
      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return {currentMembers, previousMembers}
      end
      redis.call("SADD", currentKey, requestID)
      table.insert(currentMembers, requestID)
      if requestsInCurrentWindow == 0 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return {currentMembers, previousMembers}
      `;
    return async function (ctx, identifier) {
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const now = Date.now();
      const currentWindow = Math.floor(now / windowDuration);
      const currentKey = [identifier, currentWindow].join(":");
      // BUGFIX: the previous bucket is one window *index* earlier. The
      // original subtracted the window length in milliseconds from the
      // bucket index, producing a key that never matches a real bucket,
      // so the previous window always appeared empty.
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
      }));
      // How far into the current window we are, in [0, 1).
      const percentageInCurrent = now % windowDuration / windowDuration;
      const [current, previous] = await Promise.any(dbs.map((s) => s.request));
      // Weight the previous window by how much of it still overlaps the
      // sliding interval.
      const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
      const remaining = tokens - usedTokens;
      // NOTE(review): allIDs below is derived from the first region's
      // response only — verify whether ids unique to other regions should
      // also be merged.
      async function sync() {
        const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const [ids] = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          // Only write the ids this region is actually missing.
          await db.redis.sadd(currentKey, ...diff);
        }
      }
      const success = remaining > 0;
      const reset = (currentWindow + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
};
// src/single.ts | ||
@@ -608,220 +826,2 @@ var RegionRatelimit = class extends Ratelimit { | ||
}; | ||
// src/multi.ts | ||
/**
 * Build a 16-character request identifier from A-Z, a-z and 0-9.
 * Math.random is fine here: the id only needs to be practically unique,
 * not unpredictable.
 */
function randomId() {
  const pool = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  let id = "";
  while (id.length < 16) {
    id += pool.charAt(Math.floor(Math.random() * pool.length));
  }
  return id;
}
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing `@upstash/redis` instances
   * (one per region) and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis,
        // Only build the ephemeral in-memory cache when the caller opted in.
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Each request inside a fixed time window increases a counter.
   * Once the counter reaches a maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
    local key = KEYS[1]
    local id = ARGV[1]
    local window = ARGV[2]
    redis.call("SADD", key, id)
    local members = redis.call("SMEMBERS", key)
    if #members == 1 then
    -- The first time this key is set, the value will be 1.
    -- So we only need the expire command once
      redis.call("PEXPIRE", key, window)
    end
    return members
    `;
    return async function (ctx, identifier) {
      if (ctx.cache) {
        // Short-circuit while the identifier is known locally to be blocked.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      // Fire the script against every region in parallel.
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [key], [requestID, windowDuration])
      }));
      // The fastest region decides; the others are reconciled in sync().
      const firstResponse = await Promise.any(dbs.map((s) => s.request));
      const usedTokens = firstResponse.length;
      // NOTE(review): `- 1` reserves headroom beyond the returned member
      // count (which already includes this request) — confirm intended.
      const remaining = tokens - usedTokens - 1;
      // Propagate request ids that individual regions are missing, so all
      // regions eventually converge on the same set of seen requests.
      async function sync() {
        const individualIDs = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const ids = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          // Only write the ids this region is actually missing.
          await db.redis.sadd(key, ...diff);
        }
      }
      const success = remaining > 0;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    // A single parse is enough; the original computed ms(window) twice.
    const windowDuration = ms(window);
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds
      local requestID = ARGV[4] -- uuid for this request
      local currentMembers = redis.call("SMEMBERS", currentKey)
      local requestsInCurrentWindow = #currentMembers
      local previousMembers = redis.call("SMEMBERS", previousKey)
      local requestsInPreviousWindow = #previousMembers
      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return {currentMembers, previousMembers}
      end
      redis.call("SADD", currentKey, requestID)
      table.insert(currentMembers, requestID)
      if requestsInCurrentWindow == 0 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return {currentMembers, previousMembers}
      `;
    return async function (ctx, identifier) {
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const now = Date.now();
      const currentWindow = Math.floor(now / windowDuration);
      const currentKey = [identifier, currentWindow].join(":");
      // BUGFIX: the previous bucket is one window *index* earlier. The
      // original subtracted the window length in milliseconds from the
      // bucket index, producing a key that never matches a real bucket,
      // so the previous window always appeared empty.
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
      }));
      // How far into the current window we are, in [0, 1).
      const percentageInCurrent = now % windowDuration / windowDuration;
      const [current, previous] = await Promise.any(dbs.map((s) => s.request));
      // Weight the previous window by how much of it still overlaps the
      // sliding interval.
      const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
      const remaining = tokens - usedTokens;
      // NOTE(review): allIDs below is derived from the first region's
      // response only — verify whether ids unique to other regions should
      // also be merged.
      async function sync() {
        const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const [ids] = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          // Only write the ids this region is actually missing.
          await db.redis.sadd(currentKey, ...diff);
        }
      }
      const success = remaining > 0;
      const reset = (currentWindow + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
};
// Annotate the CommonJS export names for ESM import in node: | ||
@@ -828,0 +828,0 @@ 0 && (module.exports = { |
import { Redis } from '@upstash/redis'; | ||
// Geolocation metadata that can optionally be attached to a rate-limit event.
type Geo = { | ||
country?: string; | ||
city?: string; | ||
region?: string; | ||
ip?: string; | ||
}; | ||
// A single recorded rate-limit decision: who, when, and whether it passed.
type Event = Geo & { | ||
identifier: string; | ||
time: number; | ||
success: boolean; | ||
}; | ||
// Configuration for the analytics recorder; keys are namespaced by `prefix`.
type AnalyticsConfig = { | ||
redis: Redis; | ||
prefix?: string; | ||
}; | ||
/** | ||
* The Analytics package is experimental and can change at any time. | ||
*/ | ||
declare class Analytics { | ||
private readonly analytics; | ||
private readonly table; | ||
constructor(config: AnalyticsConfig); | ||
/** | ||
* Try to extract the geo information from the request | ||
* | ||
* This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties | ||
* @param req | ||
* @returns | ||
*/ | ||
extractGeo(req: { | ||
geo?: Geo; | ||
cf?: Geo; | ||
}): Geo; | ||
// Persist one event for later aggregation.
record(event: Event): Promise<void>; | ||
// Time-bucketed counts per distinct value of `filter`, newest first cutoff.
series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({ | ||
time: number; | ||
} & Record<string, number>)[]>; | ||
// Aggregate success/blocked counts per identifier since `cutoff`.
getUsage(cutoff?: number): Promise<Record<string, { | ||
success: number; | ||
blocked: number; | ||
}>>; | ||
} | ||
type Unit = "ms" | "s" | "m" | "h" | "d"; | ||
@@ -73,45 +116,2 @@ type Duration = `${number} ${Unit}` | `${number}${Unit}`; | ||
type Geo = { | ||
country?: string; | ||
city?: string; | ||
region?: string; | ||
ip?: string; | ||
}; | ||
type Event = Geo & { | ||
identifier: string; | ||
time: number; | ||
success: boolean; | ||
}; | ||
type AnalyticsConfig = { | ||
redis: Redis; | ||
prefix?: string; | ||
}; | ||
/** | ||
* The Analytics package is experimental and can change at any time. | ||
*/ | ||
declare class Analytics { | ||
private readonly analytics; | ||
private readonly table; | ||
constructor(config: AnalyticsConfig); | ||
/** | ||
* Try to extract the geo information from the request | ||
* | ||
* This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties | ||
* @param req | ||
* @returns | ||
*/ | ||
extractGeo(req: { | ||
geo?: Geo; | ||
cf?: Geo; | ||
}): Geo; | ||
record(event: Event): Promise<void>; | ||
series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({ | ||
time: number; | ||
} & Record<string, number>)[]>; | ||
getUsage(cutoff?: number): Promise<Record<string, { | ||
success: number; | ||
blocked: number; | ||
}>>; | ||
} | ||
type RatelimitConfig<TContext> = { | ||
@@ -165,3 +165,3 @@ /** | ||
* | ||
* @default true | ||
* @default false | ||
*/ | ||
@@ -217,3 +217,3 @@ analytics?: boolean; | ||
* | ||
* This method returns a promsie that resolves as soon as the request may be processed | ||
* This method returns a promise that resolves as soon as the request may be processed | ||
* or after the timeout has been reached. | ||
@@ -240,8 +240,8 @@ * | ||
type RegionRatelimitConfig = { | ||
type MultiRegionRatelimitConfig = { | ||
/** | ||
* Instance of `@upstash/redis` | ||
* Instances of `@upstash/redis` | ||
* @see https://github.com/upstash/upstash-redis#quick-start | ||
*/ | ||
redis: Redis; | ||
redis: Redis[]; | ||
/** | ||
@@ -252,8 +252,5 @@ * The ratelimiter function to use. | ||
* Available algorithms are exposed via static methods: | ||
* - Ratelimiter.fixedWindow | ||
* - Ratelimiter.slidingLogs | ||
* - Ratelimiter.slidingWindow | ||
* - Ratelimiter.tokenBucket | ||
* - MultiRegionRatelimit.fixedWindow | ||
*/ | ||
limiter: Algorithm<RegionContext>; | ||
limiter: Algorithm<MultiRegionContext>; | ||
/** | ||
@@ -279,3 +276,3 @@ * All keys in redis are prefixed with this. | ||
* If left undefined, a map is created automatically, but it can only work | ||
* if the map or the ratelimit instance is created outside your serverless function handler. | ||
* if the map or th ratelimit instance is created outside your serverless function handler. | ||
*/ | ||
@@ -302,7 +299,7 @@ ephemeralCache?: Map<string, number> | false; | ||
* ```ts | ||
* const { limit } = new Ratelimit({ | ||
* const { limit } = new MultiRegionRatelimit({ | ||
* redis: Redis.fromEnv(), | ||
* limiter: Ratelimit.slidingWindow( | ||
* limiter: MultiRegionRatelimit.fixedWindow( | ||
* 10, // Allow 10 requests per window of 30 minutes | ||
* "30 m", // interval of 30 minutes | ||
* 10, // Allow 10 requests per window of 30 minutes | ||
* ) | ||
@@ -313,7 +310,7 @@ * }) | ||
*/ | ||
declare class RegionRatelimit extends Ratelimit<RegionContext> { | ||
declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> { | ||
/** | ||
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice. | ||
*/ | ||
constructor(config: RegionRatelimitConfig); | ||
constructor(config: MultiRegionRatelimitConfig); | ||
/** | ||
@@ -345,3 +342,3 @@ * Each requests inside a fixed time increases a counter. | ||
*/ | ||
window: Duration): Algorithm<RegionContext>; | ||
window: Duration): Algorithm<MultiRegionContext>; | ||
/** | ||
@@ -371,74 +368,11 @@ * Combined approach of `slidingLogs` and `fixedWindow` with lower storage | ||
*/ | ||
window: Duration): Algorithm<RegionContext>; | ||
/** | ||
* You have a bucket filled with `{maxTokens}` tokens that refills constantly | ||
* at `{refillRate}` per `{interval}`. | ||
* Every request will remove one token from the bucket and if there is no | ||
* token to take, the request is rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Bursts of requests are smoothed out and you can process them at a constant | ||
* rate. | ||
* - Allows to set a higher initial burst limit by setting `maxTokens` higher | ||
* than `refillRate` | ||
*/ | ||
static tokenBucket( | ||
/** | ||
* How many tokens are refilled per `interval` | ||
* | ||
* An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds. | ||
*/ | ||
refillRate: number, | ||
/** | ||
* The interval for the `refillRate` | ||
*/ | ||
interval: Duration, | ||
/** | ||
* Maximum number of tokens. | ||
* A newly created bucket starts with this many tokens. | ||
* Useful to allow higher burst limits. | ||
*/ | ||
maxTokens: number): Algorithm<RegionContext>; | ||
/** | ||
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates | ||
* it asynchronously. | ||
* This is experimental and not yet recommended for production use. | ||
* | ||
* @experimental | ||
* | ||
* Each requests inside a fixed time increases a counter. | ||
* Once the counter reaches a maxmimum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static cachedFixedWindow( | ||
/** | ||
* How many requests are allowed per window. | ||
*/ | ||
tokens: number, | ||
/** | ||
* The duration in which `tokens` requests are allowed. | ||
*/ | ||
window: Duration): Algorithm<RegionContext>; | ||
window: Duration): Algorithm<MultiRegionContext>; | ||
} | ||
type MultiRegionRatelimitConfig = { | ||
type RegionRatelimitConfig = { | ||
/** | ||
* Instances of `@upstash/redis` | ||
* Instance of `@upstash/redis` | ||
* @see https://github.com/upstash/upstash-redis#quick-start | ||
*/ | ||
redis: Redis[]; | ||
redis: Redis; | ||
/** | ||
@@ -449,5 +383,8 @@ * The ratelimiter function to use. | ||
* Available algorithms are exposed via static methods: | ||
* - MultiRegionRatelimit.fixedWindow | ||
* - Ratelimiter.fixedWindow | ||
* - Ratelimiter.slidingLogs | ||
* - Ratelimiter.slidingWindow | ||
* - Ratelimiter.tokenBucket | ||
*/ | ||
limiter: Algorithm<MultiRegionContext>; | ||
limiter: Algorithm<RegionContext>; | ||
/** | ||
@@ -473,3 +410,3 @@ * All keys in redis are prefixed with this. | ||
* If left undefined, a map is created automatically, but it can only work | ||
* if the map or th ratelimit instance is created outside your serverless function handler. | ||
* if the map or the ratelimit instance is created outside your serverless function handler. | ||
*/ | ||
@@ -496,7 +433,7 @@ ephemeralCache?: Map<string, number> | false; | ||
* ```ts | ||
* const { limit } = new MultiRegionRatelimit({ | ||
* const { limit } = new Ratelimit({ | ||
* redis: Redis.fromEnv(), | ||
* limiter: MultiRegionRatelimit.fixedWindow( | ||
* limiter: Ratelimit.slidingWindow( | ||
* "30 m", // interval of 30 minutes | ||
* 10, // Allow 10 requests per window of 30 minutes | ||
* "30 m", // interval of 30 minutes | ||
* ) | ||
@@ -507,7 +444,7 @@ * }) | ||
*/ | ||
declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> { | ||
declare class RegionRatelimit extends Ratelimit<RegionContext> { | ||
/** | ||
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice. | ||
*/ | ||
constructor(config: MultiRegionRatelimitConfig); | ||
constructor(config: RegionRatelimitConfig); | ||
/** | ||
@@ -539,3 +476,3 @@ * Each requests inside a fixed time increases a counter. | ||
*/ | ||
window: Duration): Algorithm<MultiRegionContext>; | ||
window: Duration): Algorithm<RegionContext>; | ||
/** | ||
@@ -565,5 +502,68 @@ * Combined approach of `slidingLogs` and `fixedWindow` with lower storage | ||
*/ | ||
window: Duration): Algorithm<MultiRegionContext>; | ||
window: Duration): Algorithm<RegionContext>; | ||
/** | ||
* You have a bucket filled with `{maxTokens}` tokens that refills constantly | ||
* at `{refillRate}` per `{interval}`. | ||
* Every request will remove one token from the bucket and if there is no | ||
* token to take, the request is rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Bursts of requests are smoothed out and you can process them at a constant | ||
* rate. | ||
* - Allows to set a higher initial burst limit by setting `maxTokens` higher | ||
* than `refillRate` | ||
*/ | ||
static tokenBucket( | ||
/** | ||
* How many tokens are refilled per `interval` | ||
* | ||
* An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds. | ||
*/ | ||
refillRate: number, | ||
/** | ||
* The interval for the `refillRate` | ||
*/ | ||
interval: Duration, | ||
/** | ||
* Maximum number of tokens. | ||
* A newly created bucket starts with this many tokens. | ||
* Useful to allow higher burst limits. | ||
*/ | ||
maxTokens: number): Algorithm<RegionContext>; | ||
/** | ||
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates | ||
* it asynchronously. | ||
* This is experimental and not yet recommended for production use. | ||
* | ||
* @experimental | ||
* | ||
* Each requests inside a fixed time increases a counter. | ||
* Once the counter reaches a maxmimum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static cachedFixedWindow( | ||
/** | ||
* How many requests are allowed per window. | ||
*/ | ||
tokens: number, | ||
/** | ||
* The duration in which `tokens` requests are allowed. | ||
*/ | ||
window: Duration): Algorithm<RegionContext>; | ||
} | ||
export { Algorithm, Analytics, AnalyticsConfig, MultiRegionRatelimit, MultiRegionRatelimitConfig, RegionRatelimit as Ratelimit, RegionRatelimitConfig as RatelimitConfig }; |
@@ -29,26 +29,2 @@ "use strict"; | ||
// src/duration.ts | ||
/**
 * Translate a duration literal ("100ms", "2 s", "1 h", ...) into milliseconds
 * by multiplying the numeric part with the unit's factor.
 */
function ms(d) {
  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
  if (!match) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  const time = parseInt(match[1]);
  // Milliseconds per unit; mirrors the units accepted by the regex above.
  const factors = {
    ms: 1,
    s: 1e3,
    m: 1e3 * 60,
    h: 1e3 * 60 * 60,
    d: 1e3 * 60 * 60 * 24
  };
  const factor = factors[match[2]];
  if (factor === undefined) {
    // Unreachable given the regex, kept as a defensive guard.
    throw new Error(`Unable to parse window size: ${d}`);
  }
  return time * factor;
}
// src/analytics.ts | ||
@@ -151,2 +127,26 @@ var import_core_analytics = require("@upstash/core-analytics"); | ||
// src/duration.ts | ||
/**
 * Parse a `<number><unit>` window-size string (unit: ms/s/m/h/d, optional
 * single space) and return its value in milliseconds.
 */
function ms(d) {
  const match = /^(\d+)\s?(ms|s|m|h|d)$/.exec(d);
  if (!match) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  const time = parseInt(match[1]);
  switch (match[2]) {
    case "d":
      return time * 1e3 * 60 * 60 * 24;
    case "h":
      return time * 1e3 * 60 * 60;
    case "m":
      return time * 1e3 * 60;
    case "s":
      return time * 1e3;
    case "ms":
      return time;
    default:
      // Unreachable: the regex restricts the unit.
      throw new Error(`Unable to parse window size: ${d}`);
  }
}
// src/ratelimit.ts | ||
@@ -164,3 +164,3 @@ var Ratelimit = class { | ||
this.prefix = config.prefix ?? "@upstash/ratelimit"; | ||
this.analytics = config.analytics !== false ? new Analytics({ | ||
this.analytics = config.analytics ? new Analytics({ | ||
redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis, | ||
@@ -241,3 +241,3 @@ prefix: this.prefix | ||
* | ||
* This method returns a promsie that resolves as soon as the request may be processed | ||
* This method returns a promise that resolves as soon as the request may be processed | ||
* or after the timeout has been reached. | ||
@@ -285,2 +285,220 @@ * | ||
// src/multi.ts | ||
/**
 * Produce a 16-character alphanumeric request id.
 * Backed by Math.random — collision-resistant enough for tagging requests,
 * but not suitable for security tokens.
 */
function randomId() {
  const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  const out = [];
  for (let i = 0; i < 16; i++) {
    out.push(chars.charAt(Math.floor(Math.random() * chars.length)));
  }
  return out.join("");
}
var MultiRegionRatelimit = class extends Ratelimit { | ||
/** | ||
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice. | ||
*/ | ||
constructor(config) { | ||
super({ | ||
prefix: config.prefix, | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
analytics: config.analytics, | ||
ctx: { | ||
redis: config.redis, | ||
cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0 | ||
} | ||
}); | ||
} | ||
/** | ||
* Each requests inside a fixed time increases a counter. | ||
* Once the counter reaches a maxmimum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static fixedWindow(tokens, window) { | ||
const windowDuration = ms(window); | ||
const script = ` | ||
local key = KEYS[1] | ||
local id = ARGV[1] | ||
local window = ARGV[2] | ||
redis.call("SADD", key, id) | ||
local members = redis.call("SMEMBERS", key) | ||
if #members == 1 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", key, window) | ||
end | ||
return members | ||
`; | ||
return async function(ctx, identifier) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const requestID = randomId(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval(script, [key], [requestID, windowDuration]) | ||
})); | ||
const firstResponse = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = firstResponse.length; | ||
const remaining = tokens - usedTokens - 1; | ||
async function sync() { | ||
const individualIDs = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values()); | ||
for (const db of dbs) { | ||
const ids = await db.request; | ||
if (ids.length >= tokens) { | ||
continue; | ||
} | ||
const diff = allIDs.filter((id) => !ids.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
await db.redis.sadd(key, ...allIDs); | ||
} | ||
} | ||
const success = remaining > 0; | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
} | ||
/** | ||
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage | ||
* costs than `slidingLogs` and improved boundary behavior by calcualting a | ||
* weighted score between two windows. | ||
* | ||
* **Pro:** | ||
* | ||
* Good performance allows this to scale to very high loads. | ||
* | ||
* **Con:** | ||
* | ||
* Nothing major. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - The duration in which the user can max X requests. | ||
*/ | ||
static slidingWindow(tokens, window) { | ||
const windowSize = ms(window); | ||
const script = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
local previousKey = KEYS[2] -- key of the previous bucket | ||
local tokens = tonumber(ARGV[1]) -- tokens per window | ||
local now = ARGV[2] -- current timestamp in milliseconds | ||
local window = ARGV[3] -- interval in milliseconds | ||
local requestID = ARGV[4] -- uuid for this request | ||
local currentMembers = redis.call("SMEMBERS", currentKey) | ||
local requestsInCurrentWindow = #currentMembers | ||
local previousMembers = redis.call("SMEMBERS", previousKey) | ||
local requestsInPreviousWindow = #previousMembers | ||
local percentageInCurrent = ( now % window) / window | ||
if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then | ||
return {currentMembers, previousMembers} | ||
end | ||
redis.call("SADD", currentKey, requestID) | ||
table.insert(currentMembers, requestID) | ||
if requestsInCurrentWindow == 0 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second | ||
end | ||
return {currentMembers, previousMembers} | ||
`; | ||
const windowDuration = ms(window); | ||
return async function(ctx, identifier) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const requestID = randomId(); | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - windowSize; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID]) | ||
})); | ||
const percentageInCurrent = now % windowDuration / windowDuration; | ||
const [current, previous] = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = previous.length * (1 - percentageInCurrent) + current.length; | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const [individualIDs] = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values()); | ||
for (const db of dbs) { | ||
const [ids] = await db.request; | ||
if (ids.length >= tokens) { | ||
continue; | ||
} | ||
const diff = allIDs.filter((id) => !ids.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
await db.redis.sadd(currentKey, ...allIDs); | ||
} | ||
} | ||
const success = remaining > 0; | ||
const reset = (currentWindow + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
} | ||
}; | ||
// src/single.ts | ||
@@ -608,220 +826,2 @@ var RegionRatelimit = class extends Ratelimit { | ||
}; | ||
// src/multi.ts | ||
function randomId() { | ||
let result = ""; | ||
const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; | ||
const charactersLength = characters.length; | ||
for (let i = 0; i < 16; i++) { | ||
result += characters.charAt(Math.floor(Math.random() * charactersLength)); | ||
} | ||
return result; | ||
} | ||
var MultiRegionRatelimit = class extends Ratelimit { | ||
/** | ||
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice. | ||
*/ | ||
constructor(config) { | ||
super({ | ||
prefix: config.prefix, | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
analytics: config.analytics, | ||
ctx: { | ||
redis: config.redis, | ||
cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0 | ||
} | ||
}); | ||
} | ||
/** | ||
* Each requests inside a fixed time increases a counter. | ||
* Once the counter reaches a maxmimum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static fixedWindow(tokens, window) { | ||
const windowDuration = ms(window); | ||
const script = ` | ||
local key = KEYS[1] | ||
local id = ARGV[1] | ||
local window = ARGV[2] | ||
redis.call("SADD", key, id) | ||
local members = redis.call("SMEMBERS", key) | ||
if #members == 1 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", key, window) | ||
end | ||
return members | ||
`; | ||
return async function(ctx, identifier) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const requestID = randomId(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval(script, [key], [requestID, windowDuration]) | ||
})); | ||
const firstResponse = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = firstResponse.length; | ||
const remaining = tokens - usedTokens - 1; | ||
async function sync() { | ||
const individualIDs = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values()); | ||
for (const db of dbs) { | ||
const ids = await db.request; | ||
if (ids.length >= tokens) { | ||
continue; | ||
} | ||
const diff = allIDs.filter((id) => !ids.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
await db.redis.sadd(key, ...allIDs); | ||
} | ||
} | ||
const success = remaining > 0; | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
} | ||
/** | ||
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage | ||
* costs than `slidingLogs` and improved boundary behavior by calcualting a | ||
* weighted score between two windows. | ||
* | ||
* **Pro:** | ||
* | ||
* Good performance allows this to scale to very high loads. | ||
* | ||
* **Con:** | ||
* | ||
* Nothing major. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - The duration in which the user can max X requests. | ||
*/ | ||
static slidingWindow(tokens, window) { | ||
const windowSize = ms(window); | ||
const script = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
local previousKey = KEYS[2] -- key of the previous bucket | ||
local tokens = tonumber(ARGV[1]) -- tokens per window | ||
local now = ARGV[2] -- current timestamp in milliseconds | ||
local window = ARGV[3] -- interval in milliseconds | ||
local requestID = ARGV[4] -- uuid for this request | ||
local currentMembers = redis.call("SMEMBERS", currentKey) | ||
local requestsInCurrentWindow = #currentMembers | ||
local previousMembers = redis.call("SMEMBERS", previousKey) | ||
local requestsInPreviousWindow = #previousMembers | ||
local percentageInCurrent = ( now % window) / window | ||
if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then | ||
return {currentMembers, previousMembers} | ||
end | ||
redis.call("SADD", currentKey, requestID) | ||
table.insert(currentMembers, requestID) | ||
if requestsInCurrentWindow == 0 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second | ||
end | ||
return {currentMembers, previousMembers} | ||
`; | ||
const windowDuration = ms(window); | ||
return async function(ctx, identifier) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const requestID = randomId(); | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - windowSize; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID]) | ||
})); | ||
const percentageInCurrent = now % windowDuration / windowDuration; | ||
const [current, previous] = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = previous.length * (1 - percentageInCurrent) + current.length; | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const [individualIDs] = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values()); | ||
for (const db of dbs) { | ||
const [ids] = await db.request; | ||
if (ids.length >= tokens) { | ||
continue; | ||
} | ||
const diff = allIDs.filter((id) => !ids.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
await db.redis.sadd(currentKey, ...allIDs); | ||
} | ||
} | ||
const success = remaining > 0; | ||
const reset = (currentWindow + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
} | ||
}; | ||
// Annotate the CommonJS export names for ESM import in node: | ||
@@ -828,0 +828,0 @@ 0 && (module.exports = { |
{ | ||
"name": "@upstash/ratelimit", | ||
"version": "0.4.1", | ||
"version": "0.4.2", | ||
"main": "./dist/index.js", | ||
@@ -27,4 +27,4 @@ "types": "./dist/index.d.ts", | ||
"@types/jest": "^29.5.0", | ||
"@types/node": "^18.15.10", | ||
"@upstash/redis": "^1.20.1", | ||
"@types/node": "^18.15.11", | ||
"@upstash/redis": "^1.20.2", | ||
"dotenv-cli": "^7.1.0", | ||
@@ -31,0 +31,0 @@ "jest": "^29.5.0", |
@@ -1,2 +0,2 @@ | ||
# Upstash RateLimit | ||
# Upstash Rate Limit | ||
@@ -89,3 +89,9 @@ [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml) | ||
limiter: Ratelimit.slidingWindow(10, "10 s"), | ||
analytics: true | ||
analytics: true, | ||
/** | ||
* Optional prefix for the keys used in redis. This is useful if you want to share a redis | ||
* instance with other applications and want to avoid key collisions. The default prefix is | ||
* "@upstash/ratelimit" | ||
*/ | ||
prefix: "@upstash/ratelimit", | ||
}); | ||
@@ -228,2 +234,37 @@ | ||
## Using multiple limits | ||
Sometimes you might want to apply different limits to different users. For example you might want to allow 10 requests per 10 seconds for free users, but 60 requests per 10 seconds for paid users. | ||
Here's how you could do that: | ||
```ts | ||
import { Redis } from "@upstash/redis" | ||
import { Ratelimit } from "@upstash/ratelimit" | ||
const redis = Redis.fromEnv() | ||
const ratelimit = { | ||
free: new Ratelimit({ | ||
redis, | ||
analytics: true, | ||
prefix: "ratelimit:free", | ||
limiter: Ratelimit.slidingWindow(10, "10s"), | ||
}), | ||
paid: new Ratelimit({ | ||
redis, | ||
analytics: true, | ||
prefix: "ratelimit:paid", | ||
limiter: Ratelimit.slidingWindow(60, "10s"), | ||
}) | ||
} | ||
await ratelimit.free.limit(ip) | ||
// or for a paid user you might have an email or userId available: | ||
await ratelimit.paid.limit(userId) | ||
``` | ||
## MultiRegion replicated ratelimiting | ||
@@ -230,0 +271,0 @@ |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
212932
483