@upstash/ratelimit
Comparing version 1.0.3 to 1.1.0-canary-1
@@ -13,2 +13,4 @@ /** | ||
incr: (key: string) => number; | ||
pop: (key: string) => void; | ||
empty: () => void; | ||
} | ||
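The two new `EphemeralCache` methods map directly onto `Map` operations, as the implementation further down shows (`cache.delete` and `cache.clear`). A minimal sketch, assuming a `Map<string, number>`-backed store:

```ts
// Sketch only: the two cache methods added in 1.1.0, backed by a plain Map.
const cache = new Map<string, number>();

const pop = (key: string): void => {
  cache.delete(key); // drop cached state for a single identifier
};

const empty = (): void => {
  cache.clear(); // drop all cached state
};
```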
@@ -43,2 +45,3 @@ type RegionContext = { | ||
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit. | ||
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit. | ||
* In most cases you can simply ignore this. | ||
@@ -48,16 +51,11 @@ * | ||
* | ||
* **Vercel Edge:** | ||
* https://nextjs.org/docs/api-reference/next/server#nextfetchevent | ||
* | ||
* ```ts | ||
* const { pending } = await ratelimit.limit("id") | ||
* event.waitUntil(pending) | ||
* context.waitUntil(pending) | ||
* ``` | ||
* | ||
* **Cloudflare Worker:** | ||
* https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker | ||
* | ||
* ```ts | ||
* const { pending } = await ratelimit.limit("id") | ||
* context.waitUntil(pending) | ||
* See `waitUntil` documentation in | ||
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) | ||
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) | ||
* for more details. | ||
* ``` | ||
@@ -67,5 +65,9 @@ */ | ||
}; | ||
type Algorithm<TContext> = (ctx: TContext, identifier: string, rate?: number, opts?: { | ||
cache?: EphemeralCache; | ||
}) => Promise<RatelimitResponse>; | ||
type Algorithm<TContext> = () => { | ||
limit: (ctx: TContext, identifier: string, rate?: number, opts?: { | ||
cache?: EphemeralCache; | ||
}) => Promise<RatelimitResponse>; | ||
getRemaining: (ctx: TContext, identifier: string) => Promise<number>; | ||
resetTokens: (ctx: TContext, identifier: string) => void; | ||
}; | ||
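An `Algorithm` is now a zero-argument factory returning the three operations instead of a bare limit function, so call sites change from `limiter(ctx, key)` to `limiter().limit(ctx, key)` (visible in the `ratelimit.ts` hunk below). A sketch of a no-op algorithm matching the new shape; the constants are placeholders, and `RegionContext` and the response fields follow the typings above:

```ts
// Sketch of the new factory shape; values are illustrative only.
const noopAlgorithm: Algorithm<RegionContext> = () => ({
  limit: async (_ctx, _identifier, _rate, _opts) => ({
    success: true,
    limit: 10,
    remaining: 10,
    reset: Date.now(),
    pending: Promise.resolve(),
  }),
  getRemaining: async (_ctx, _identifier) => 10,
  resetTokens: (_ctx, _identifier) => {},
});
```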
/** | ||
@@ -264,2 +266,4 @@ * This is all we need from the redis sdk. | ||
blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>; | ||
resetUsedTokens: (identifier: string) => Promise<void>; | ||
getRemaining: (identifier: string) => Promise<number>; | ||
} | ||
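From the caller's side, the two new public methods look like this (the identifier `"user_123"` is illustrative):

```ts
// How many tokens are left for this identifier in the current window?
const remaining = await ratelimit.getRemaining("user_123");

// Forget all recorded usage for this identifier.
await ratelimit.resetUsedTokens("user_123");
```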
@@ -266,0 +270,0 @@ |
@@ -125,2 +125,8 @@ "use strict"; | ||
} | ||
pop(key) { | ||
this.cache.delete(key); | ||
} | ||
empty() { | ||
this.cache.clear(); | ||
} | ||
}; | ||
@@ -153,3 +159,3 @@ | ||
// src/lua-scripts/multi.ts | ||
var fixedWindowScript = ` | ||
var fixedWindowLimitScript = ` | ||
local key = KEYS[1] | ||
@@ -170,3 +176,11 @@ local id = ARGV[1] | ||
`; | ||
var slidingWindowScript = ` | ||
var fixedWindowRemainingTokensScript = ` | ||
local key = KEYS[1] | ||
local tokens = 0 | ||
local fields = redis.call("HGETALL", key) | ||
return fields | ||
`; | ||
var slidingWindowLimitScript = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
@@ -206,3 +220,51 @@ local previousKey = KEYS[2] -- key of the previous bucket | ||
`; | ||
var slidingWindowRemainingTokensScript = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
local previousKey = KEYS[2] -- key of the previous bucket | ||
local now = ARGV[1] -- current timestamp in milliseconds | ||
local window = ARGV[2] -- interval in milliseconds | ||
local currentFields = redis.call("HGETALL", currentKey) | ||
local requestsInCurrentWindow = 0 | ||
for i = 2, #currentFields, 2 do | ||
requestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i]) | ||
end | ||
local previousFields = redis.call("HGETALL", previousKey) | ||
local requestsInPreviousWindow = 0 | ||
for i = 2, #previousFields, 2 do | ||
requestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i]) | ||
end | ||
local percentageInCurrent = ( now % window) / window | ||
requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow) | ||
return requestsInCurrentWindow + requestsInPreviousWindow | ||
`; | ||
// src/lua-scripts/reset.ts | ||
var resetScript = ` | ||
local pattern = KEYS[1] | ||
-- Initialize cursor to start from 0 | ||
local cursor = "0" | ||
repeat | ||
-- Scan for keys matching the pattern | ||
local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern) | ||
-- Extract cursor for the next iteration | ||
cursor = scan_result[1] | ||
-- Extract keys from the scan result | ||
local keys = scan_result[2] | ||
for i=1, #keys do | ||
redis.call('DEL', keys[i]) | ||
end | ||
-- Continue scanning until cursor is 0 (end of keyspace) | ||
until cursor == "0" | ||
`; | ||
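`resetScript` walks the keyspace with a cursor-based `SCAN` rather than a blocking `KEYS`, deleting every key matching the `<prefix>:<identifier>:*` pattern. A client-side sketch of the same loop, assuming a redis client exposing `scan` and `del` (the library keeps it in Lua to avoid one round trip per iteration):

```ts
// Illustrative TypeScript equivalent of resetScript; the structural type below
// is an assumption for the sketch, not the @upstash/redis signature.
type ScanClient = {
  scan: (cursor: string, opts: { match: string }) => Promise<[string, string[]]>;
  del: (key: string) => Promise<number>;
};

async function resetByPattern(redis: ScanClient, pattern: string): Promise<void> {
  let cursor = "0";
  do {
    const [nextCursor, keys] = await redis.scan(cursor, { match: pattern });
    for (const key of keys) {
      await redis.del(key); // remove one matching rate limit key
    }
    cursor = nextCursor;
  } while (cursor !== "0"); // SCAN returns cursor "0" once the keyspace is exhausted
}
```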
// src/ratelimit.ts | ||
@@ -270,3 +332,3 @@ var Ratelimit = class { | ||
try { | ||
const arr = [this.limiter(this.ctx, key, req?.rate)]; | ||
const arr = [this.limiter().limit(this.ctx, key, req?.rate)]; | ||
if (this.timeout > 0) { | ||
@@ -355,2 +417,10 @@ arr.push( | ||
}; | ||
resetUsedTokens = async (identifier) => { | ||
const pattern = [this.prefix, identifier].join(":"); | ||
await this.limiter().resetTokens(this.ctx, pattern); | ||
}; | ||
getRemaining = async (identifier) => { | ||
const pattern = [this.prefix, identifier].join(":"); | ||
return await this.limiter().getRemaining(this.ctx, pattern); | ||
}; | ||
}; | ||
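Both methods join the configured prefix and the identifier into the key pattern handed to the limiter. With the library's default prefix this looks like the following (values are illustrative):

```ts
// Default prefix per the library docs; the identifier is whatever you pass in.
const prefix = "@upstash/ratelimit";
const identifier = "user_123";
const pattern = [prefix, identifier].join(":"); // "@upstash/ratelimit:user_123"
// The resetTokens implementations below append ":*" so that window-suffixed
// keys such as "@upstash/ratelimit:user_123:28373147" also match.
```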
@@ -404,87 +474,118 @@ | ||
const windowDuration = ms(window); | ||
return async (ctx, identifier, rate) => { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
} | ||
const requestId = randomId(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval( | ||
fixedWindowScript, | ||
[key], | ||
[requestId, windowDuration, incrementBy] | ||
) | ||
})); | ||
const firstResponse = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = firstResponse.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const individualIDs = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from( | ||
new Set( | ||
individualIDs.flatMap((_) => _).reduce((acc, curr, index) => { | ||
const requestId = randomId(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval( | ||
fixedWindowLimitScript, | ||
[key], | ||
[requestId, windowDuration, incrementBy] | ||
) | ||
})); | ||
const firstResponse = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = firstResponse.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const individualIDs = await Promise.all(dbs.map((s) => s.request)); | ||
const allIDs = Array.from( | ||
new Set( | ||
individualIDs.flatMap((_) => _).reduce((acc, curr, index) => { | ||
if (index % 2 === 0) { | ||
acc.push(curr); | ||
} | ||
return acc; | ||
}, []) | ||
).values() | ||
); | ||
for (const db of dbs) { | ||
const usedDbTokens = (await db.request).reduce( | ||
(accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, | ||
0 | ||
); | ||
const dbIds = (await db.request).reduce((ids, currentId, index) => { | ||
if (index % 2 === 0) { | ||
acc.push(curr); | ||
ids.push(currentId); | ||
} | ||
return acc; | ||
}, []) | ||
).values() | ||
); | ||
for (const db of dbs) { | ||
const usedDbTokens = (await db.request).reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
return ids; | ||
}, []); | ||
if (usedDbTokens >= tokens) { | ||
continue; | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const dbIds = (await db.request).reduce((ids, currentId, index) => { | ||
if (index % 2 === 0) { | ||
ids.push(currentId); | ||
const diff = allIDs.filter((id) => !dbIds.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
return ids; | ||
}, []); | ||
if (usedDbTokens >= tokens) { | ||
continue; | ||
for (const requestId2 of diff) { | ||
await db.redis.hset(key, { [requestId2]: incrementBy }); | ||
} | ||
} | ||
const diff = allIDs.filter((id) => !dbIds.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
const success = remaining > 0; | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval(fixedWindowRemainingTokensScript, [key], [null]) | ||
})); | ||
const firstResponse = await Promise.any(dbs.map((s) => s.request)); | ||
const usedTokens = firstResponse.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
for (const requestId2 of diff) { | ||
await db.redis.hset(key, { [requestId2]: incrementBy }); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
return Math.max(0, tokens - usedTokens); | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = [identifier, "*"].join(":"); | ||
if (ctx.cache) { | ||
ctx.cache.pop(identifier); | ||
} | ||
for (const db of ctx.redis) { | ||
await db.eval(resetScript, [pattern], [null]); | ||
} | ||
} | ||
const success = remaining > 0; | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
}); | ||
} | ||
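The `index % 2` reduces above exist because `HGETALL` replies arrive as a flat `[field1, value1, field2, value2, ...]` array: even indices hold request ids, odd indices hold counts. A small sketch of that summation:

```ts
// Sum the counts out of a flat HGETALL reply such as ["req_a", "1", "req_b", "3"].
function sumUsedTokens(hgetallReply: string[]): number {
  let used = 0;
  for (let index = 1; index < hgetallReply.length; index += 2) {
    used += Number.parseInt(hgetallReply[index], 10); // odd indices are values
  }
  return used;
}

sumUsedTokens(["req_a", "1", "req_b", "3"]); // => 4
```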
@@ -510,85 +611,114 @@ /** | ||
const windowDuration = ms(window); | ||
return async (ctx, identifier, rate) => { | ||
const requestId = randomId(); | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval( | ||
slidingWindowScript, | ||
[currentKey, previousKey], | ||
[tokens, now, windowDuration, requestId, incrementBy] | ||
// lua seems to return `1` for true and `null` for false | ||
) | ||
})); | ||
const percentageInCurrent = now % windowDuration / windowDuration; | ||
const [current, previous, success] = await Promise.any(dbs.map((s) => s.request)); | ||
const previousUsedTokens = previous.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const currentUsedTokens = current.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const previousPartialUsed = previousUsedTokens * (1 - percentageInCurrent); | ||
const usedTokens = previousPartialUsed + currentUsedTokens; | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const res = await Promise.all(dbs.map((s) => s.request)); | ||
const allCurrentIds = res.flatMap(([current2]) => current2).reduce((accCurrentIds, curr, index) => { | ||
if (index % 2 === 0) { | ||
accCurrentIds.push(curr); | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
const requestId = randomId(); | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval( | ||
slidingWindowLimitScript, | ||
[currentKey, previousKey], | ||
[tokens, now, windowDuration, requestId, incrementBy] | ||
// lua seems to return `1` for true and `null` for false | ||
) | ||
})); | ||
const percentageInCurrent = now % windowDuration / windowDuration; | ||
const [current, previous, success] = await Promise.any(dbs.map((s) => s.request)); | ||
const previousUsedTokens = previous.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accCurrentIds; | ||
}, []); | ||
for (const db of dbs) { | ||
const [_current, previous2, _success] = await db.request; | ||
const dbIds = previous2.reduce((ids, currentId, index) => { | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const currentUsedTokens = current.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
const previousPartialUsed = previousUsedTokens * (1 - percentageInCurrent); | ||
const usedTokens = previousPartialUsed + currentUsedTokens; | ||
const remaining = tokens - usedTokens; | ||
async function sync() { | ||
const res = await Promise.all(dbs.map((s) => s.request)); | ||
const allCurrentIds = res.flatMap(([current2]) => current2).reduce((accCurrentIds, curr, index) => { | ||
if (index % 2 === 0) { | ||
ids.push(currentId); | ||
accCurrentIds.push(curr); | ||
} | ||
return ids; | ||
return accCurrentIds; | ||
}, []); | ||
const usedDbTokens = previous2.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
for (const db of dbs) { | ||
const [_current, previous2, _success] = await db.request; | ||
const dbIds = previous2.reduce((ids, currentId, index) => { | ||
if (index % 2 === 0) { | ||
ids.push(currentId); | ||
} | ||
return ids; | ||
}, []); | ||
const usedDbTokens = previous2.reduce((accTokens, usedToken, index) => { | ||
let parsedToken = 0; | ||
if (index % 2) { | ||
parsedToken = Number.parseInt(usedToken); | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
if (usedDbTokens >= tokens) { | ||
continue; | ||
} | ||
return accTokens + parsedToken; | ||
}, 0); | ||
if (usedDbTokens >= tokens) { | ||
continue; | ||
const diff = allCurrentIds.filter((id) => !dbIds.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
for (const requestId2 of diff) { | ||
await db.redis.hset(currentKey, { [requestId2]: incrementBy }); | ||
} | ||
} | ||
const diff = allCurrentIds.filter((id) => !dbIds.includes(id)); | ||
if (diff.length === 0) { | ||
continue; | ||
} | ||
for (const requestId2 of diff) { | ||
await db.redis.hset(currentKey, { [requestId2]: incrementBy }); | ||
} | ||
} | ||
const reset = (currentWindow + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success: Boolean(success), | ||
limit: tokens, | ||
remaining: Math.max(0, remaining), | ||
reset, | ||
pending: sync() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const dbs = ctx.redis.map((redis) => ({ | ||
redis, | ||
request: redis.eval( | ||
slidingWindowRemainingTokensScript, | ||
[currentKey, previousKey], | ||
[now, windowSize] | ||
// lua seems to return `1` for true and `null` for false | ||
) | ||
})); | ||
const usedTokens = await Promise.any(dbs.map((s) => s.request)); | ||
return Math.max(0, tokens - usedTokens); | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = [identifier, "*"].join(":"); | ||
if (ctx.cache) { | ||
ctx.cache.pop(identifier); | ||
} | ||
for (const db of ctx.redis) { | ||
await db.eval(resetScript, [pattern], [null]); | ||
} | ||
} | ||
const reset = (currentWindow + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success: Boolean(success), | ||
limit: tokens, | ||
remaining: Math.max(0, remaining), | ||
reset, | ||
pending: sync() | ||
}; | ||
}; | ||
}); | ||
} | ||
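Both the Lua scripts and the JS above estimate usage by weighting the previous window with the share of it that still overlaps the sliding interval. A worked sketch of that formula:

```ts
// Sliding-window estimate: scale last window's count by the overlap that remains.
function estimateUsed(
  now: number,
  windowMs: number,
  previousCount: number,
  currentCount: number,
): number {
  const percentageInCurrent = (now % windowMs) / windowMs;
  return Math.floor((1 - percentageInCurrent) * previousCount) + currentCount;
}

// 40% into a 10s window, 10 requests last window, 3 so far in this one:
estimateUsed(4_000, 10_000, 10, 3); // => 6 + 3 = 9
```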
@@ -598,3 +728,3 @@ }; | ||
// src/lua-scripts/single.ts | ||
var fixedWindowScript2 = ` | ||
var fixedWindowLimitScript2 = ` | ||
local key = KEYS[1] | ||
@@ -605,3 +735,3 @@ local window = ARGV[1] | ||
local r = redis.call("INCRBY", key, incrementBy) | ||
if r == incrementBy then | ||
if r == tonumber(incrementBy) then | ||
-- The first time this key is set, the value will be equal to incrementBy. | ||
@@ -614,3 +744,13 @@ -- So we only need the expire command once | ||
`; | ||
var slidingWindowScript2 = ` | ||
var fixedWindowRemainingTokensScript2 = ` | ||
local key = KEYS[1] | ||
local tokens = 0 | ||
local value = redis.call('GET', key) | ||
if value then | ||
tokens = value | ||
end | ||
return tokens | ||
`; | ||
var slidingWindowLimitScript2 = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
@@ -640,3 +780,3 @@ local previousKey = KEYS[2] -- key of the previous bucket | ||
local newValue = redis.call("INCRBY", currentKey, incrementBy) | ||
if newValue == incrementBy then | ||
if newValue == tonumber(incrementBy) then | ||
-- The first time this key is set, the value will be equal to incrementBy. | ||
@@ -648,3 +788,25 @@ -- So we only need the expire command once | ||
`; | ||
var tokenBucketScript = ` | ||
var slidingWindowRemainingTokensScript2 = ` | ||
local currentKey = KEYS[1] -- identifier including prefixes | ||
local previousKey = KEYS[2] -- key of the previous bucket | ||
local now = ARGV[1] -- current timestamp in milliseconds | ||
local window = ARGV[2] -- interval in milliseconds | ||
local requestsInCurrentWindow = redis.call("GET", currentKey) | ||
if requestsInCurrentWindow == false then | ||
requestsInCurrentWindow = 0 | ||
end | ||
local requestsInPreviousWindow = redis.call("GET", previousKey) | ||
if requestsInPreviousWindow == false then | ||
requestsInPreviousWindow = 0 | ||
end | ||
local percentageInCurrent = ( now % window ) / window | ||
-- weighted requests to consider from the previous window | ||
requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow) | ||
return requestsInPreviousWindow + requestsInCurrentWindow | ||
`; | ||
var tokenBucketLimitScript = ` | ||
local key = KEYS[1] -- identifier including prefixes | ||
@@ -688,3 +850,15 @@ local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens | ||
`; | ||
var cachedFixedWindowScript = ` | ||
var tokenBucketRemainingTokensScript = ` | ||
local key = KEYS[1] | ||
local maxTokens = tonumber(ARGV[1]) | ||
local bucket = redis.call("HMGET", key, "tokens") | ||
if bucket[1] == false then | ||
return maxTokens | ||
end | ||
return tonumber(bucket[1]) | ||
`; | ||
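`HMGET key "tokens"` returns a nil first element when the bucket hash has never been written, so the script treats a missing bucket as full. The same fallback in TypeScript, assuming the reply shape above:

```ts
// Mirrors tokenBucketRemainingTokensScript: no bucket yet means a full bucket.
function remainingTokens(tokensField: string | null, maxTokens: number): number {
  return tokensField === null ? maxTokens : Number.parseInt(tokensField, 10);
}
```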
var cachedFixedWindowLimitScript = ` | ||
local key = KEYS[1] | ||
@@ -703,3 +877,13 @@ local window = ARGV[1] | ||
`; | ||
var cachedFixedWindowRemainingTokenScript = ` | ||
local key = KEYS[1] | ||
local tokens = 0 | ||
local value = redis.call('GET', key) | ||
if value then | ||
tokens = value | ||
end | ||
return tokens | ||
`; | ||
// src/single.ts | ||
@@ -742,37 +926,56 @@ var RegionRatelimit = class extends Ratelimit { | ||
const windowDuration = ms(window); | ||
return async (ctx, identifier, rate) => { | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const usedTokensAfterUpdate = await ctx.redis.eval( | ||
fixedWindowLimitScript2, | ||
[key], | ||
[windowDuration, incrementBy] | ||
); | ||
const success = usedTokensAfterUpdate <= tokens; | ||
const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate); | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: remainingTokens, | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const usedTokens = await ctx.redis.eval( | ||
fixedWindowRemainingTokensScript2, | ||
[key], | ||
[null] | ||
); | ||
return Math.max(0, tokens - usedTokens); | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = [identifier, "*"].join(":"); | ||
if (ctx.cache) { | ||
ctx.cache.pop(identifier); | ||
} | ||
await ctx.redis.eval(resetScript, [pattern], [null]); | ||
} | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const usedTokensAfterUpdate = await ctx.redis.eval( | ||
fixedWindowScript2, | ||
[key], | ||
[windowDuration, incrementBy] | ||
); | ||
const success = usedTokensAfterUpdate <= tokens; | ||
const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate); | ||
const reset = (bucket + 1) * windowDuration; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: remainingTokens, | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}; | ||
}); | ||
} | ||
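From the caller's perspective nothing changes here; only the internals moved into the factory object. A minimal usage sketch, assuming Upstash credentials in the environment:

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(10, "60 s"), // 10 requests per minute
});

const { success, remaining, reset } = await ratelimit.limit("user_123");
```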
@@ -797,39 +1000,61 @@ /** | ||
const windowSize = ms(window); | ||
return async (ctx, identifier, rate) => { | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: tokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const remainingTokens = await ctx.redis.eval( | ||
slidingWindowLimitScript2, | ||
[currentKey, previousKey], | ||
[tokens, now, windowSize, incrementBy] | ||
); | ||
const success = remainingTokens >= 0; | ||
const reset = (currentWindow + 1) * windowSize; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: Math.max(0, remainingTokens), | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
const now = Date.now(); | ||
const currentWindow = Math.floor(now / windowSize); | ||
const currentKey = [identifier, currentWindow].join(":"); | ||
const previousWindow = currentWindow - 1; | ||
const previousKey = [identifier, previousWindow].join(":"); | ||
const usedTokens = await ctx.redis.eval( | ||
slidingWindowRemainingTokensScript2, | ||
[currentKey, previousKey], | ||
[now, windowSize] | ||
); | ||
return Math.max(0, tokens - usedTokens); | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = [identifier, "*"].join(":"); | ||
if (ctx.cache) { | ||
ctx.cache.pop(identifier); | ||
} | ||
await ctx.redis.eval(resetScript, [pattern], [null]); | ||
} | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const remainingTokens = await ctx.redis.eval( | ||
slidingWindowScript2, | ||
[currentKey, previousKey], | ||
[tokens, now, windowSize, incrementBy] | ||
); | ||
const success = remainingTokens >= 0; | ||
const reset = (currentWindow + 1) * windowSize; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: Math.max(0, remainingTokens), | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}; | ||
}); | ||
} | ||
@@ -851,34 +1076,51 @@ /** | ||
const intervalDuration = ms(interval); | ||
return async (ctx, identifier, rate) => { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: maxTokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
if (ctx.cache) { | ||
const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier); | ||
if (blocked) { | ||
return { | ||
success: false, | ||
limit: maxTokens, | ||
remaining: 0, | ||
reset: reset2, | ||
pending: Promise.resolve() | ||
}; | ||
} | ||
} | ||
const now = Date.now(); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const [remaining, reset] = await ctx.redis.eval( | ||
tokenBucketLimitScript, | ||
[identifier], | ||
[maxTokens, intervalDuration, refillRate, now, incrementBy] | ||
); | ||
const success = remaining >= 0; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: maxTokens, | ||
remaining, | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
const remainingTokens = await ctx.redis.eval( | ||
tokenBucketRemainingTokensScript, | ||
[identifier], | ||
[maxTokens] | ||
); | ||
return remainingTokens; | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = identifier; | ||
if (ctx.cache) { | ||
ctx.cache.pop(identifier); | ||
} | ||
await ctx.redis.eval(resetScript, [pattern], [null]); | ||
} | ||
const now = Date.now(); | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const [remaining, reset] = await ctx.redis.eval( | ||
tokenBucketScript, | ||
[identifier], | ||
[maxTokens, intervalDuration, refillRate, now, incrementBy] | ||
); | ||
const success = remaining >= 0; | ||
if (ctx.cache && !success) { | ||
ctx.cache.blockUntil(identifier, reset); | ||
} | ||
return { | ||
success, | ||
limit: maxTokens, | ||
remaining, | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}; | ||
}); | ||
} | ||
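Note that `resetTokens` here uses the identifier itself as the pattern (no `:*` suffix), since a token bucket keeps all of its state in a single hash rather than in per-window keys. Configuration is unchanged, e.g.:

```ts
// Illustrative parameters: refill 5 tokens every 10 seconds, burst up to 20.
const limiter = Ratelimit.tokenBucket(5, "10 s", 20);
```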
@@ -911,40 +1153,68 @@ /** | ||
const windowDuration = ms(window); | ||
return async (ctx, identifier, rate) => { | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const reset = (bucket + 1) * windowDuration; | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const hit = typeof ctx.cache.get(key) === "number"; | ||
if (hit) { | ||
const cachedTokensAfterUpdate = ctx.cache.incr(key); | ||
const success = cachedTokensAfterUpdate < tokens; | ||
const pending = success ? ctx.redis.eval(cachedFixedWindowScript, [key], [windowDuration, incrementBy]).then((t) => { | ||
ctx.cache.set(key, t); | ||
}) : Promise.resolve(); | ||
return () => ({ | ||
async limit(ctx, identifier, rate) { | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const reset = (bucket + 1) * windowDuration; | ||
const incrementBy = rate ? Math.max(1, rate) : 1; | ||
const hit = typeof ctx.cache.get(key) === "number"; | ||
if (hit) { | ||
const cachedTokensAfterUpdate = ctx.cache.incr(key); | ||
const success = cachedTokensAfterUpdate < tokens; | ||
const pending = success ? ctx.redis.eval(cachedFixedWindowLimitScript, [key], [windowDuration, incrementBy]).then((t) => { | ||
ctx.cache.set(key, t); | ||
}) : Promise.resolve(); | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: tokens - cachedTokensAfterUpdate, | ||
reset, | ||
pending | ||
}; | ||
} | ||
const usedTokensAfterUpdate = await ctx.redis.eval( | ||
cachedFixedWindowLimitScript, | ||
[key], | ||
[windowDuration, incrementBy] | ||
); | ||
ctx.cache.set(key, usedTokensAfterUpdate); | ||
const remaining = tokens - usedTokensAfterUpdate; | ||
return { | ||
success, | ||
success: remaining >= 0, | ||
limit: tokens, | ||
remaining: tokens - cachedTokensAfterUpdate, | ||
remaining, | ||
reset, | ||
pending | ||
pending: Promise.resolve() | ||
}; | ||
}, | ||
async getRemaining(ctx, identifier) { | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const hit = typeof ctx.cache.get(key) === "number"; | ||
if (hit) { | ||
const cachedUsedTokens = ctx.cache.get(key) ?? 0; | ||
return Math.max(0, tokens - cachedUsedTokens); | ||
} | ||
const usedTokens = await ctx.redis.eval( | ||
cachedFixedWindowRemainingTokenScript, | ||
[key], | ||
[null] | ||
); | ||
return Math.max(0, tokens - usedTokens); | ||
}, | ||
async resetTokens(ctx, identifier) { | ||
const pattern = [identifier, "*"].join(":"); | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
ctx.cache.pop(identifier); | ||
await ctx.redis.eval(resetScript, [pattern], [null]); | ||
} | ||
const usedTokensAfterUpdate = await ctx.redis.eval( | ||
cachedFixedWindowScript, | ||
[key], | ||
[windowDuration, incrementBy] | ||
); | ||
ctx.cache.set(key, usedTokensAfterUpdate); | ||
const remaining = tokens - usedTokensAfterUpdate; | ||
return { | ||
success: remaining >= 0, | ||
limit: tokens, | ||
remaining, | ||
reset, | ||
pending: Promise.resolve() | ||
}; | ||
}; | ||
}); | ||
} | ||
@@ -951,0 +1221,0 @@ }; |
@@ -1,1 +0,1 @@ | ||
{ "name": "@upstash/ratelimit", "version": "v1.0.3", "main": "./dist/index.js", "types": "./dist/index.d.ts", "files": [ "dist" ], "scripts": { "build": "tsup", "test": "bun test src --coverage", "fmt": "bunx @biomejs/biome check --apply ./src" }, "devDependencies": { "@upstash/redis": "^1.28.3", "bun-types": "latest", "rome": "^11.0.0", "turbo": "^1.10.15", "tsup": "^7.2.0", "typescript": "^5.0.0" }, "dependencies": { "@upstash/core-analytics": "^0.0.7" }, "license": "MIT" } | ||
{ "name": "@upstash/ratelimit", "version": "v1.1.0-canary-1", "main": "./dist/index.js", "types": "./dist/index.d.ts", "files": [ "dist" ], "scripts": { "build": "tsup", "test": "bun test src --coverage", "fmt": "bunx @biomejs/biome check --apply ./src" }, "devDependencies": { "@upstash/redis": "^1.28.3", "bun-types": "latest", "rome": "^11.0.0", "turbo": "^1.10.15", "tsup": "^7.2.0", "typescript": "^5.0.0" }, "dependencies": { "@upstash/core-analytics": "^0.0.7" }, "license": "MIT" } |
# Upstash Rate Limit | ||
[![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml) | ||
![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit) | ||
[![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)](https://www.npmjs.com/package/ratelimit) | ||
> [!NOTE] | ||
> **This project is in GA Stage.** | ||
> [!NOTE] > **This project is in GA Stage.** | ||
> The Upstash Professional Support fully covers this project. It receives regular updates, and bug fixes. The Upstash team is committed to maintaining and improving its functionality. | ||
@@ -35,3 +34,3 @@ | ||
```ts | ||
import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest" | ||
import { Ratelimit } from "https://cdn.skypack.dev/@upstash/ratelimit@latest"; | ||
``` | ||
@@ -109,20 +108,16 @@ | ||
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit. | ||
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit. | ||
* In most cases you can simply ignore this. | ||
* | ||
* On Vercel Edge or Cloudflare workers, you need to explicitely handle the pending Promise like this: | ||
* On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this: | ||
* | ||
* **Vercel Edge:** | ||
* https://nextjs.org/docs/api-reference/next/server#nextfetchevent | ||
* | ||
* ```ts | ||
* const { pending } = await ratelimit.limit("id") | ||
* event.waitUntil(pending) | ||
* context.waitUntil(pending) | ||
* ``` | ||
* | ||
* **Cloudflare Worker:** | ||
* https://developers.cloudflare.com/workers/runtime-apis/fetch-event/#syntax-module-worker | ||
* | ||
* ```ts | ||
* const { pending } = await ratelimit.limit("id") | ||
* context.waitUntil(pending) | ||
* See `waitUntil` documentation in | ||
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) | ||
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) | ||
* for more details. | ||
* ``` | ||
@@ -134,3 +129,26 @@ */ | ||
### Using with Cloudflare Workers and Vercel Edge | ||
When using Cloudflare Workers or Vercel Edge, we need to make sure that the | ||
rate limiting operations complete before the runtime ends after returning | ||
the response. | ||
This is important in two cases where we do some operations in the background asynchronously after `limit` is called: | ||
1. Using MultiRegion: synchronize Redis instances in different regions | ||
2. Enabling analytics: send analytics to Redis | ||
In these cases, we need to wait for these operations to finish before sending the response to the user. Otherwise, the runtime will end and we won't be able to complete our chores. | ||
In order to wait for these operations to finish, use the `pending` promise: | ||
```ts | ||
const { pending } = await ratelimit.limit("id"); | ||
context.waitUntil(pending); | ||
``` | ||
See `waitUntil` documentation in [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil) and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil) for more details. | ||
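A minimal Cloudflare module-worker sketch putting this together (the binding names and limiter settings are illustrative):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis/cloudflare";

type Env = {
  UPSTASH_REDIS_REST_URL: string;
  UPSTASH_REDIS_REST_TOKEN: string;
};

export default {
  async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
    const ratelimit = new Ratelimit({
      redis: Redis.fromEnv(env),
      limiter: Ratelimit.slidingWindow(10, "10 s"),
      analytics: true, // analytics are flushed after the response, hence waitUntil below
    });

    const ip = request.headers.get("cf-connecting-ip") ?? "anonymous";
    const { success, pending } = await ratelimit.limit(ip);

    // Keep the worker alive until background sync/analytics finish.
    ctx.waitUntil(pending);

    return success
      ? new Response("OK")
      : new Response("Too Many Requests", { status: 429 });
  },
};
```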
### Docs | ||
See [the documentation](https://upstash.com/docs/oss/sdks/ts/ratelimit/overview) for details. | ||
@@ -137,0 +155,0 @@ |
Manifest confusion
Supply chain risk: This package has inconsistent metadata. This could be malicious or caused by an error when publishing the package.
Found 1 instance in 1 package
No v1
Quality: Package is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package