@upstash/ratelimit
Comparing version 0.1.5 to 0.2.0-rc.0
@@ -28,2 +28,14 @@ export class Cache { | ||
} | ||
set(key, value) { | ||
this.cache.set(key, value); | ||
} | ||
get(key) { | ||
return this.cache.get(key) || null; | ||
} | ||
incr(key) { | ||
let value = this.cache.get(key) ?? 0; | ||
value += 1; | ||
this.cache.set(key, value); | ||
return value; | ||
} | ||
} |
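The hunk above adds plain counter helpers (set, get, incr) to the package's in-memory ephemeral cache. The standalone sketch below mirrors those three methods against a Map-backed store so their behaviour is easy to see in isolation; it is illustrative only and does not reproduce the Cache constructor, which this diff does not show.

const store = new Map<string, number>();

// Minimal sketch mirroring the new EphemeralCache counter methods above.
const cache = {
  set: (key: string, value: number): void => {
    store.set(key, value);
  },
  get: (key: string): number | null => store.get(key) || null,
  incr: (key: string): number => {
    const value = (store.get(key) ?? 0) + 1;
    store.set(key, value);
    return value;
  },
};

cache.incr("user_1:123"); // -> 1 (first request in this window)
cache.incr("user_1:123"); // -> 2
cache.get("user_1:123");  // -> 2
cache.get("unknown");     // -> null on a miss, as in the get() above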
@@ -0,1 +1,2 @@ | ||
import * as dntShim from "./_dnt.shims.js"; | ||
import { ms } from "./duration.js"; | ||
@@ -27,2 +28,3 @@ import { Ratelimit } from "./ratelimit.js"; | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
ctx: { | ||
@@ -92,3 +94,3 @@ redis: config.redis, | ||
} | ||
const requestID = crypto.randomUUID(); | ||
const requestID = dntShim.crypto.randomUUID(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
@@ -213,3 +215,3 @@ const key = [identifier, bucket].join(":"); | ||
} | ||
const requestID = crypto.randomUUID(); | ||
const requestID = dntShim.crypto.randomUUID(); | ||
const now = Date.now(); | ||
@@ -216,0 +218,0 @@ const currentWindow = Math.floor(now / windowSize); |
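Both randomUUID() call sites now go through the dnt shim module imported at the top of the file, and package.json (further below) gains @deno/shim-crypto. The generated _dnt.shims.js itself is not part of this diff; a plausible sketch of what it re-exports, so that dntShim.crypto.randomUUID() also works on Node runtimes without a global Web Crypto, would be:

// Hypothetical contents of the generated _dnt.shims.js (not shown in this diff):
// re-export the Web Crypto shim so dntShim.crypto.randomUUID() is available
// even where globalThis.crypto is missing.
export { crypto } from "@deno/shim-crypto";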
import { Cache } from "./cache.js"; | ||
export class TimeoutError extends Error { | ||
constructor() { | ||
super("Timeout"); | ||
this.name = "TimeoutError"; | ||
} | ||
} | ||
/** | ||
@@ -12,3 +18,3 @@ * Ratelimiter using serverless redis from https://upstash.com/ | ||
* "30 m", // interval of 30 minutes | ||
* ) | ||
* ), | ||
* }) | ||
@@ -38,2 +44,8 @@ * | ||
}); | ||
Object.defineProperty(this, "timeout", { | ||
enumerable: true, | ||
configurable: true, | ||
writable: true, | ||
value: void 0 | ||
}); | ||
/** | ||
@@ -64,3 +76,25 @@ * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit. | ||
const key = [this.prefix, identifier].join(":"); | ||
return await this.limiter(this.ctx, key); | ||
let timeoutId = null; | ||
try { | ||
const arr = [this.limiter(this.ctx, key)]; | ||
if (this.timeout) { | ||
arr.push(new Promise((resolve) => { | ||
timeoutId = setTimeout(() => { | ||
resolve({ | ||
success: true, | ||
limit: 0, | ||
remaining: 0, | ||
reset: 0, | ||
pending: Promise.resolve(), | ||
}); | ||
}, this.timeout); | ||
})); | ||
} | ||
return await Promise.race(arr); | ||
} | ||
finally { | ||
if (timeoutId) { | ||
clearTimeout(timeoutId); | ||
} | ||
} | ||
} | ||
@@ -131,2 +165,3 @@ }); | ||
this.limiter = config.limiter; | ||
this.timeout = config.timeout; | ||
this.prefix = config.prefix ?? "@upstash/ratelimit"; | ||
@@ -133,0 +168,0 @@ if (config.ephemeralCache instanceof Map) { |
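The limit() body above now races the configured limiter against an optional timer that resolves with a permissive response, so a slow or unreachable Redis cannot stall the caller for longer than timeout milliseconds. The self-contained sketch below restates that pattern outside the class; the function name and the check parameter are illustrative, not part of the library's API.

type LimitResult = {
  success: boolean;
  limit: number;
  remaining: number;
  reset: number;
  pending: Promise<unknown>;
};

// Illustrative sketch of the fail-open timeout race used in limit() above.
async function raceWithTimeout(
  check: () => Promise<LimitResult>,
  timeoutMs?: number,
): Promise<LimitResult> {
  let timeoutId: ReturnType<typeof setTimeout> | undefined;
  try {
    const candidates = [check()];
    if (timeoutMs) {
      candidates.push(
        new Promise<LimitResult>((resolve) => {
          timeoutId = setTimeout(() => {
            // Fail open: report the request as allowed once the timeout fires.
            resolve({ success: true, limit: 0, remaining: 0, reset: 0, pending: Promise.resolve() });
          }, timeoutMs);
        }),
      );
    }
    return await Promise.race(candidates);
  } finally {
    // Clear the timer so it does not keep running after the limiter wins the race.
    if (timeoutId !== undefined) clearTimeout(timeoutId);
  }
}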
@@ -26,2 +26,3 @@ import { ms } from "./duration.js"; | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
ctx: { | ||
@@ -292,2 +293,85 @@ redis: config.redis, | ||
} | ||
/** | ||
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates | ||
* it asynchronously. | ||
* This is experimental and not yet recommended for production use. | ||
* | ||
* @experimental | ||
* | ||
* Each request inside a fixed timeframe increases a counter. | ||
* Once the counter reaches the maximum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static cachedFixedWindow( | ||
/** | ||
* How many requests are allowed per window. | ||
*/ | ||
tokens, | ||
/** | ||
* The duration in which `tokens` requests are allowed. | ||
*/ | ||
window) { | ||
const windowDuration = ms(window); | ||
const script = ` | ||
local key = KEYS[1] | ||
local window = ARGV[1] | ||
local r = redis.call("INCR", key) | ||
if r == 1 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", key, window) | ||
end | ||
return r | ||
`; | ||
return async function (ctx, identifier) { | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const reset = (bucket + 1) * windowDuration; | ||
const hit = typeof ctx.cache.get(key) === "number"; | ||
if (hit) { | ||
const cachedTokensAfterUpdate = ctx.cache.incr(key); | ||
const success = cachedTokensAfterUpdate < tokens; | ||
const pending = success | ||
? ctx.redis.eval(script, [key], [windowDuration]).then((t) => { | ||
ctx.cache.set(key, t); | ||
}) | ||
: Promise.resolve(); | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: tokens - cachedTokensAfterUpdate, | ||
reset: reset, | ||
pending, | ||
}; | ||
} | ||
const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration])); | ||
ctx.cache.set(key, usedTokensAfterUpdate); | ||
const remaining = tokens - usedTokensAfterUpdate; | ||
return { | ||
success: remaining >= 0, | ||
limit: tokens, | ||
remaining, | ||
reset: reset, | ||
pending: Promise.resolve(), | ||
}; | ||
}; | ||
} | ||
} |
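cachedFixedWindow is a static builder that returns an Algorithm, just like the existing fixedWindow and slidingWindow builders, so it is presumably wired up the same way as the README examples. A hedged usage sketch follows; Redis.fromEnv() and the identifier are placeholders, and the explicit Map is passed because the algorithm throws without a cache.

import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Illustrative wiring only; cachedFixedWindow is marked @experimental above.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.cachedFixedWindow(10, "60 s"),
  ephemeralCache: new Map(), // required: the algorithm throws without a cache
});

const { success, pending } = await ratelimit.limit("user_123");
// When the decision came from the local cache, `pending` carries the async Redis update.
await pending;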
@@ -6,3 +6,3 @@ { | ||
"name": "@upstash/ratelimit", | ||
"version": "v0.1.5", | ||
"version": "v0.2.0-rc.0", | ||
"description": "A serverless ratelimiter built on top of Upstash REST API.", | ||
@@ -47,7 +47,15 @@ "repository": { | ||
".": { | ||
"import": "./esm/mod.js", | ||
"require": "./script/mod.js", | ||
"types": "./types/mod.d.ts" | ||
"import": { | ||
"types": "./types/mod.d.ts", | ||
"default": "./esm/mod.js" | ||
}, | ||
"require": { | ||
"types": "./types/mod.d.ts", | ||
"default": "./script/mod.js" | ||
} | ||
} | ||
}, | ||
"dependencies": { | ||
"@deno/shim-crypto": "~0.3.1" | ||
} | ||
} |
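The exports map now nests a types condition inside both the import and require conditions, which is what TypeScript's node16/nodenext and bundler module resolution modes look for, so both entry points resolve ./types/mod.d.ts. Assuming the usual named exports, resolution looks like this:

// ESM consumers: "." resolves to ./esm/mod.js, types to ./types/mod.d.ts.
import { Ratelimit } from "@upstash/ratelimit";
console.log(typeof Ratelimit); // "function"

// CJS consumers: the same specifier resolves to ./script/mod.js instead.
// const { Ratelimit } = require("@upstash/ratelimit");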
@@ -9,5 +9,6 @@ # Upstash RateLimit | ||
- Serverless functions (AWS Lambda ...) | ||
- Serverless functions (AWS Lambda, Vercel ...) | ||
- Cloudflare Workers | ||
- Fastly Compute@Edge (see | ||
- Vercel Edge | ||
- Fastly Compute@Edge | ||
- Next.js, Jamstack ... | ||
@@ -29,3 +30,3 @@ - Client side web/mobile applications | ||
- [Ephemeral Cache](#ephemeral-cache) | ||
- [MultiRegion replicated ratelimiting](#multiregion-replicated-ratelimiting) | ||
- [MultiRegion replicated ratelimiting](#multiregion-replicated-ratelimiting) | ||
- [Usage](#usage) | ||
@@ -248,3 +249,3 @@ - [Asynchronous synchronization between databases](#asynchronous-synchronization-between-databases) | ||
], | ||
limiter: Ratelimit.slidingWindow(10, "10 s"), | ||
limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"), | ||
}); | ||
@@ -251,0 +252,0 @@ |
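Since this release threads a timeout option through both constructors (see the config hunks above), a usage sketch for the multi-region example in the README might look like the following; the URL and token are placeholders and the one-second value is illustrative.

import { MultiRegionRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new MultiRegionRatelimit({
  redis: [new Redis({ url: "<UPSTASH_REDIS_REST_URL>", token: "<UPSTASH_REDIS_REST_TOKEN>" })],
  limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"),
  timeout: 1000, // ms: fail open if the check has not finished by then
});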
@@ -31,3 +31,15 @@ "use strict"; | ||
} | ||
set(key, value) { | ||
this.cache.set(key, value); | ||
} | ||
get(key) { | ||
return this.cache.get(key) || null; | ||
} | ||
incr(key) { | ||
let value = this.cache.get(key) ?? 0; | ||
value += 1; | ||
this.cache.set(key, value); | ||
return value; | ||
} | ||
} | ||
exports.Cache = Cache; |
"use strict"; | ||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { | ||
if (k2 === undefined) k2 = k; | ||
var desc = Object.getOwnPropertyDescriptor(m, k); | ||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { | ||
desc = { enumerable: true, get: function() { return m[k]; } }; | ||
} | ||
Object.defineProperty(o, k2, desc); | ||
}) : (function(o, m, k, k2) { | ||
if (k2 === undefined) k2 = k; | ||
o[k2] = m[k]; | ||
})); | ||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { | ||
Object.defineProperty(o, "default", { enumerable: true, value: v }); | ||
}) : function(o, v) { | ||
o["default"] = v; | ||
}); | ||
var __importStar = (this && this.__importStar) || function (mod) { | ||
if (mod && mod.__esModule) return mod; | ||
var result = {}; | ||
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); | ||
__setModuleDefault(result, mod); | ||
return result; | ||
}; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.MultiRegionRatelimit = void 0; | ||
const dntShim = __importStar(require("./_dnt.shims.js")); | ||
const duration_js_1 = require("./duration.js"); | ||
@@ -30,2 +54,3 @@ const ratelimit_js_1 = require("./ratelimit.js"); | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
ctx: { | ||
@@ -95,3 +120,3 @@ redis: config.redis, | ||
} | ||
const requestID = crypto.randomUUID(); | ||
const requestID = dntShim.crypto.randomUUID(); | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
@@ -216,3 +241,3 @@ const key = [identifier, bucket].join(":"); | ||
} | ||
const requestID = crypto.randomUUID(); | ||
const requestID = dntShim.crypto.randomUUID(); | ||
const now = Date.now(); | ||
@@ -219,0 +244,0 @@ const currentWindow = Math.floor(now / windowSize); |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.Ratelimit = void 0; | ||
exports.Ratelimit = exports.TimeoutError = void 0; | ||
const cache_js_1 = require("./cache.js"); | ||
class TimeoutError extends Error { | ||
constructor() { | ||
super("Timeout"); | ||
this.name = "TimeoutError"; | ||
} | ||
} | ||
exports.TimeoutError = TimeoutError; | ||
/** | ||
@@ -15,3 +22,3 @@ * Ratelimiter using serverless redis from https://upstash.com/ | ||
* "30 m", // interval of 30 minutes | ||
* ) | ||
* ), | ||
* }) | ||
@@ -41,2 +48,8 @@ * | ||
}); | ||
Object.defineProperty(this, "timeout", { | ||
enumerable: true, | ||
configurable: true, | ||
writable: true, | ||
value: void 0 | ||
}); | ||
/** | ||
@@ -67,3 +80,25 @@ * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit. | ||
const key = [this.prefix, identifier].join(":"); | ||
return await this.limiter(this.ctx, key); | ||
let timeoutId = null; | ||
try { | ||
const arr = [this.limiter(this.ctx, key)]; | ||
if (this.timeout) { | ||
arr.push(new Promise((resolve) => { | ||
timeoutId = setTimeout(() => { | ||
resolve({ | ||
success: true, | ||
limit: 0, | ||
remaining: 0, | ||
reset: 0, | ||
pending: Promise.resolve(), | ||
}); | ||
}, this.timeout); | ||
})); | ||
} | ||
return await Promise.race(arr); | ||
} | ||
finally { | ||
if (timeoutId) { | ||
clearTimeout(timeoutId); | ||
} | ||
} | ||
} | ||
@@ -134,2 +169,3 @@ }); | ||
this.limiter = config.limiter; | ||
this.timeout = config.timeout; | ||
this.prefix = config.prefix ?? "@upstash/ratelimit"; | ||
@@ -136,0 +172,0 @@ if (config.ephemeralCache instanceof Map) { |
@@ -29,2 +29,3 @@ "use strict"; | ||
limiter: config.limiter, | ||
timeout: config.timeout, | ||
ctx: { | ||
@@ -295,3 +296,86 @@ redis: config.redis, | ||
} | ||
/** | ||
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates | ||
* it asynchronously. | ||
* This is experimental and not yet recommended for production use. | ||
* | ||
* @experimental | ||
* | ||
* Each request inside a fixed timeframe increases a counter. | ||
* Once the counter reaches the maximum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static cachedFixedWindow( | ||
/** | ||
* How many requests are allowed per window. | ||
*/ | ||
tokens, | ||
/** | ||
* The duration in which `tokens` requests are allowed. | ||
*/ | ||
window) { | ||
const windowDuration = (0, duration_js_1.ms)(window); | ||
const script = ` | ||
local key = KEYS[1] | ||
local window = ARGV[1] | ||
local r = redis.call("INCR", key) | ||
if r == 1 then | ||
-- The first time this key is set, the value will be 1. | ||
-- So we only need the expire command once | ||
redis.call("PEXPIRE", key, window) | ||
end | ||
return r | ||
`; | ||
return async function (ctx, identifier) { | ||
if (!ctx.cache) { | ||
throw new Error("This algorithm requires a cache"); | ||
} | ||
const bucket = Math.floor(Date.now() / windowDuration); | ||
const key = [identifier, bucket].join(":"); | ||
const reset = (bucket + 1) * windowDuration; | ||
const hit = typeof ctx.cache.get(key) === "number"; | ||
if (hit) { | ||
const cachedTokensAfterUpdate = ctx.cache.incr(key); | ||
const success = cachedTokensAfterUpdate < tokens; | ||
const pending = success | ||
? ctx.redis.eval(script, [key], [windowDuration]).then((t) => { | ||
ctx.cache.set(key, t); | ||
}) | ||
: Promise.resolve(); | ||
return { | ||
success, | ||
limit: tokens, | ||
remaining: tokens - cachedTokensAfterUpdate, | ||
reset: reset, | ||
pending, | ||
}; | ||
} | ||
const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration])); | ||
ctx.cache.set(key, usedTokensAfterUpdate); | ||
const remaining = tokens - usedTokensAfterUpdate; | ||
return { | ||
success: remaining >= 0, | ||
limit: tokens, | ||
remaining, | ||
reset: reset, | ||
pending: Promise.resolve(), | ||
}; | ||
}; | ||
} | ||
} | ||
exports.RegionRatelimit = RegionRatelimit; |
@@ -13,2 +13,5 @@ import { EphemeralCache } from "./types.js"; | ||
blockUntil(identifier: string, reset: number): void; | ||
set(key: string, value: number): void; | ||
get(key: string): number | null; | ||
incr(key: string): number; | ||
} |
@@ -1,3 +0,3 @@ | ||
declare type Unit = "ms" | "s" | "m" | "h" | "d"; | ||
export declare type Duration = `${number} ${Unit}`; | ||
type Unit = "ms" | "s" | "m" | "h" | "d"; | ||
export type Duration = `${number} ${Unit}`; | ||
/** | ||
@@ -4,0 +4,0 @@ * Convert a human readable duration to milliseconds |
@@ -5,3 +5,3 @@ import type { Duration } from "./duration.js"; | ||
import type { Redis } from "./types.js"; | ||
export declare type MultiRegionRatelimitConfig = { | ||
export type MultiRegionRatelimitConfig = { | ||
/** | ||
@@ -43,2 +43,8 @@ * Instances of `@upstash/redis` | ||
ephemeralCache?: Map<string, number> | false; | ||
/** | ||
* If set and the rate-limit check has not completed within this many milliseconds, the request is allowed to pass. | ||
* | ||
* Use this to avoid blocking requests when the network or Redis is slow or unreachable. | ||
*/ | ||
timeout?: number; | ||
}; | ||
@@ -45,0 +51,0 @@ /** |
import type { Algorithm, Context, RatelimitResponse } from "./types.js"; | ||
export declare type RatelimitConfig<TContext> = { | ||
export declare class TimeoutError extends Error { | ||
constructor(); | ||
} | ||
export type RatelimitConfig<TContext> = { | ||
/** | ||
@@ -38,2 +41,8 @@ * The ratelimiter function to use. | ||
ephemeralCache?: Map<string, number> | false; | ||
/** | ||
* If set and the rate-limit check has not completed within this many milliseconds, the request is allowed to pass. | ||
* | ||
* Use this to avoid blocking requests when the network or Redis is slow or unreachable. | ||
*/ | ||
timeout?: number; | ||
}; | ||
@@ -50,3 +59,3 @@ /** | ||
* "30 m", // interval of 30 minutes | ||
* ) | ||
* ), | ||
* }) | ||
@@ -60,2 +69,3 @@ * | ||
protected readonly prefix: string; | ||
protected readonly timeout?: number; | ||
constructor(config: RatelimitConfig<TContext>); | ||
@@ -62,0 +72,0 @@ /** |
@@ -5,3 +5,3 @@ import type { Duration } from "./duration.js"; | ||
import { Ratelimit } from "./ratelimit.js"; | ||
export declare type RegionRatelimitConfig = { | ||
export type RegionRatelimitConfig = { | ||
/** | ||
@@ -46,2 +46,8 @@ * Instance of `@upstash/redis` | ||
ephemeralCache?: Map<string, number> | false; | ||
/** | ||
* If set and the rate-limit check has not completed within this many milliseconds, the request is allowed to pass. | ||
* | ||
* Use this to avoid blocking requests when the network or Redis is slow or unreachable. | ||
*/ | ||
timeout?: number; | ||
}; | ||
@@ -150,2 +156,35 @@ /** | ||
maxTokens: number): Algorithm<RegionContext>; | ||
/** | ||
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates | ||
* it asynchronously. | ||
* This is experimental and not yet recommended for production use. | ||
* | ||
* @experimental | ||
* | ||
* Each request inside a fixed timeframe increases a counter. | ||
* Once the counter reaches the maximum allowed number, all further requests are | ||
* rejected. | ||
* | ||
* **Pro:** | ||
* | ||
* - Newer requests are not starved by old ones. | ||
* - Low storage cost. | ||
* | ||
* **Con:** | ||
* | ||
* A burst of requests near the boundary of a window can result in a very | ||
* high request rate because two windows will be filled with requests quickly. | ||
* | ||
* @param tokens - How many requests a user can make in each time window. | ||
* @param window - A fixed timeframe | ||
*/ | ||
static cachedFixedWindow( | ||
/** | ||
* How many requests are allowed per window. | ||
*/ | ||
tokens: number, | ||
/** | ||
* The duration in which `tokens` requests are allowed. | ||
*/ | ||
window: Duration): Algorithm<RegionContext>; | ||
} |
@@ -14,13 +14,16 @@ export interface Redis { | ||
blockUntil: (identifier: string, reset: number) => void; | ||
set: (key: string, value: number) => void; | ||
get: (key: string) => number | null; | ||
incr: (key: string) => number; | ||
} | ||
export declare type RegionContext = { | ||
export type RegionContext = { | ||
redis: Redis; | ||
cache?: EphemeralCache; | ||
}; | ||
export declare type MultiRegionContext = { | ||
export type MultiRegionContext = { | ||
redis: Redis[]; | ||
cache?: EphemeralCache; | ||
}; | ||
export declare type Context = RegionContext | MultiRegionContext; | ||
export declare type RatelimitResponse = { | ||
export type Context = RegionContext | MultiRegionContext; | ||
export type RatelimitResponse = { | ||
/** | ||
@@ -66,4 +69,4 @@ * Whether the request may pass(true) or exceeded the limit(false) | ||
}; | ||
export declare type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: { | ||
export type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: { | ||
cache?: EphemeralCache; | ||
}) => Promise<RatelimitResponse>; |
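For orientation, the declarations above make an Algorithm an async function from a context and identifier to a RatelimitResponse. The sketch below defines a trivial always-allow algorithm against those types; it is illustrative only and imports from the local ./types.js path used throughout these declaration files.

import type { Algorithm, RegionContext, RatelimitResponse } from "./types.js";

// Illustrative only: a no-op algorithm that satisfies Algorithm<RegionContext>.
const allowAll: Algorithm<RegionContext> = async (
  _ctx: RegionContext,
  _identifier: string,
): Promise<RatelimitResponse> => ({
  success: true,
  limit: 0,
  remaining: 0,
  reset: Date.now(),
  pending: Promise.resolve(),
});

export { allowAll };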
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Manifest confusion
Supply chain risk: This package has inconsistent metadata. This could be malicious or caused by an error when publishing the package.
Found 1 instance in 1 package
+ Added @deno/shim-crypto@~0.3.1
+ Added @deno/shim-crypto@0.3.1 (transitive)