@upstash/ratelimit - npm Package Compare versions

Comparing version 0.1.3 to 0.1.4-rc.0

.releaserc


esm/multi.js
 import { ms } from "./duration.js";
 import { Ratelimit } from "./ratelimit.js";
+import { Cache } from "./cache.js";
 /**

@@ -26,3 +27,6 @@ * Ratelimiter using serverless redis from https://upstash.com/

             limiter: config.limiter,
-            ctx: { redis: config.redis },
+            ctx: {
+                redis: config.redis,
+                cache: config.ephermeralCache ? new Cache() : undefined,
+            },
         });

@@ -74,2 +78,14 @@ }

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const requestID = crypto.randomUUID();

@@ -113,7 +129,12 @@ const bucket = Math.floor(Date.now() / windowDuration);

             */
+            const success = remaining > 0;
+            const reset = (bucket + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (bucket + 1) * windowDuration,
+                reset,
                 pending: sync(),

@@ -179,2 +200,14 @@ };

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const requestID = crypto.randomUUID();

@@ -220,10 +253,12 @@ const now = Date.now();

             }
             /**
              * Do not await sync. This should not run in the critical path.
              */
+            const success = remaining > 0;
+            const reset = (currentWindow + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (currentWindow + 1) * windowDuration,
+                reset,
                 pending: sync(),

@@ -230,0 +265,0 @@ };

@@ -0,1 +1,2 @@

+import { Cache } from "./cache.js";
 /**

@@ -128,3 +129,9 @@ * Ratelimiter using serverless redis from https://upstash.com/

         this.prefix = config.prefix ?? "@upstash/ratelimit";
+        if (config.ephermeralCache instanceof Map) {
+            this.ctx.cache = new Cache(config.ephermeralCache);
+        }
+        else if (typeof config.ephermeralCache === "undefined") {
+            this.ctx.cache = new Cache(new Map());
+        }
     }
 }

@@ -26,3 +26,6 @@ import { ms } from "./duration.js";

             limiter: config.limiter,
-            ctx: { redis: config.redis },
+            ctx: {
+                redis: config.redis,
+            },
+            ephermeralCache: config.ephermeralCache,
         });

@@ -74,8 +77,25 @@ }

             const key = [identifier, bucket].join(":");
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration]));
+            const success = usedTokensAfterUpdate <= tokens;
+            const reset = (bucket + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: usedTokensAfterUpdate <= tokens,
+                success,
                 limit: tokens,
                 remaining: tokens - usedTokensAfterUpdate,
-                reset: (bucket + 1) * windowDuration,
+                reset,
                 pending: Promise.resolve(),

@@ -147,8 +167,25 @@ };

             const previousKey = [identifier, previousWindow].join(":");
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const remaining = (await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]));
+            const success = remaining > 0;
+            const reset = (currentWindow + 1) * windowSize;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (currentWindow + 1) * windowSize,
+                reset,
                 pending: Promise.resolve(),

@@ -231,7 +268,23 @@ };

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: maxTokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const now = Date.now();
             const key = [identifier, Math.floor(now / intervalDuration)].join(":");
             const [remaining, reset] = (await ctx.redis.eval(script, [key], [maxTokens, intervalDuration, refillRate, now]));
+            const success = remaining > 0;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: maxTokens,
@@ -238,0 +291,0 @@ remaining,


package.json

@@ -6,3 +6,3 @@ {

   "name": "@upstash/ratelimit",
-  "version": "v0.1.3",
+  "version": "v0.1.4-rc.0",
   "description": "A serverless ratelimiter built on top of Upstash REST API.",

@@ -33,3 +33,3 @@ "repository": {

   "peerDependencies": {
-    "@upstash/redis": "^1.3.4"
+    "@upstash/redis": "^1.4.0"
   },

@@ -36,0 +36,0 @@ "size-limit": [

@@ -28,4 +28,5 @@ # Upstash Ratelimit

 - [Block until ready](#block-until-ready)
-- [Globally replicated ratelimiting](#globally-replicated-ratelimiting)
+- [MultiRegionly replicated ratelimiting](#multiregionly-replicated-ratelimiting)
 - [Usage](#usage)
 - [Asynchronous synchronization between databases](#asynchronous-synchronization-between-databases)
 - [Example](#example)

@@ -180,2 +181,32 @@ - [Ratelimiting algorithms](#ratelimiting-algorithms)

### Ephemeral Cache

Under extreme load or a denial-of-service attack, it can be too expensive to call
Redis for every incoming request just to find out that the request should be blocked
because the caller has already exceeded its limit.

You can use an ephemeral in-memory cache by passing the `ephermeralCache` option
(note the spelling, which matches the configuration types shipped in this version):

```ts
const cache = new Map(); // must be created outside of your serverless function handler
// ...
const ratelimit = new Ratelimit({
  // ...
  ephermeralCache: cache,
});
```

If enabled, the ratelimiter keeps a global cache of identifiers that have exhausted
their ratelimit, together with their reset timestamps. In serverless environments
this is only possible if you create the ratelimiter instance outside of your handler
function. While the function is still hot, the ratelimiter can block requests
without having to request data from Redis, thus saving time and money.

Whenever an identifier exceeds its limit, the ratelimiter adds it to an internal
list together with its reset timestamp. If the same identifier makes a new request
before it is reset, we can reject it immediately.

## MultiRegionly replicated ratelimiting

@@ -182,0 +213,0 @@
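The README section above only shows the configuration. The sketch below illustrates how both the shared `Map` and the `Ratelimit` instance might live outside a serverless handler so the ephemeral cache survives warm invocations. It assumes the `Ratelimit.fixedWindow` factory and `Redis.fromEnv()` setup documented elsewhere in the README; the fetch-style handler shape and header name are illustrative, not part of the package.

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Created once per runtime, outside the handler, so the cache and the
// ratelimiter outlive a single request while the function stays warm.
const cache = new Map<string, number>();

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(), // expects UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN
  limiter: Ratelimit.fixedWindow(10, "10 s"),
  ephermeralCache: cache, // spelling matches this release's type declarations
});

// Illustrative fetch-style handler; adapt to your platform's signature.
export default async function handler(request: Request): Promise<Response> {
  const identifier = request.headers.get("x-forwarded-for") ?? "anonymous";
  const { success, reset } = await ratelimit.limit(identifier);
  if (!success) {
    // While the instance stays warm, repeat offenders are rejected from the
    // in-memory cache without another round trip to Redis.
    const retryAfter = Math.ceil((reset - Date.now()) / 1000);
    return new Response("Too many requests", {
      status: 429,
      headers: { "Retry-After": retryAfter.toString() },
    });
  }
  return new Response("OK");
}
```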

@@ -6,2 +6,3 @@ "use strict";

 const ratelimit_js_1 = require("./ratelimit.js");
+const cache_js_1 = require("./cache.js");
 /**

@@ -30,3 +31,6 @@ * Ratelimiter using serverless redis from https://upstash.com/

             limiter: config.limiter,
-            ctx: { redis: config.redis },
+            ctx: {
+                redis: config.redis,
+                cache: config.ephermeralCache ? new cache_js_1.Cache() : undefined,
+            },
         });

@@ -78,2 +82,14 @@ }

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const requestID = crypto.randomUUID();

@@ -117,7 +133,12 @@ const bucket = Math.floor(Date.now() / windowDuration);

             */
+            const success = remaining > 0;
+            const reset = (bucket + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (bucket + 1) * windowDuration,
+                reset,
                 pending: sync(),

@@ -183,2 +204,14 @@ };

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const requestID = crypto.randomUUID();

@@ -224,10 +257,12 @@ const now = Date.now();

             }
             /**
              * Do not await sync. This should not run in the critical path.
              */
+            const success = remaining > 0;
+            const reset = (currentWindow + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (currentWindow + 1) * windowDuration,
+                reset,
                 pending: sync(),

@@ -234,0 +269,0 @@ };

 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.Ratelimit = void 0;
+const cache_js_1 = require("./cache.js");
 /**

@@ -131,4 +132,10 @@ * Ratelimiter using serverless redis from https://upstash.com/

         this.prefix = config.prefix ?? "@upstash/ratelimit";
+        if (config.ephermeralCache instanceof Map) {
+            this.ctx.cache = new cache_js_1.Cache(config.ephermeralCache);
+        }
+        else if (typeof config.ephermeralCache === "undefined") {
+            this.ctx.cache = new cache_js_1.Cache(new Map());
+        }
     }
 }
 exports.Ratelimit = Ratelimit;

@@ -29,3 +29,6 @@ "use strict";

             limiter: config.limiter,
-            ctx: { redis: config.redis },
+            ctx: {
+                redis: config.redis,
+            },
+            ephermeralCache: config.ephermeralCache,
         });

@@ -77,8 +80,25 @@ }

             const key = [identifier, bucket].join(":");
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration]));
+            const success = usedTokensAfterUpdate <= tokens;
+            const reset = (bucket + 1) * windowDuration;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: usedTokensAfterUpdate <= tokens,
+                success,
                 limit: tokens,
                 remaining: tokens - usedTokensAfterUpdate,
-                reset: (bucket + 1) * windowDuration,
+                reset,
                 pending: Promise.resolve(),

@@ -150,8 +170,25 @@ };

             const previousKey = [identifier, previousWindow].join(":");
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: tokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const remaining = (await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]));
+            const success = remaining > 0;
+            const reset = (currentWindow + 1) * windowSize;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: tokens,
                 remaining,
-                reset: (currentWindow + 1) * windowSize,
+                reset,
                 pending: Promise.resolve(),

@@ -234,7 +271,23 @@ };

         return async function (ctx, identifier) {
+            if (ctx.cache) {
+                const { blocked, reset } = ctx.cache.isBlocked(identifier);
+                if (blocked) {
+                    return {
+                        success: false,
+                        limit: maxTokens,
+                        remaining: 0,
+                        reset: reset,
+                        pending: Promise.resolve(),
+                    };
+                }
+            }
             const now = Date.now();
             const key = [identifier, Math.floor(now / intervalDuration)].join(":");
             const [remaining, reset] = (await ctx.redis.eval(script, [key], [maxTokens, intervalDuration, refillRate, now]));
+            const success = remaining > 0;
+            if (ctx.cache && !success) {
+                ctx.cache.blockUntil(identifier, reset);
+            }
             return {
-                success: remaining > 0,
+                success,
                 limit: maxTokens,
@@ -241,0 +294,0 @@ remaining,

@@ -25,2 +25,19 @@ import type { Duration } from "./duration.js";

     prefix?: string;
+    /**
+     * If enabled, the ratelimiter will keep a global cache of identifiers that have
+     * exhausted their ratelimit. In serverless environments this is only possible if
+     * you create the ratelimiter instance outside of your handler function. While the
+     * function is still hot, the ratelimiter can block requests without having to
+     * request data from redis, thus saving time and money.
+     *
+     * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
+     * internal list together with its reset timestamp. If the same identifier makes a
+     * new request before it is reset, we can immediately reject it.
+     *
+     * Set to `false` to disable.
+     *
+     * If left undefined, a map is created automatically, but it can only work
+     * if the map or the ratelimit instance is created outside your serverless function handler.
+     */
+    ephermeralCache?: Map<string, number> | false;
 };

@@ -27,0 +44,0 @@ /**
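The doc comment above describes three possible states for `ephermeralCache`: left undefined, an explicit `Map`, or `false`. The following sketch shows what each looks like in a config object; the `slidingWindow` limiter and `Redis.fromEnv()` are placeholders for whatever setup you already use.

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const redis = Redis.fromEnv();
const limiter = Ratelimit.slidingWindow(20, "30 s");

// 1. Left undefined: a Map is created automatically. This only helps if the
//    ratelimiter instance itself lives outside the serverless handler.
const withDefaultCache = new Ratelimit({ redis, limiter });

// 2. An explicit Map: lets several ratelimiter instances share one cache.
const shared = new Map<string, number>();
const withSharedCache = new Ratelimit({ redis, limiter, ephermeralCache: shared });

// 3. false: disables the cache entirely; every request goes to Redis.
const withoutCache = new Ratelimit({ redis, limiter, ephermeralCache: false });
```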

@@ -21,2 +21,19 @@ import type { Algorithm, Context, RatelimitResponse } from "./types.js";

     prefix?: string;
+    /**
+     * If enabled, the ratelimiter will keep a global cache of identifiers that have
+     * exhausted their ratelimit. In serverless environments this is only possible if
+     * you create the ratelimiter instance outside of your handler function. While the
+     * function is still hot, the ratelimiter can block requests without having to
+     * request data from redis, thus saving time and money.
+     *
+     * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
+     * internal list together with its reset timestamp. If the same identifier makes a
+     * new request before it is reset, we can immediately reject it.
+     *
+     * Set to `false` to disable.
+     *
+     * If left undefined, a map is created automatically, but it can only work
+     * if the map or the ratelimit instance is created outside your serverless function handler.
+     */
+    ephermeralCache?: Map<string, number> | false;
 };

@@ -23,0 +40,0 @@ /**

@@ -28,2 +28,19 @@ import type { Duration } from "./duration.js";

     prefix?: string;
+    /**
+     * If enabled, the ratelimiter will keep a global cache of identifiers that have
+     * exhausted their ratelimit. In serverless environments this is only possible if
+     * you create the ratelimiter instance outside of your handler function. While the
+     * function is still hot, the ratelimiter can block requests without having to
+     * request data from redis, thus saving time and money.
+     *
+     * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
+     * internal list together with its reset timestamp. If the same identifier makes a
+     * new request before it is reset, we can immediately reject it.
+     *
+     * Set to `false` to disable.
+     *
+     * If left undefined, a map is created automatically, but it can only work
+     * if the map or the ratelimit instance is created outside your serverless function handler.
+     */
+    ephermeralCache?: Map<string, number> | false;
 };

@@ -30,0 +47,0 @@ /**

@@ -5,7 +5,19 @@ export interface Redis {

 }
+/**
+ * EphermeralCache is used to block certain identifiers right away in case they have already exceeded the ratelimit.
+ */
+export interface EphermeralCache {
+    isBlocked: (identifier: string) => {
+        blocked: boolean;
+        reset: number;
+    };
+    blockUntil: (identifier: string, reset: number) => void;
+}
 export declare type RegionContext = {
     redis: Redis;
+    cache?: EphermeralCache;
 };
 export declare type MultiRegionContext = {
     redis: Redis[];
+    cache?: EphermeralCache;
 };

@@ -54,2 +66,4 @@ export declare type Context = RegionContext | MultiRegionContext;

 };
-export declare type Algorithm<TContext> = (ctx: TContext, identifier: string) => Promise<RatelimitResponse>;
+export declare type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
+    cache?: EphermeralCache;
+}) => Promise<RatelimitResponse>;
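For reference, the `EphermeralCache` contract declared above is small enough to implement by hand. Below is a minimal Map-backed sketch of that contract, for illustration only; it is not the package's own `Cache` class from `cache.js`.

```ts
// The interface as declared in types.d.ts above.
interface EphermeralCache {
  isBlocked: (identifier: string) => { blocked: boolean; reset: number };
  blockUntil: (identifier: string, reset: number) => void;
}

// Minimal Map-backed sketch: blockUntil() records a reset timestamp
// (milliseconds since epoch) for an identifier, and isBlocked() reports
// whether that timestamp is still in the future, dropping expired entries.
class MapCache implements EphermeralCache {
  constructor(private readonly cache: Map<string, number> = new Map()) {}

  isBlocked(identifier: string): { blocked: boolean; reset: number } {
    const reset = this.cache.get(identifier);
    if (reset === undefined || reset < Date.now()) {
      // Unknown identifier or an expired block: not blocked anymore.
      this.cache.delete(identifier);
      return { blocked: false, reset: 0 };
    }
    return { blocked: true, reset };
  }

  blockUntil(identifier: string, reset: number): void {
    this.cache.set(identifier, reset);
  }
}
```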