Launch Week Day 1: Socket for Jira Is Now Available. Learn More
Socket
Book a Demo · Sign in
Socket

@vercel/blob

Package Overview
Dependencies
Maintainers
2
Versions
146
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@vercel/blob - npm Package Compare versions

Comparing version
2.1.0
to
2.2.0-f6bb7b4-20260204165325
+1363
dist/chunk-7IJ2KDBM.cjs
// Transpiled CommonJS preamble: marks the module as ES-module interop and
// provides the default-import shim used for the vendored CJS dependencies.
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }// src/helpers.ts
var _isnodeprocess = require('is-node-process');
// src/multipart/helpers.ts
var _isbuffer = require('is-buffer'); var _isbuffer2 = _interopRequireDefault(_isbuffer);
var _stream = require('stream');
// Feature probe, evaluated once at module load: can this runtime build a
// Blob from a Uint8Array and read it back as text? toReadableStream() awaits
// this promise to choose between Blob#stream() and a manual ReadableStream.
// Resolves to a boolean and never rejects.
var supportsNewBlobFromArrayBuffer = new Promise((resolve) => {
try {
// "hello" as raw bytes; round-trip it through Blob#text().
const helloAsArrayBuffer = new Uint8Array([104, 101, 108, 108, 111]);
const blob = new Blob([helloAsArrayBuffer]);
blob.text().then((text) => {
resolve(text === "hello");
}).catch(() => {
resolve(false);
});
} catch (e) {
resolve(false);
}
});
// Normalizes any supported request body (web ReadableStream, Blob, Node.js
// Readable, ArrayBuffer, Node.js Buffer, or string) into a single type:
// a web ReadableStream, so downstream upload code has one code path.
async function toReadableStream(value) {
if (value instanceof ReadableStream) {
return value;
}
if (value instanceof Blob) {
return value.stream();
}
if (isNodeJsReadableStream(value)) {
// Bridge a Node.js stream.Readable into a web ReadableStream.
return _stream.Readable.toWeb(value);
}
let streamValue;
if (value instanceof ArrayBuffer) {
streamValue = new Uint8Array(value);
} else if (isNodeJsBuffer(value)) {
streamValue = value;
} else {
// Anything else is treated as a string and UTF-8 encoded.
streamValue = stringToUint8Array(value);
}
if (await supportsNewBlobFromArrayBuffer) {
return new Blob([streamValue]).stream();
}
// Fallback for runtimes where Blob-from-bytes is unreliable: emit the whole
// buffer as a single-chunk stream.
return new ReadableStream({
start(controller) {
controller.enqueue(streamValue);
controller.close();
}
});
}
function isNodeJsReadableStream(value) {
return typeof value === "object" && typeof value.pipe === "function" && value.readable && typeof value._read === "function" && // @ts-expect-error _readableState does exists on Readable
typeof value._readableState === "object";
}
// UTF-8 encodes a string into raw bytes.
function stringToUint8Array(s) {
  return new TextEncoder().encode(s);
}
// True when `value` is a Node.js Buffer, delegated to the bundled is-buffer
// package.
function isNodeJsBuffer(value) {
return _isbuffer2.default.call(void 0, value);
}
// src/bytes.ts
// Optional sign, decimal number, optional spaces, then a required unit.
// Note: a bare "b" suffix is not matched; such strings fall through to the
// parseInt path below (where 1 byte = 1, so the result is the same).
var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i;
// Binary multipliers: 1kb = 1024 bytes.
var map = {
  b: 1,
  kb: 1 << 10,
  mb: 1 << 20,
  gb: 1 << 30,
  tb: 1024 ** 4,
  pb: 1024 ** 5
};
// Converts a number (returned as-is) or a human-readable size string
// ("1kb", "1.5MB") into a byte count. Returns null for anything unparsable.
function bytes(val) {
  if (typeof val === "number" && !Number.isNaN(val)) {
    return val;
  }
  if (typeof val !== "string") {
    return null;
  }
  const match = parseRegExp.exec(val);
  let amount;
  let unit = "b";
  if (match) {
    const [, numeric, , , matchedUnit] = match;
    if (!numeric) {
      return null;
    }
    amount = parseFloat(numeric);
    if (matchedUnit) {
      unit = matchedUnit.toLowerCase();
    }
  } else {
    // No recognized unit suffix: fall back to a plain base-10 integer parse.
    amount = parseInt(val, 10);
  }
  if (Number.isNaN(amount)) {
    return null;
  }
  return Math.floor(map[unit] * amount);
}
// src/helpers.ts
var defaultVercelBlobApiUrl = "https://vercel.com/api/blob";
// Resolves the read/write token: an explicit `token` option wins, then the
// BLOB_READ_WRITE_TOKEN environment variable; throws a BlobError otherwise.
function getTokenFromOptionsOrEnv(options) {
  const explicitToken = options?.token;
  if (explicitToken) {
    return explicitToken;
  }
  const envToken = process.env.BLOB_READ_WRITE_TOKEN;
  if (envToken) {
    return envToken;
  }
  throw new BlobError(
    "No token found. Either configure the `BLOB_READ_WRITE_TOKEN` environment variable, or pass a `token` option to your calls."
  );
}
// Base class for every error surfaced by this SDK; prefixes the message so
// failures are recognizable in user logs.
var BlobError = class extends Error {
  constructor(message) {
    super(`Vercel Blob: ${message}`);
  }
};
// Returns `blobUrl` with `download=1` appended to the query string, which
// makes the blob endpoint serve the file as an attachment.
function getDownloadUrl(blobUrl) {
  const parsed = new URL(blobUrl);
  parsed.searchParams.set("download", "1");
  return parsed.toString();
}
// True only for "plain" objects (object literals or Object.create(null)).
// Rejects arrays, class instances, Maps/Sets, iterables and anything with a
// Symbol.toStringTag. Used to reject accidental JSON-object upload bodies.
function isPlainObject(value) {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const proto = Object.getPrototypeOf(value);
  const protoLooksPlain =
    proto === null ||
    proto === Object.prototype ||
    Object.getPrototypeOf(proto) === null;
  if (!protoLooksPlain) {
    return false;
  }
  return !(Symbol.toStringTag in value) && !(Symbol.iterator in value);
}
var disallowedPathnameCharacters = ["//"];
// Detects (once, at module load) whether the runtime supports streaming
// request bodies. Node.js processes always do. Otherwise, build a Request
// with a ReadableStream body and a duplex getter: a stream-capable fetch
// reads `duplex` and does NOT synthesize a Content-Type header.
var supportsRequestStreams = (() => {
if (_isnodeprocess.isNodeProcess.call(void 0, )) {
return true;
}
const apiUrl = getApiUrl();
if (apiUrl.startsWith("http://localhost")) {
// Local development API: streaming is disabled there.
return false;
}
let duplexAccessed = false;
const hasContentType = new Request(getApiUrl(), {
body: new ReadableStream(),
method: "POST",
// @ts-expect-error -- TypeScript doesn't yet have duplex but it's in the spec: https://github.com/microsoft/TypeScript-DOM-lib-generator/pull/1729
get duplex() {
duplexAccessed = true;
return "half";
}
}).headers.has("Content-Type");
return duplexAccessed && !hasContentType;
})();
// Builds the absolute API URL for `pathname`. The base URL can be overridden
// via VERCEL_BLOB_API_URL / NEXT_PUBLIC_VERCEL_BLOB_API_URL; the try/catch
// tolerates runtimes where `process` is undefined.
function getApiUrl(pathname = "") {
  let baseUrl = null;
  try {
    baseUrl =
      process.env.VERCEL_BLOB_API_URL ||
      process.env.NEXT_PUBLIC_VERCEL_BLOB_API_URL;
  } catch {
    // No `process` in this runtime; fall back to the default base URL.
  }
  return `${baseUrl || defaultVercelBlobApiUrl}${pathname}`;
}
// Reuse a single TextEncoder when the runtime provides one.
var TEXT_ENCODER = typeof TextEncoder === "function" ? new TextEncoder() : null;
// Best-effort body size in bytes: 0 for falsy/unsized bodies (e.g. streams),
// UTF-8 length for strings, byteLength for buffer-likes, size for Blobs.
function computeBodyLength(body) {
  if (!body) {
    return 0;
  }
  if (typeof body === "string") {
    if (TEXT_ENCODER) {
      return TEXT_ENCODER.encode(body).byteLength;
    }
    // No TextEncoder: Blob#size also reports the UTF-8 byte count.
    return new Blob([body]).size;
  }
  if ("byteLength" in body && typeof body.byteLength === "number") {
    return body.byteLength;
  }
  if ("size" in body && typeof body.size === "number") {
    return body.size;
  }
  return 0;
}
// Returns a TransformStream that re-buffers arbitrary incoming chunks into
// fixed `chunkSize` pieces, reporting each emitted piece's byte count to
// `onProgress`. A trailing partial chunk is flushed at the end. All work is
// deferred to a microtask — presumably to avoid re-entrant controller calls;
// NOTE(review): confirm flush ordering relies on microtask FIFO semantics.
var createChunkTransformStream = (chunkSize, onProgress) => {
let buffer = new Uint8Array(0);
return new TransformStream({
transform(chunk, controller) {
queueMicrotask(() => {
// Append the incoming chunk to the pending buffer.
const newBuffer = new Uint8Array(buffer.length + chunk.byteLength);
newBuffer.set(buffer);
newBuffer.set(new Uint8Array(chunk), buffer.length);
buffer = newBuffer;
// Emit as many full-size chunks as the buffer now contains.
while (buffer.length >= chunkSize) {
const newChunk = buffer.slice(0, chunkSize);
controller.enqueue(newChunk);
onProgress == null ? void 0 : onProgress(newChunk.byteLength);
buffer = buffer.slice(chunkSize);
}
});
},
flush(controller) {
queueMicrotask(() => {
// Emit whatever is left (smaller than chunkSize).
if (buffer.length > 0) {
controller.enqueue(buffer);
onProgress == null ? void 0 : onProgress(buffer.byteLength);
}
});
}
});
};
// True when the web ReadableStream global exists and `value` is an instance.
function isReadableStream(value) {
  // TODO: Can be removed once Node.js 16 is no more required internally
  return globalThis.ReadableStream && value instanceof ReadableStream;
}
// True for either a web ReadableStream or a Node.js Readable.
function isStream(value) {
  if (isReadableStream(value)) {
    return true;
  }
  return isNodeJsReadableStream(value) ? true : false;
}
// src/api.ts
var _asyncretry = require('async-retry'); var _asyncretry2 = _interopRequireDefault(_asyncretry);
// src/debug.ts
var debugIsActive = false;
var _a, _b;
try {
if (((_a = process.env.DEBUG) == null ? void 0 : _a.includes("blob")) || ((_b = process.env.NEXT_PUBLIC_DEBUG) == null ? void 0 : _b.includes("blob"))) {
debugIsActive = true;
}
} catch (e3) {
}
function debug(message, ...args) {
if (debugIsActive) {
console.debug(`vercel-blob: ${message}`, ...args);
}
}
// src/dom-exception.ts
var _a2;
var DOMException2 = (_a2 = globalThis.DOMException) != null ? _a2 : (() => {
try {
atob("~");
} catch (err) {
return Object.getPrototypeOf(err).constructor;
}
})();
// src/is-network-error.ts
var objectToString = Object.prototype.toString;
var isError = (value) => objectToString.call(value) === "[object Error]";
var errorMessages = /* @__PURE__ */ new Set([
"network error",
// Chrome
"Failed to fetch",
// Chrome
"NetworkError when attempting to fetch resource.",
// Firefox
"The Internet connection appears to be offline.",
// Safari 16
"Load failed",
// Safari 17+
"Network request failed",
// `cross-fetch`
"fetch failed",
// Undici (Node.js)
"terminated"
// Undici (Node.js)
]);
function isNetworkError(error) {
const isValid = error && isError(error) && error.name === "TypeError" && typeof error.message === "string";
if (!isValid) {
return false;
}
if (error.message === "Load failed") {
return error.stack === void 0;
}
return errorMessages.has(error.message);
}
// src/fetch.ts
var _undici = require('undici');
// Transport capability flags: fetch availability, and whether streaming
// request bodies (needed for fetch-based upload progress) are supported.
var hasFetch = typeof _undici.fetch === "function";
var hasFetchWithUploadProgress = hasFetch && supportsRequestStreams;
// Upload-progress granularity: bodies are re-chunked into 64 KiB pieces.
var CHUNK_SIZE = 64 * 1024;
// Sends the request with (undici) fetch. When onUploadProgress is provided,
// the body is normalized to a web ReadableStream and re-chunked so progress
// can be reported as bytes flow through the pipe.
var blobFetch = async ({
input,
init,
onUploadProgress
}) => {
debug("using fetch");
let body;
if (init.body) {
if (onUploadProgress) {
// Count bytes per emitted chunk and report the cumulative total.
const stream = await toReadableStream(init.body);
let loaded = 0;
const chunkTransformStream = createChunkTransformStream(
CHUNK_SIZE,
(newLoaded) => {
loaded += newLoaded;
onUploadProgress(loaded);
}
);
body = stream.pipeThrough(chunkTransformStream);
} else {
body = init.body;
}
}
// Streaming request bodies require duplex: "half" where supported.
const duplex = supportsRequestStreams && body && isStream(body) ? "half" : void 0;
return _undici.fetch.call(void 0,
input,
// @ts-expect-error -- Blob and Nodejs Blob are triggering type errors, fine with it
{
...init,
...init.body ? { body } : {},
duplex
}
);
};
// src/xhr.ts
var hasXhr = typeof XMLHttpRequest !== "undefined";
// Browser fallback transport: XMLHttpRequest exposes native upload-progress
// events even where fetch cannot stream. The XHR result is wrapped back into
// a standard Response so callers see one interface.
var blobXhr = async ({
input,
init,
onUploadProgress
}) => {
debug("using xhr");
let body = null;
if (init.body) {
if (isReadableStream(init.body)) {
// XHR cannot send a ReadableStream; buffer it into a Blob first.
body = await new Response(init.body).blob();
} else {
body = init.body;
}
}
return new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.open(init.method || "GET", input.toString(), true);
if (onUploadProgress) {
xhr.upload.addEventListener("progress", (event) => {
if (event.lengthComputable) {
onUploadProgress(event.loaded);
}
});
}
xhr.onload = () => {
var _a3;
// An abort racing the load event is still reported as an abort.
if ((_a3 = init.signal) == null ? void 0 : _a3.aborted) {
reject(new DOMException("The user aborted the request.", "AbortError"));
return;
}
// Rebuild a Headers object from the raw response-header block.
const headers = new Headers();
const rawHeaders = xhr.getAllResponseHeaders().trim().split(/[\r\n]+/);
rawHeaders.forEach((line) => {
const parts = line.split(": ");
const key = parts.shift();
const value = parts.join(": ");
if (key) headers.set(key.toLowerCase(), value);
});
const response = new Response(xhr.response, {
status: xhr.status,
statusText: xhr.statusText,
headers
});
resolve(response);
};
xhr.onerror = () => {
// Mirror fetch's network-failure behavior (see isNetworkError).
reject(new TypeError("Network request failed"));
};
xhr.ontimeout = () => {
reject(new TypeError("Network request timed out"));
};
xhr.onabort = () => {
reject(new DOMException("The user aborted a request.", "AbortError"));
};
if (init.headers) {
const headers = new Headers(init.headers);
headers.forEach((value, key) => {
xhr.setRequestHeader(key, value);
});
}
if (init.signal) {
// Propagate AbortSignal to the XHR, including already-aborted signals.
init.signal.addEventListener("abort", () => {
xhr.abort();
});
if (init.signal.aborted) {
xhr.abort();
return;
}
}
xhr.send(body);
});
};
// src/request.ts
// Picks the best available transport: fetch with streaming when upload
// progress is requested and supported, then XHR (native progress events),
// then plain fetch/XHR without progress. Throws when no transport exists.
var blobRequest = async ({
  input,
  init,
  onUploadProgress
}) => {
  const wantsProgress = Boolean(onUploadProgress);
  if (wantsProgress && hasFetchWithUploadProgress) {
    return blobFetch({ input, init, onUploadProgress });
  }
  if (wantsProgress && hasXhr) {
    return blobXhr({ input, init, onUploadProgress });
  }
  if (hasFetch) {
    return blobFetch({ input, init });
  }
  if (hasXhr) {
    return blobXhr({ input, init });
  }
  throw new Error("No request implementation available");
};
// src/api.ts
// Longest pathname accepted by createPutOptions().
var MAXIMUM_PATHNAME_LENGTH = 950;
// Error taxonomy: one BlobError subclass per API error code; getBlobError()
// maps response codes onto these classes.
var BlobAccessError = class extends BlobError {
constructor() {
super("Access denied, please provide a valid token for this resource.");
}
};
var BlobContentTypeNotAllowedError = class extends BlobError {
constructor(message) {
super(`Content type mismatch, ${message}.`);
}
};
var BlobPathnameMismatchError = class extends BlobError {
constructor(message) {
super(
`Pathname mismatch, ${message}. Check the pathname used in upload() or put() matches the one from the client token.`
);
}
};
var BlobClientTokenExpiredError = class extends BlobError {
constructor() {
super("Client token has expired.");
}
};
var BlobFileTooLargeError = class extends BlobError {
constructor(message) {
super(`File is too large, ${message}.`);
}
};
var BlobStoreNotFoundError = class extends BlobError {
constructor() {
super("This store does not exist.");
}
};
var BlobStoreSuspendedError = class extends BlobError {
constructor() {
super("This store has been suspended.");
}
};
var BlobUnknownError = class extends BlobError {
constructor() {
super("Unknown error, please visit https://vercel.com/help.");
}
};
var BlobNotFoundError = class extends BlobError {
constructor() {
super("The requested blob does not exist");
}
};
var BlobServiceNotAvailable = class extends BlobError {
constructor() {
super("The blob service is currently not available. Please try again.");
}
};
// Carries the server-suggested retry delay (seconds, 0 when unknown).
var BlobServiceRateLimited = class extends BlobError {
constructor(seconds) {
super(
`Too many requests please lower the number of concurrent requests ${seconds ? ` - try again in ${seconds} seconds` : ""}.`
);
this.retryAfter = seconds != null ? seconds : 0;
}
};
var BlobRequestAbortedError = class extends BlobError {
constructor() {
super("The request was aborted.");
}
};
var BlobPreconditionFailedError = class extends BlobError {
constructor() {
super("Precondition failed: ETag mismatch.");
}
};
// Pinned Vercel Blob API protocol version.
var BLOB_API_VERSION = 12;
// Returns the x-api-version header value as a string, honoring the
// VERCEL_BLOB_API_VERSION_OVERRIDE / NEXT_PUBLIC_… environment overrides.
function getApiVersion() {
  let versionOverride = null;
  try {
    versionOverride =
      process.env.VERCEL_BLOB_API_VERSION_OVERRIDE ||
      process.env.NEXT_PUBLIC_VERCEL_BLOB_API_VERSION_OVERRIDE;
  } catch {
    // No `process` in this runtime; use the pinned version.
  }
  return `${versionOverride ?? BLOB_API_VERSION}`;
}
// Retry budget for API requests: VERCEL_BLOB_RETRIES, defaulting to 10
// (also the fallback when `process` does not exist).
function getRetries() {
  try {
    return parseInt(process.env.VERCEL_BLOB_RETRIES || "10", 10);
  } catch {
    return 10;
  }
}
// Builds a BlobServiceRateLimited from a rate-limit response, propagating
// the server's retry-after delay (seconds) when the header is present.
function createBlobServiceRateLimited(response) {
  const retryAfterHeader = response.headers.get("retry-after");
  const seconds = retryAfterHeader ? parseInt(retryAfterHeader, 10) : void 0;
  return new BlobServiceRateLimited(seconds);
}
// Parses an error response body ({ error: { code, message } }) and maps it
// to a typed BlobError subclass. Some codes are inferred from message text
// because the API reports them under a generic code.
async function getBlobError(response) {
var _a3, _b2, _c;
let code;
let message;
try {
const data = await response.json();
code = (_b2 = (_a3 = data.error) == null ? void 0 : _a3.code) != null ? _b2 : "unknown_error";
message = (_c = data.error) == null ? void 0 : _c.message;
} catch (e6) {
// Non-JSON body (e.g. proxy error page): treat as unknown.
code = "unknown_error";
}
// Message-based reclassification for codes the API reports generically.
if ((message == null ? void 0 : message.includes("contentType")) && message.includes("is not allowed")) {
code = "content_type_not_allowed";
}
if ((message == null ? void 0 : message.includes('"pathname"')) && message.includes("does not match the token payload")) {
code = "client_token_pathname_mismatch";
}
if (message === "Token expired") {
code = "client_token_expired";
}
if (message == null ? void 0 : message.includes("the file length cannot be greater than")) {
code = "file_too_large";
}
let error;
switch (code) {
case "store_suspended":
error = new BlobStoreSuspendedError();
break;
case "forbidden":
error = new BlobAccessError();
break;
case "content_type_not_allowed":
error = new BlobContentTypeNotAllowedError(message);
break;
case "client_token_pathname_mismatch":
error = new BlobPathnameMismatchError(message);
break;
case "client_token_expired":
error = new BlobClientTokenExpiredError();
break;
case "file_too_large":
error = new BlobFileTooLargeError(message);
break;
case "not_found":
error = new BlobNotFoundError();
break;
case "store_not_found":
error = new BlobStoreNotFoundError();
break;
case "bad_request":
error = new BlobError(message != null ? message : "Bad request");
break;
case "service_unavailable":
error = new BlobServiceNotAvailable();
break;
case "rate_limited":
error = createBlobServiceRateLimited(response);
break;
case "precondition_failed":
error = new BlobPreconditionFailedError();
break;
case "unknown_error":
case "not_allowed":
default:
error = new BlobUnknownError();
break;
}
// Both the raw code and the typed error are returned so the caller can
// decide which codes are retryable (see requestApi).
return { code, error };
}
// Core API request wrapper: resolves the token, attaches the standard
// x-api-* headers, retries transient failures (with async-retry), and
// reports upload progress when requested. Resolves with the parsed JSON
// response body.
async function requestApi(pathname, init, commandOptions) {
const apiVersion = getApiVersion();
const token = getTokenFromOptionsOrEnv(commandOptions);
const extraHeaders = getProxyThroughAlternativeApiHeaderFromEnv();
// Token format carries the store id as its 4th "_"-separated segment;
// used only to build a correlatable request id.
const [, , , storeId = ""] = token.split("_");
const requestId = `${storeId}:${Date.now()}:${Math.random().toString(16).slice(2)}`;
let retryCount = 0;
let bodyLength = 0;
let totalLoaded = 0;
const sendBodyLength = (commandOptions == null ? void 0 : commandOptions.onUploadProgress) || shouldUseXContentLength();
if (init.body && // 1. For upload progress we always need to know the total size of the body
// 2. In development we need the header for put() to work correctly when passing a stream
sendBodyLength) {
bodyLength = computeBodyLength(init.body);
}
if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
// Emit an initial 0% event before the first byte is sent.
commandOptions.onUploadProgress({
loaded: 0,
total: bodyLength,
percentage: 0
});
}
const apiResponse = await _asyncretry2.default.call(void 0,
async (bail) => {
let res;
try {
res = await blobRequest({
input: getApiUrl(pathname),
init: {
...init,
headers: {
"x-api-blob-request-id": requestId,
"x-api-blob-request-attempt": String(retryCount),
"x-api-version": apiVersion,
...sendBodyLength ? { "x-content-length": String(bodyLength) } : {},
authorization: `Bearer ${token}`,
...extraHeaders,
...init.headers
}
},
onUploadProgress: (commandOptions == null ? void 0 : commandOptions.onUploadProgress) ? (loaded) => {
var _a3;
const total = bodyLength !== 0 ? bodyLength : loaded;
totalLoaded = loaded;
const percentage = bodyLength > 0 ? Number((loaded / total * 100).toFixed(2)) : 0;
// The final 100% event is deferred until the server has responded
// (emitted after the retry loop below).
if (percentage === 100 && bodyLength > 0) {
return;
}
(_a3 = commandOptions.onUploadProgress) == null ? void 0 : _a3.call(commandOptions, {
loaded,
// When passing a stream to put(), we have no way to know the total size of the body.
// Instead of defining total as total?: number we decided to set the total to the currently
// loaded number. This is not inaccurate and way more practical for DX.
// Passing down a stream to put() is very rare
total,
percentage
});
} : void 0
});
} catch (error2) {
// Aborts are final; network errors are retryable; other TypeErrors are
// programming errors and bail immediately.
if (error2 instanceof DOMException2 && error2.name === "AbortError") {
bail(new BlobRequestAbortedError());
return;
}
if (isNetworkError(error2)) {
throw error2;
}
if (error2 instanceof TypeError) {
bail(error2);
return;
}
throw error2;
}
if (res.ok) {
return res;
}
// Map the failure; only transient codes are rethrown for retry.
const { code, error } = await getBlobError(res);
if (code === "unknown_error" || code === "service_unavailable" || code === "internal_server_error") {
throw error;
}
bail(error);
},
{
retries: getRetries(),
onRetry: (error) => {
if (error instanceof Error) {
debug(`retrying API request to ${pathname}`, error.message);
}
retryCount = retryCount + 1;
}
}
);
// bail() resolves the retry wrapper with undefined rather than throwing.
if (!apiResponse) {
throw new BlobUnknownError();
}
if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
// Deferred final progress event: the upload is only "done" once the
// server replied successfully.
commandOptions.onUploadProgress({
loaded: totalLoaded,
total: totalLoaded,
percentage: 100
});
}
return await apiResponse.json();
}
// Optional escape hatch: when VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API (or
// its NEXT_PUBLIC_ variant) is set, every request carries the
// x-proxy-through-alternative-api header with that value.
function getProxyThroughAlternativeApiHeaderFromEnv() {
  const extraHeaders = {};
  try {
    const primary = process.env.VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    const fallback = process.env.NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    if (primary !== void 0) {
      extraHeaders["x-proxy-through-alternative-api"] = primary;
    } else if (fallback !== void 0) {
      extraHeaders["x-proxy-through-alternative-api"] = fallback;
    }
  } catch {
    // No `process` in this runtime; send no extra headers.
  }
  return extraHeaders;
}
// Whether to always send x-content-length (needed in development for put()
// with a stream; see requestApi). Enabled via
// VERCEL_BLOB_USE_X_CONTENT_LENGTH=1; false when `process` is unavailable.
function shouldUseXContentLength() {
  try {
    return process.env.VERCEL_BLOB_USE_X_CONTENT_LENGTH === "1";
  } catch {
    return false;
  }
}
// src/put-helpers.ts
// Maps put() option names onto the HTTP headers the API expects.
var putOptionHeaderMap = {
  cacheControlMaxAge: "x-cache-control-max-age",
  addRandomSuffix: "x-add-random-suffix",
  allowOverwrite: "x-allow-overwrite",
  contentType: "x-content-type",
  access: "x-vercel-blob-access",
  ifMatch: "x-if-match"
};
// Translates validated put() options into request headers. The access header
// is always present; the rest are emitted only when the option is both
// allowed for the calling method and actually provided.
function createPutHeaders(allowedOptions, options) {
  const headers = {};
  headers[putOptionHeaderMap.access] = options.access;
  const allowed = (name) => allowedOptions.includes(name);
  if (allowed("contentType") && options.contentType) {
    headers[putOptionHeaderMap.contentType] = options.contentType;
  }
  if (allowed("addRandomSuffix") && options.addRandomSuffix !== void 0) {
    // Booleans travel as "1"/"0" header values.
    headers[putOptionHeaderMap.addRandomSuffix] = options.addRandomSuffix ? "1" : "0";
  }
  if (allowed("allowOverwrite") && options.allowOverwrite !== void 0) {
    headers[putOptionHeaderMap.allowOverwrite] = options.allowOverwrite ? "1" : "0";
  }
  if (allowed("cacheControlMaxAge") && options.cacheControlMaxAge !== void 0) {
    headers[putOptionHeaderMap.cacheControlMaxAge] = options.cacheControlMaxAge.toString();
  }
  if (allowed("ifMatch") && options.ifMatch) {
    headers[putOptionHeaderMap.ifMatch] = options.ifMatch;
  }
  return headers;
}
// Shared validation for put()/multipart methods: checks the pathname (set,
// length-bounded, no disallowed substrings) and the options (present, valid
// access). Runs the caller-supplied extraChecks, resolves a client token via
// getToken when provided, and returns the options object.
async function createPutOptions({
pathname,
options,
extraChecks,
getToken
}) {
if (!pathname) {
throw new BlobError("pathname is required");
}
if (pathname.length > MAXIMUM_PATHNAME_LENGTH) {
throw new BlobError(
`pathname is too long, maximum length is ${MAXIMUM_PATHNAME_LENGTH}`
);
}
for (const invalidCharacter of disallowedPathnameCharacters) {
if (pathname.includes(invalidCharacter)) {
throw new BlobError(
`pathname cannot contain "${invalidCharacter}", please encode it if needed`
);
}
}
if (!options) {
throw new BlobError("missing options, see usage");
}
if (options.access !== "public" && options.access !== "private") {
throw new BlobError('access must be "public" or "private"');
}
if (extraChecks) {
extraChecks(options);
}
if (getToken) {
// NOTE: mutates the caller-provided options object by attaching the token.
options.token = await getToken(pathname, options);
}
return options;
}
// src/multipart/complete.ts
// Factory for the public completeMultipartUpload method: validates options
// via createPutOptions, builds headers, then finalizes the upload.
function createCompleteMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
return async (pathname, parts, optionsInput) => {
const options = await createPutOptions({
pathname,
options: optionsInput,
extraChecks,
getToken
});
const headers = createPutHeaders(allowedOptions, options);
return completeMultipartUpload({
uploadId: options.uploadId,
key: options.key,
pathname,
headers,
options,
parts
});
};
}
// Finalizes a multipart upload by POSTing the collected part descriptors
// ({ partNumber, etag }) to the /mpu endpoint with x-mpu-action: complete.
// Network-level fetch failures are rewrapped as BlobServiceNotAvailable.
async function completeMultipartUpload({
uploadId,
key,
pathname,
parts,
headers,
options
}) {
const params = new URLSearchParams({ pathname });
try {
const response = await requestApi(
`/mpu?${params.toString()}`,
{
method: "POST",
headers: {
...headers,
"content-type": "application/json",
"x-mpu-action": "complete",
"x-mpu-upload-id": uploadId,
// key can be any utf8 character so we need to encode it as HTTP headers can only be us-ascii
// https://www.rfc-editor.org/rfc/rfc7230#section-3.2.4
"x-mpu-key": encodeURIComponent(key)
},
body: JSON.stringify(parts),
signal: options.abortSignal
},
options
);
debug("mpu: complete", response);
return response;
} catch (error) {
if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
throw new BlobServiceNotAvailable();
} else {
throw error;
}
}
}
// src/multipart/create.ts
// Factory for the public createMultipartUpload method: validates options,
// initiates the upload server-side and returns its { key, uploadId } handle.
function createCreateMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
return async (pathname, optionsInput) => {
const options = await createPutOptions({
pathname,
options: optionsInput,
extraChecks,
getToken
});
const headers = createPutHeaders(allowedOptions, options);
const createMultipartUploadResponse = await createMultipartUpload(
pathname,
headers,
options
);
return {
key: createMultipartUploadResponse.key,
uploadId: createMultipartUploadResponse.uploadId
};
};
}
// Initiates a multipart upload (x-mpu-action: create) and returns the API
// response containing key and uploadId. Network-level fetch failures are
// rewrapped as BlobServiceNotAvailable.
async function createMultipartUpload(pathname, headers, options) {
debug("mpu: create", "pathname:", pathname);
const params = new URLSearchParams({ pathname });
try {
const response = await requestApi(
`/mpu?${params.toString()}`,
{
method: "POST",
headers: {
...headers,
"x-mpu-action": "create"
},
signal: options.abortSignal
},
options
);
debug("mpu: create", response);
return response;
} catch (error) {
if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
throw new BlobServiceNotAvailable();
}
throw error;
}
}
// src/multipart/upload.ts
var _throttleit = require('throttleit'); var _throttleit2 = _interopRequireDefault(_throttleit);
// Factory for the public uploadPart method: validates options, rejects plain
// JS objects as bodies (a common misuse) and uploads a single part, returning
// the { etag, partNumber } descriptor needed by completeMultipartUpload.
function createUploadPartMethod({ allowedOptions, getToken, extraChecks }) {
return async (pathname, body, optionsInput) => {
const options = await createPutOptions({
pathname,
options: optionsInput,
extraChecks,
getToken
});
const headers = createPutHeaders(allowedOptions, options);
if (isPlainObject(body)) {
throw new BlobError(
"Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
);
}
const result = await uploadPart({
uploadId: options.uploadId,
key: options.key,
pathname,
part: { blob: body, partNumber: options.partNumber },
headers,
options
});
return {
etag: result.etag,
partNumber: options.partNumber
};
};
}
// Uploads one part (x-mpu-action: upload). The request runs on an internal
// AbortController so uploadAllParts can cancel every in-flight part at once;
// the user's abortSignal is bridged onto it for the duration of the request.
async function uploadPart({
uploadId,
key,
pathname,
headers,
options,
internalAbortController = new AbortController(),
part
}) {
var _a3, _b2, _c;
const params = new URLSearchParams({ pathname });
const responsePromise = requestApi(
`/mpu?${params.toString()}`,
{
signal: internalAbortController.signal,
method: "POST",
headers: {
...headers,
"x-mpu-action": "upload",
"x-mpu-key": encodeURIComponent(key),
"x-mpu-upload-id": uploadId,
"x-mpu-part-number": part.partNumber.toString()
},
// weird things between undici types and native fetch types
body: part.blob
},
options
);
function handleAbort() {
internalAbortController.abort();
}
// Bridge the user's signal: abort immediately if it already fired,
// otherwise listen until this part's request settles.
if ((_a3 = options.abortSignal) == null ? void 0 : _a3.aborted) {
handleAbort();
} else {
(_b2 = options.abortSignal) == null ? void 0 : _b2.addEventListener("abort", handleAbort);
}
const response = await responsePromise;
(_c = options.abortSignal) == null ? void 0 : _c.removeEventListener("abort", handleAbort);
return response;
}
// Multipart tuning: 6 concurrent part uploads in browsers, 8 on servers.
var maxConcurrentUploads = typeof window !== "undefined" ? 6 : 8;
// 8 MiB parts; hold at most twice the in-flight part budget in memory.
var partSizeInBytes = 8 * 1024 * 1024;
var maxBytesInMemory = maxConcurrentUploads * partSizeInBytes * 2;
// Drives a full multipart upload from a ReadableStream: reads the stream
// into fixed 8 MiB parts, keeps up to maxConcurrentUploads parts in flight,
// and applies backpressure by pausing reads once maxBytesInMemory is
// buffered. Resolves with the completed { partNumber, etag } list, rejects
// (and aborts all in-flight parts) on the first failure.
function uploadAllParts({
uploadId,
key,
pathname,
stream,
headers,
options,
totalToLoad
}) {
debug("mpu: upload init", "key:", key);
const internalAbortController = new AbortController();
return new Promise((resolve, reject) => {
const partsToUpload = [];
const completedParts = [];
const reader = stream.getReader();
let activeUploads = 0;
let reading = false;
let currentPartNumber = 1;
let rejected = false;
let currentBytesInMemory = 0;
let doneReading = false;
let bytesSent = 0;
let arrayBuffers = [];
let currentPartBytesRead = 0;
let onUploadProgress;
// Per-part loaded byte counts, summed for the aggregate progress event.
const totalLoadedPerPartNumber = {};
if (options.onUploadProgress) {
// Throttled to at most one progress event per 150ms.
onUploadProgress = _throttleit2.default.call(void 0, () => {
var _a3;
const loaded = Object.values(totalLoadedPerPartNumber).reduce(
(acc, cur) => {
return acc + cur;
},
0
);
const total = totalToLoad || loaded;
// NOTE(review): "(loaded / totalToLoad || loaded)" guards against a
// falsy quotient; with totalToLoad > 0 and loaded > 0 it reduces to
// loaded / totalToLoad.
const percentage = totalToLoad > 0 ? Number(((loaded / totalToLoad || loaded) * 100).toFixed(2)) : 0;
(_a3 = options.onUploadProgress) == null ? void 0 : _a3.call(options, { loaded, total, percentage });
}, 150);
}
read().catch(cancel);
// Pulls from the stream until the memory budget is reached or the stream
// ends, slicing bytes into partSizeInBytes-sized Blobs and queueing them.
async function read() {
debug(
"mpu: upload read start",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = true;
while (currentBytesInMemory < maxBytesInMemory && !rejected) {
try {
const { value, done } = await reader.read();
if (done) {
doneReading = true;
debug("mpu: upload read consumed the whole stream");
// Flush the final, possibly short, part.
if (arrayBuffers.length > 0) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
sendParts();
}
reading = false;
return;
}
currentBytesInMemory += value.byteLength;
// A single read() chunk can span several parts; slice accordingly.
let valueOffset = 0;
while (valueOffset < value.byteLength) {
const remainingPartSize = partSizeInBytes - currentPartBytesRead;
const endOffset = Math.min(
valueOffset + remainingPartSize,
value.byteLength
);
const chunk = value.slice(valueOffset, endOffset);
arrayBuffers.push(chunk);
currentPartBytesRead += chunk.byteLength;
valueOffset = endOffset;
if (currentPartBytesRead === partSizeInBytes) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
arrayBuffers = [];
currentPartBytesRead = 0;
sendParts();
}
}
} catch (error) {
cancel(error);
}
}
debug(
"mpu: upload read end",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = false;
}
// Uploads one queued part; on success releases its memory, schedules more
// queued parts, resumes reading if paused, and resolves once the stream is
// fully read and no uploads remain.
async function sendPart(part) {
activeUploads++;
debug(
"mpu: upload send part start",
"partNumber:",
part.partNumber,
"size:",
part.blob.size,
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
try {
const uploadProgressForPart = options.onUploadProgress ? (event) => {
totalLoadedPerPartNumber[part.partNumber] = event.loaded;
if (onUploadProgress) {
onUploadProgress();
}
} : void 0;
const completedPart = await uploadPart({
uploadId,
key,
pathname,
headers,
options: {
...options,
onUploadProgress: uploadProgressForPart
},
internalAbortController,
part
});
debug(
"mpu: upload send part end",
"partNumber:",
part.partNumber,
"activeUploads",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
if (rejected) {
return;
}
completedParts.push({
partNumber: part.partNumber,
etag: completedPart.etag
});
currentBytesInMemory -= part.blob.size;
activeUploads--;
bytesSent += part.blob.size;
if (partsToUpload.length > 0) {
sendParts();
}
if (doneReading) {
if (activeUploads === 0) {
reader.releaseLock();
resolve(completedParts);
}
return;
}
if (!reading) {
// Memory was freed: resume pulling from the stream.
read().catch(cancel);
}
} catch (error) {
cancel(error);
}
}
// Starts uploads for queued parts up to the concurrency limit.
function sendParts() {
if (rejected) {
return;
}
debug(
"send parts",
"activeUploads",
activeUploads,
"partsToUpload",
partsToUpload.length
);
while (activeUploads < maxConcurrentUploads && partsToUpload.length > 0) {
const partToSend = partsToUpload.shift();
if (partToSend) {
void sendPart(partToSend);
}
}
}
// First-failure handler: aborts every in-flight part, releases the reader
// and rejects (rewrapping network-level fetch failures).
function cancel(error) {
if (rejected) {
return;
}
rejected = true;
internalAbortController.abort();
reader.releaseLock();
if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
reject(new BlobServiceNotAvailable());
} else {
reject(error);
}
}
});
}
// src/multipart/create-uploader.ts
// Factory for the "uploader" convenience API: initiates a multipart upload
// once, then returns an object whose uploadPart()/complete() closures reuse
// the captured key, uploadId, headers and options.
function createCreateMultipartUploaderMethod({ allowedOptions, getToken, extraChecks }) {
return async (pathname, optionsInput) => {
const options = await createPutOptions({
pathname,
options: optionsInput,
extraChecks,
getToken
});
const headers = createPutHeaders(allowedOptions, options);
const createMultipartUploadResponse = await createMultipartUpload(
pathname,
headers,
options
);
return {
key: createMultipartUploadResponse.key,
uploadId: createMultipartUploadResponse.uploadId,
// Uploads one part; partNumber is caller-managed.
async uploadPart(partNumber, body) {
if (isPlainObject(body)) {
throw new BlobError(
"Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
);
}
const result = await uploadPart({
uploadId: createMultipartUploadResponse.uploadId,
key: createMultipartUploadResponse.key,
pathname,
part: { partNumber, blob: body },
headers,
options
});
return {
etag: result.etag,
partNumber
};
},
// Finalizes the upload from the collected part descriptors.
async complete(parts) {
return completeMultipartUpload({
uploadId: createMultipartUploadResponse.uploadId,
key: createMultipartUploadResponse.key,
pathname,
parts,
headers,
options
});
}
};
};
}
// src/put.ts
// src/multipart/uncontrolled.ts
// Fully automatic multipart upload used by put({ multipart: true }):
// create -> uploadAllParts -> complete. Progress reporting is handled inside
// uploadAllParts, so the create/complete calls get options with
// onUploadProgress stripped.
async function uncontrolledMultipartUpload(pathname, body, headers, options) {
debug("mpu: init", "pathname:", pathname, "headers:", headers);
const optionsWithoutOnUploadProgress = {
...options,
onUploadProgress: void 0
};
const createMultipartUploadResponse = await createMultipartUpload(
pathname,
headers,
optionsWithoutOnUploadProgress
);
// 0 when the body size is unknown (e.g. a stream).
const totalToLoad = computeBodyLength(body);
const stream = await toReadableStream(body);
const parts = await uploadAllParts({
uploadId: createMultipartUploadResponse.uploadId,
key: createMultipartUploadResponse.key,
pathname,
// @ts-expect-error ReadableStream<ArrayBuffer | Uint8Array> is compatible at runtime
stream,
headers,
options,
totalToLoad
});
const blob = await completeMultipartUpload({
uploadId: createMultipartUploadResponse.uploadId,
key: createMultipartUploadResponse.key,
pathname,
parts,
headers,
options: optionsWithoutOnUploadProgress
});
return blob;
}
// src/put.ts
/**
 * Factory for put(): rejects empty or plain-object bodies, delegates to the
 * multipart pipeline when `options.multipart` is set, otherwise issues a
 * single PUT request and maps the API response to the public blob shape.
 */
function createPutMethod({
  allowedOptions,
  getToken,
  extraChecks
}) {
  return async function put(pathname, body, optionsInput) {
    if (!body) {
      throw new BlobError("body is required");
    }
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const options = await createPutOptions({ pathname, options: optionsInput, extraChecks, getToken });
    const headers = createPutHeaders(allowedOptions, options);
    if (options.multipart === true) {
      return uncontrolledMultipartUpload(pathname, body, headers, options);
    }
    // Throttle progress callbacks to at most one every 100ms.
    let onUploadProgress;
    if (options.onUploadProgress) {
      onUploadProgress = _throttleit2.default.call(void 0, options.onUploadProgress, 100);
    }
    const searchParams = new URLSearchParams({ pathname });
    const response = await requestApi(
      `/?${searchParams.toString()}`,
      {
        method: "PUT",
        body,
        headers,
        signal: options.abortSignal
      },
      { ...options, onUploadProgress }
    );
    return {
      url: response.url,
      downloadUrl: response.downloadUrl,
      pathname: response.pathname,
      contentType: response.contentType,
      contentDisposition: response.contentDisposition,
      etag: response.etag
    };
  };
}
// src/create-folder.ts
/**
 * Creates an empty "folder" blob. Folders are represented by pathnames with a
 * trailing slash (appended when missing); random suffixes are always disabled
 * so the folder keeps its exact name.
 */
async function createFolder(pathname, options) {
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  const { access } = options;
  if (access !== "public" && access !== "private") {
    throw new BlobError('access must be "public" or "private"');
  }
  const folderPathname = pathname.endsWith("/") ? pathname : `${pathname}/`;
  const headers = {
    [putOptionHeaderMap.access]: access,
    [putOptionHeaderMap.addRandomSuffix]: "0"
  };
  const params = new URLSearchParams({ pathname: folderPathname });
  const response = await requestApi(
    `/?${params.toString()}`,
    {
      method: "PUT",
      headers,
      signal: options.abortSignal
    },
    options
  );
  return {
    url: response.url,
    pathname: response.pathname
  };
}
exports.getTokenFromOptionsOrEnv = getTokenFromOptionsOrEnv; exports.BlobError = BlobError; exports.getDownloadUrl = getDownloadUrl; exports.disallowedPathnameCharacters = disallowedPathnameCharacters; exports.MAXIMUM_PATHNAME_LENGTH = MAXIMUM_PATHNAME_LENGTH; exports.BlobAccessError = BlobAccessError; exports.BlobContentTypeNotAllowedError = BlobContentTypeNotAllowedError; exports.BlobPathnameMismatchError = BlobPathnameMismatchError; exports.BlobClientTokenExpiredError = BlobClientTokenExpiredError; exports.BlobFileTooLargeError = BlobFileTooLargeError; exports.BlobStoreNotFoundError = BlobStoreNotFoundError; exports.BlobStoreSuspendedError = BlobStoreSuspendedError; exports.BlobUnknownError = BlobUnknownError; exports.BlobNotFoundError = BlobNotFoundError; exports.BlobServiceNotAvailable = BlobServiceNotAvailable; exports.BlobServiceRateLimited = BlobServiceRateLimited; exports.BlobRequestAbortedError = BlobRequestAbortedError; exports.BlobPreconditionFailedError = BlobPreconditionFailedError; exports.requestApi = requestApi; exports.createCompleteMultipartUploadMethod = createCompleteMultipartUploadMethod; exports.createCreateMultipartUploadMethod = createCreateMultipartUploadMethod; exports.createUploadPartMethod = createUploadPartMethod; exports.createCreateMultipartUploaderMethod = createCreateMultipartUploaderMethod; exports.createPutMethod = createPutMethod; exports.createFolder = createFolder;
/*!
* bytes
* Copyright(c) 2012-2014 TJ Holowaychuk
* Copyright(c) 2015 Jed Watson
* MIT Licensed
*/
//# sourceMappingURL=chunk-7IJ2KDBM.cjs.map

Sorry, the diff of this file is too big to display

// src/helpers.ts
import { isNodeProcess } from "is-node-process";
// src/multipart/helpers.ts
import isBuffer from "is-buffer";
import { Readable } from "stream";
// Feature-detect whether `new Blob([Uint8Array])` preserves binary content.
// Some older runtimes mishandle the view; resolve false there so callers can
// fall back to a hand-rolled ReadableStream.
var supportsNewBlobFromArrayBuffer = new Promise((resolve) => {
  try {
    // "hello" as raw bytes.
    const probeBytes = new Uint8Array([104, 101, 108, 108, 111]);
    new Blob([probeBytes]).text().then(
      (text) => resolve(text === "hello"),
      () => resolve(false)
    );
  } catch {
    resolve(false);
  }
});
/**
 * Normalizes any supported body type (web stream, Blob, Node.js Readable,
 * ArrayBuffer, Buffer or string) into a web ReadableStream of bytes.
 */
async function toReadableStream(value) {
  // Already a web stream: pass through untouched.
  if (value instanceof ReadableStream) {
    return value;
  }
  if (value instanceof Blob) {
    return value.stream();
  }
  // Node.js Readable: convert via the built-in adapter.
  if (isNodeJsReadableStream(value)) {
    return Readable.toWeb(value);
  }
  // Everything else is materialized into a single byte chunk first.
  let chunk;
  if (value instanceof ArrayBuffer) {
    chunk = new Uint8Array(value);
  } else if (isNodeJsBuffer(value)) {
    chunk = value;
  } else {
    chunk = stringToUint8Array(value);
  }
  if (await supportsNewBlobFromArrayBuffer) {
    return new Blob([chunk]).stream();
  }
  // Fallback for runtimes where Blob mangles binary input: emit the chunk
  // from a manually-constructed single-chunk stream.
  return new ReadableStream({
    start(controller) {
      controller.enqueue(chunk);
      controller.close();
    }
  });
}
/**
 * Duck-types a Node.js Readable stream: an object with a `pipe` method, a
 * truthy `readable` flag, a `_read` implementation and an internal
 * `_readableState` object.
 * Fix: `typeof null === "object"`, so the previous version threw a TypeError
 * when called with `null` (accessing `null.pipe`); it now returns false.
 */
function isNodeJsReadableStream(value) {
  return typeof value === "object" && value !== null && typeof value.pipe === "function" && value.readable && typeof value._read === "function" && typeof value._readableState === "object";
}
// Encodes a JS string to its UTF-8 bytes.
function stringToUint8Array(s) {
  return new TextEncoder().encode(s);
}
// Detects Node.js Buffer instances via the environment-agnostic is-buffer check.
function isNodeJsBuffer(candidate) {
  return isBuffer(candidate);
}
// src/bytes.ts
// Minimal subset of the `bytes` package: parse "1.5mb"-style strings (or
// plain numbers) into a byte count. Used for debug logging of buffer sizes.
var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i;
// Multiplier for each supported unit (powers of 1024).
var map = {
  b: 1,
  kb: 1024,
  mb: 1024 ** 2,
  gb: 1024 ** 3,
  tb: 1024 ** 4,
  pb: 1024 ** 5
};
/**
 * Converts a value to a byte count. Numbers pass through unchanged; strings
 * are parsed as "<float><unit>" (unit optional, defaults to bytes). Returns
 * null for anything unparseable.
 */
function bytes(val) {
  if (typeof val === "number" && !Number.isNaN(val)) {
    return val;
  }
  if (typeof val !== "string") {
    return null;
  }
  const match = parseRegExp.exec(val);
  let floatValue;
  let unit = "b";
  if (match) {
    const [, numericPart, , , unitPart] = match;
    if (!numericPart) {
      return null;
    }
    floatValue = parseFloat(numericPart);
    if (unitPart) {
      unit = unitPart.toLowerCase();
    }
  } else {
    // No recognized unit suffix: treat the string as a plain integer count.
    floatValue = parseInt(val, 10);
  }
  if (Number.isNaN(floatValue)) {
    return null;
  }
  return Math.floor(map[unit] * floatValue);
}
// src/helpers.ts
// Default Blob API endpoint; can be overridden via the VERCEL_BLOB_API_URL /
// NEXT_PUBLIC_VERCEL_BLOB_API_URL environment variables (see getApiUrl).
var defaultVercelBlobApiUrl = "https://vercel.com/api/blob";
/**
 * Resolves the read-write token: an explicit `token` option wins, then the
 * BLOB_READ_WRITE_TOKEN environment variable; otherwise throws a BlobError.
 */
function getTokenFromOptionsOrEnv(options) {
  const explicitToken = options == null ? void 0 : options.token;
  if (explicitToken) {
    return explicitToken;
  }
  const envToken = process.env.BLOB_READ_WRITE_TOKEN;
  if (envToken) {
    return envToken;
  }
  throw new BlobError(
    "No token found. Either configure the `BLOB_READ_WRITE_TOKEN` environment variable, or pass a `token` option to your calls."
  );
}
// Base error for every SDK failure; prefixes all messages with "Vercel Blob: ".
var BlobError = class extends Error {
  constructor(message) {
    super("Vercel Blob: " + message);
  }
};
// Returns `blobUrl` with `?download=1` appended so browsers save the blob
// instead of rendering it inline.
function getDownloadUrl(blobUrl) {
  const parsed = new URL(blobUrl);
  parsed.searchParams.set("download", "1");
  return parsed.toString();
}
/**
 * True for plain data objects only: object literals, Object.create(null), or
 * objects one step above a null prototype. Anything carrying
 * Symbol.toStringTag or Symbol.iterator (Map, typed arrays, streams, ...) is
 * rejected so real payload types are not mistaken for plain data.
 */
function isPlainObject(value) {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const proto = Object.getPrototypeOf(value);
  const protoLooksPlain = proto === null || proto === Object.prototype || Object.getPrototypeOf(proto) === null;
  if (!protoLooksPlain) {
    return false;
  }
  return !(Symbol.toStringTag in value) && !(Symbol.iterator in value);
}
var disallowedPathnameCharacters = ["//"];
// Feature-detects whether fetch() supports streaming request bodies.
// Evaluated once at module load.
var supportsRequestStreams = (() => {
  // Node.js (undici fetch) always supports request streams.
  if (isNodeProcess()) {
    return true;
  }
  const apiUrl = getApiUrl();
  // NOTE(review): streaming is deliberately disabled against localhost —
  // presumably because local dev setups don't handle half-duplex requests;
  // confirm before changing.
  if (apiUrl.startsWith("http://localhost")) {
    return false;
  }
  // Constructing a Request with a stream body only reads the `duplex` option
  // on engines that implement request streaming; engines that don't instead
  // stringify the body and set a Content-Type header. Check both signals.
  let duplexAccessed = false;
  const hasContentType = new Request(getApiUrl(), {
    body: new ReadableStream(),
    method: "POST",
    // @ts-expect-error -- TypeScript doesn't yet have duplex but it's in the spec: https://github.com/microsoft/TypeScript-DOM-lib-generator/pull/1729
    get duplex() {
      duplexAccessed = true;
      return "half";
    }
  }).headers.has("Content-Type");
  return duplexAccessed && !hasContentType;
})();
/**
 * Builds the API URL for `pathname`, honoring the VERCEL_BLOB_API_URL /
 * NEXT_PUBLIC_VERCEL_BLOB_API_URL overrides. The try/catch covers runtimes
 * where `process` is undefined (browsers).
 */
function getApiUrl(pathname = "") {
  let baseUrl = null;
  try {
    baseUrl = process.env.VERCEL_BLOB_API_URL || process.env.NEXT_PUBLIC_VERCEL_BLOB_API_URL;
  } catch {
    // No `process` in this runtime; fall through to the default endpoint.
  }
  if (!baseUrl) {
    baseUrl = defaultVercelBlobApiUrl;
  }
  return `${baseUrl}${pathname}`;
}
// Reused encoder instance; null in runtimes without TextEncoder.
var TEXT_ENCODER = typeof TextEncoder === "function" ? new TextEncoder() : null;
/**
 * Best-effort byte length of a request body: UTF-8 length for strings,
 * `byteLength` for buffers/typed arrays, `size` for Blobs, otherwise 0
 * (e.g. streams, whose length cannot be known up front).
 */
function computeBodyLength(body) {
  if (!body) {
    return 0;
  }
  if (typeof body === "string") {
    // Prefer TextEncoder; Blob also measures UTF-8 bytes where available.
    return TEXT_ENCODER ? TEXT_ENCODER.encode(body).byteLength : new Blob([body]).size;
  }
  if ("byteLength" in body && typeof body.byteLength === "number") {
    return body.byteLength;
  }
  if ("size" in body && typeof body.size === "number") {
    return body.size;
  }
  return 0;
}
/**
 * Returns a TransformStream that re-chunks its input into fixed-size
 * `chunkSize` pieces, reporting each emitted piece's byte length through
 * `onProgress`. A trailing piece smaller than `chunkSize` is emitted on
 * flush. Work is deferred to microtasks, as in the original implementation.
 */
var createChunkTransformStream = (chunkSize, onProgress) => {
  let pending = new Uint8Array(0);
  const emitFullChunks = (controller) => {
    while (pending.length >= chunkSize) {
      const piece = pending.slice(0, chunkSize);
      controller.enqueue(piece);
      onProgress?.(piece.byteLength);
      pending = pending.slice(chunkSize);
    }
  };
  return new TransformStream({
    transform(chunk, controller) {
      queueMicrotask(() => {
        // Append the incoming bytes to the carry-over buffer...
        const merged = new Uint8Array(pending.length + chunk.byteLength);
        merged.set(pending);
        merged.set(new Uint8Array(chunk), pending.length);
        pending = merged;
        // ...then emit as many full chunks as possible.
        emitFullChunks(controller);
      });
    },
    flush(controller) {
      queueMicrotask(() => {
        if (pending.length > 0) {
          controller.enqueue(pending);
          onProgress?.(pending.byteLength);
        }
      });
    }
  });
};
// True when `value` is a web ReadableStream, guarding for runtimes where the
// global is missing.
// TODO: Can be removed once Node.js 16 is no more required internally
function isReadableStream(value) {
  if (!globalThis.ReadableStream) {
    return globalThis.ReadableStream;
  }
  return value instanceof ReadableStream;
}
// True for either a web ReadableStream or a Node.js Readable stream.
function isStream(value) {
  return Boolean(isReadableStream(value)) || Boolean(isNodeJsReadableStream(value));
}
// src/api.ts
import retry from "async-retry";
// src/debug.ts
// Debug logging is enabled when DEBUG or NEXT_PUBLIC_DEBUG contains "blob".
// The try/catch covers runtimes without `process`.
var debugIsActive = false;
try {
  if (process.env.DEBUG?.includes("blob") || process.env.NEXT_PUBLIC_DEBUG?.includes("blob")) {
    debugIsActive = true;
  }
} catch {
}
// Logs to console.debug with a "vercel-blob:" prefix when debugging is active.
function debug(message, ...args) {
  if (!debugIsActive) {
    return;
  }
  console.debug(`vercel-blob: ${message}`, ...args);
}
// src/dom-exception.ts
// DOMException fallback: some older runtimes don't expose it globally, but
// atob("~") throws one — recover the constructor from that instance.
var DOMException2 = globalThis.DOMException ?? (() => {
  try {
    atob("~");
  } catch (err) {
    return Object.getPrototypeOf(err).constructor;
  }
})();
// src/is-network-error.ts
var objectToString = Object.prototype.toString;
// Brand check that also matches cross-realm Error instances.
var isError = (value) => objectToString.call(value) === "[object Error]";
// Messages the major runtimes use for fetch-level network failures.
var errorMessages = /* @__PURE__ */ new Set([
  "network error",
  // Chrome
  "Failed to fetch",
  // Chrome
  "NetworkError when attempting to fetch resource.",
  // Firefox
  "The Internet connection appears to be offline.",
  // Safari 16
  "Load failed",
  // Safari 17+
  "Network request failed",
  // `cross-fetch`
  "fetch failed",
  // Undici (Node.js)
  "terminated"
  // Undici (Node.js)
]);
/**
 * Heuristically detects a fetch network failure: a TypeError whose message is
 * one of the known runtime-specific network error messages.
 */
function isNetworkError(error) {
  if (!error || !isError(error) || error.name !== "TypeError" || typeof error.message !== "string") {
    return false;
  }
  // Safari 17 network errors ("Load failed") carry no stack; any "Load
  // failed" TypeError that has a stack is unrelated.
  if (error.message === "Load failed") {
    return error.stack === void 0;
  }
  return errorMessages.has(error.message);
}
// src/fetch.ts
import { fetch } from "undici";
// Whether a fetch implementation is available at all.
var hasFetch = typeof fetch === "function";
// Upload progress over fetch requires streaming request body support.
var hasFetchWithUploadProgress = hasFetch && supportsRequestStreams;
// Re-chunking granularity used for progress reporting (64 KiB).
var CHUNK_SIZE = 64 * 1024;
/**
 * fetch-based transport. When progress reporting is requested, the body is
 * converted to a stream and piped through a chunking TransformStream that
 * counts uploaded bytes. `duplex: "half"` is set when streaming a request
 * body, as required by the fetch spec.
 */
var blobFetch = async ({
  input,
  init,
  onUploadProgress
}) => {
  debug("using fetch");
  let body;
  if (init.body) {
    if (!onUploadProgress) {
      body = init.body;
    } else {
      const sourceStream = await toReadableStream(init.body);
      let loaded = 0;
      const progressStream = createChunkTransformStream(
        CHUNK_SIZE,
        (chunkBytes) => {
          loaded += chunkBytes;
          onUploadProgress(loaded);
        }
      );
      body = sourceStream.pipeThrough(progressStream);
    }
  }
  const duplex = supportsRequestStreams && body && isStream(body) ? "half" : void 0;
  return fetch(
    input,
    // @ts-expect-error -- Blob and Nodejs Blob are triggering type errors, fine with it
    {
      ...init,
      ...init.body ? { body } : {},
      duplex
    }
  );
};
// src/xhr.ts
// Whether XMLHttpRequest exists (browser-like runtimes).
var hasXhr = typeof XMLHttpRequest !== "undefined";
// XMLHttpRequest-based transport, used where fetch cannot report upload
// progress. Mirrors a subset of the fetch contract: resolves with a
// Response, rejects with TypeError on network failure and DOMException
// "AbortError" on cancellation.
var blobXhr = async ({
  input,
  init,
  onUploadProgress
}) => {
  debug("using xhr");
  let body = null;
  if (init.body) {
    // XHR cannot send web streams: buffer them into a Blob first.
    if (isReadableStream(init.body)) {
      body = await new Response(init.body).blob();
    } else {
      body = init.body;
    }
  }
  return new Promise((resolve, reject) => {
    const xhr = new XMLHttpRequest();
    xhr.open(init.method || "GET", input.toString(), true);
    if (onUploadProgress) {
      // Native upload progress events carry absolute loaded byte counts.
      xhr.upload.addEventListener("progress", (event) => {
        if (event.lengthComputable) {
          onUploadProgress(event.loaded);
        }
      });
    }
    xhr.onload = () => {
      var _a3;
      // A load event can still fire after an abort; honor the signal first.
      if ((_a3 = init.signal) == null ? void 0 : _a3.aborted) {
        reject(new DOMException("The user aborted the request.", "AbortError"));
        return;
      }
      // Rebuild a Headers object from the raw CRLF-separated header block.
      const headers = new Headers();
      const rawHeaders = xhr.getAllResponseHeaders().trim().split(/[\r\n]+/);
      rawHeaders.forEach((line) => {
        const parts = line.split(": ");
        const key = parts.shift();
        const value = parts.join(": ");
        if (key) headers.set(key.toLowerCase(), value);
      });
      const response = new Response(xhr.response, {
        status: xhr.status,
        statusText: xhr.statusText,
        headers
      });
      resolve(response);
    };
    xhr.onerror = () => {
      // Matches fetch, which rejects with a TypeError on network errors.
      reject(new TypeError("Network request failed"));
    };
    xhr.ontimeout = () => {
      reject(new TypeError("Network request timed out"));
    };
    xhr.onabort = () => {
      reject(new DOMException("The user aborted a request.", "AbortError"));
    };
    if (init.headers) {
      const headers = new Headers(init.headers);
      headers.forEach((value, key) => {
        xhr.setRequestHeader(key, value);
      });
    }
    if (init.signal) {
      init.signal.addEventListener("abort", () => {
        xhr.abort();
      });
      // The signal may already be aborted before send(); bail out early.
      if (init.signal.aborted) {
        xhr.abort();
        return;
      }
    }
    xhr.send(body);
  });
};
// src/request.ts
/**
 * Picks the best available transport. With progress reporting requested,
 * prefer streaming fetch, then XHR; otherwise plain fetch, then XHR.
 */
var blobRequest = async ({
  input,
  init,
  onUploadProgress
}) => {
  if (onUploadProgress && hasFetchWithUploadProgress) {
    return blobFetch({ input, init, onUploadProgress });
  }
  if (onUploadProgress && hasXhr) {
    return blobXhr({ input, init, onUploadProgress });
  }
  if (hasFetch) {
    return blobFetch({ input, init });
  }
  if (hasXhr) {
    return blobXhr({ input, init });
  }
  throw new Error("No request implementation available");
};
// src/api.ts
// Server-enforced maximum pathname length, in characters.
var MAXIMUM_PATHNAME_LENGTH = 950;
// One BlobError subclass per API error condition; messages are user-facing.
var BlobAccessError = class extends BlobError {
  constructor() {
    super("Access denied, please provide a valid token for this resource.");
  }
};
var BlobContentTypeNotAllowedError = class extends BlobError {
  constructor(message) {
    super(`Content type mismatch, ${message}.`);
  }
};
var BlobPathnameMismatchError = class extends BlobError {
  constructor(message) {
    super(
      `Pathname mismatch, ${message}. Check the pathname used in upload() or put() matches the one from the client token.`
    );
  }
};
var BlobClientTokenExpiredError = class extends BlobError {
  constructor() {
    super("Client token has expired.");
  }
};
var BlobFileTooLargeError = class extends BlobError {
  constructor(message) {
    super(`File is too large, ${message}.`);
  }
};
var BlobStoreNotFoundError = class extends BlobError {
  constructor() {
    super("This store does not exist.");
  }
};
var BlobStoreSuspendedError = class extends BlobError {
  constructor() {
    super("This store has been suspended.");
  }
};
var BlobUnknownError = class extends BlobError {
  constructor() {
    super("Unknown error, please visit https://vercel.com/help.");
  }
};
var BlobNotFoundError = class extends BlobError {
  constructor() {
    super("The requested blob does not exist");
  }
};
var BlobServiceNotAvailable = class extends BlobError {
  constructor() {
    super("The blob service is currently not available. Please try again.");
  }
};
// Carries the parsed Retry-After value in `retryAfter` (seconds; 0 when absent).
var BlobServiceRateLimited = class extends BlobError {
  constructor(seconds) {
    super(
      `Too many requests please lower the number of concurrent requests ${seconds ? ` - try again in ${seconds} seconds` : ""}.`
    );
    this.retryAfter = seconds != null ? seconds : 0;
  }
};
var BlobRequestAbortedError = class extends BlobError {
  constructor() {
    super("The request was aborted.");
  }
};
var BlobPreconditionFailedError = class extends BlobError {
  constructor() {
    super("Precondition failed: ETag mismatch.");
  }
};
// Default wire protocol version, sent as the `x-api-version` header.
var BLOB_API_VERSION = 12;
// Returns the API version as a string, honoring the *_OVERRIDE env vars.
// The try/catch covers runtimes without `process`.
function getApiVersion() {
  let versionOverride = null;
  try {
    versionOverride = process.env.VERCEL_BLOB_API_VERSION_OVERRIDE || process.env.NEXT_PUBLIC_VERCEL_BLOB_API_VERSION_OVERRIDE;
  } catch {
  }
  return String(versionOverride ?? BLOB_API_VERSION);
}
/**
 * Number of retry attempts for API requests, configurable via the
 * VERCEL_BLOB_RETRIES environment variable (default 10).
 * Fix: `parseInt` does not throw on garbage, so a non-numeric value used to
 * return NaN (which was then handed to the retry helper); it now falls back
 * to the default. The try/catch covers runtimes without `process`.
 */
function getRetries() {
  try {
    const parsed = parseInt(process.env.VERCEL_BLOB_RETRIES || "10", 10);
    return Number.isNaN(parsed) ? 10 : parsed;
  } catch {
    return 10;
  }
}
// Builds a BlobServiceRateLimited error from a 429 response, parsing the
// Retry-After header when present.
function createBlobServiceRateLimited(response) {
  const retryAfterHeader = response.headers.get("retry-after");
  const seconds = retryAfterHeader ? parseInt(retryAfterHeader, 10) : void 0;
  return new BlobServiceRateLimited(seconds);
}
/**
 * Maps an error API response to an SDK error code plus error instance. The
 * server's code is refined by message sniffing for a few conditions the API
 * reports under a generic code (content type, pathname mismatch, expired
 * token, oversized file).
 */
async function getBlobError(response) {
  let code;
  let message;
  try {
    const data = await response.json();
    code = data.error?.code ?? "unknown_error";
    message = data.error?.message;
  } catch {
    // Non-JSON body (e.g. an HTML error page).
    code = "unknown_error";
  }
  // Refine generic codes based on well-known message fragments.
  if (message?.includes("contentType") && message.includes("is not allowed")) {
    code = "content_type_not_allowed";
  }
  if (message?.includes('"pathname"') && message.includes("does not match the token payload")) {
    code = "client_token_pathname_mismatch";
  }
  if (message === "Token expired") {
    code = "client_token_expired";
  }
  if (message?.includes("the file length cannot be greater than")) {
    code = "file_too_large";
  }
  // One factory per recognized code; "unknown_error", "not_allowed" and
  // anything unrecognized map to the generic error via the fallback.
  const factories = {
    store_suspended: () => new BlobStoreSuspendedError(),
    forbidden: () => new BlobAccessError(),
    content_type_not_allowed: () => new BlobContentTypeNotAllowedError(message),
    client_token_pathname_mismatch: () => new BlobPathnameMismatchError(message),
    client_token_expired: () => new BlobClientTokenExpiredError(),
    file_too_large: () => new BlobFileTooLargeError(message),
    not_found: () => new BlobNotFoundError(),
    store_not_found: () => new BlobStoreNotFoundError(),
    bad_request: () => new BlobError(message ?? "Bad request"),
    service_unavailable: () => new BlobServiceNotAvailable(),
    rate_limited: () => createBlobServiceRateLimited(response),
    precondition_failed: () => new BlobPreconditionFailedError()
  };
  const build = factories[code] ?? (() => new BlobUnknownError());
  return { code, error: build() };
}
/**
 * Core API request wrapper: adds auth/version/tracing headers, reports upload
 * progress, retries transient failures, and resolves with the parsed JSON
 * response body.
 * NOTE(review): assumes tokens are underscore-separated with the store id in
 * the 4th position — confirm against the token format before relying on it.
 */
async function requestApi(pathname, init, commandOptions) {
  const apiVersion = getApiVersion();
  const token = getTokenFromOptionsOrEnv(commandOptions);
  const extraHeaders = getProxyThroughAlternativeApiHeaderFromEnv();
  const [, , , storeId = ""] = token.split("_");
  // Correlation id shared by every retry attempt of this logical request.
  const requestId = `${storeId}:${Date.now()}:${Math.random().toString(16).slice(2)}`;
  let retryCount = 0;
  let bodyLength = 0;
  let totalLoaded = 0;
  const sendBodyLength = (commandOptions == null ? void 0 : commandOptions.onUploadProgress) || shouldUseXContentLength();
  if (init.body && // 1. For upload progress we always need to know the total size of the body
  // 2. In development we need the header for put() to work correctly when passing a stream
  sendBodyLength) {
    bodyLength = computeBodyLength(init.body);
  }
  // Emit an initial 0% event so consumers can render progress immediately.
  if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
    commandOptions.onUploadProgress({
      loaded: 0,
      total: bodyLength,
      percentage: 0
    });
  }
  const apiResponse = await retry(
    async (bail) => {
      let res;
      try {
        res = await blobRequest({
          input: getApiUrl(pathname),
          init: {
            ...init,
            headers: {
              "x-api-blob-request-id": requestId,
              "x-api-blob-request-attempt": String(retryCount),
              "x-api-version": apiVersion,
              ...sendBodyLength ? { "x-content-length": String(bodyLength) } : {},
              authorization: `Bearer ${token}`,
              ...extraHeaders,
              // Caller-provided headers win over the computed ones.
              ...init.headers
            }
          },
          onUploadProgress: (commandOptions == null ? void 0 : commandOptions.onUploadProgress) ? (loaded) => {
            var _a3;
            const total = bodyLength !== 0 ? bodyLength : loaded;
            totalLoaded = loaded;
            const percentage = bodyLength > 0 ? Number((loaded / total * 100).toFixed(2)) : 0;
            // The final 100% event is emitted exactly once, after the
            // response arrives (see below), not from here.
            if (percentage === 100 && bodyLength > 0) {
              return;
            }
            (_a3 = commandOptions.onUploadProgress) == null ? void 0 : _a3.call(commandOptions, {
              loaded,
              // When passing a stream to put(), we have no way to know the total size of the body.
              // Instead of defining total as total?: number we decided to set the total to the currently
              // loaded number. This is not inaccurate and way more practical for DX.
              // Passing down a stream to put() is very rare
              total,
              percentage
            });
          } : void 0
        });
      } catch (error2) {
        // User abort: stop retrying and surface a dedicated error.
        if (error2 instanceof DOMException2 && error2.name === "AbortError") {
          bail(new BlobRequestAbortedError());
          return;
        }
        // Network errors are transient: rethrow so `retry` tries again.
        if (isNetworkError(error2)) {
          throw error2;
        }
        // Any other TypeError is a programming error; fail fast.
        if (error2 instanceof TypeError) {
          bail(error2);
          return;
        }
        throw error2;
      }
      if (res.ok) {
        return res;
      }
      const { code, error } = await getBlobError(res);
      // Only transient server-side failures are retried; everything else bails.
      if (code === "unknown_error" || code === "service_unavailable" || code === "internal_server_error") {
        throw error;
      }
      bail(error);
    },
    {
      retries: getRetries(),
      onRetry: (error) => {
        if (error instanceof Error) {
          debug(`retrying API request to ${pathname}`, error.message);
        }
        retryCount = retryCount + 1;
      }
    }
  );
  // Defensive: `retry` resolves undefined after bail(); guard before use.
  if (!apiResponse) {
    throw new BlobUnknownError();
  }
  // Final 100% event uses the byte count actually observed during upload.
  if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
    commandOptions.onUploadProgress({
      loaded: totalLoaded,
      total: totalLoaded,
      percentage: 100
    });
  }
  return await apiResponse.json();
}
// Builds the optional x-proxy-through-alternative-api header from env vars:
// the server-side variable wins over the NEXT_PUBLIC_ variant. The try/catch
// covers runtimes without `process`.
function getProxyThroughAlternativeApiHeaderFromEnv() {
  const extraHeaders = {};
  try {
    const fromServer = process.env.VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    const fromClient = process.env.NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    const value = fromServer !== void 0 ? fromServer : fromClient;
    if (value !== void 0) {
      extraHeaders["x-proxy-through-alternative-api"] = value;
    }
  } catch {
  }
  return extraHeaders;
}
// Development flag: send the x-content-length header when the env var is "1".
// The try/catch covers runtimes without `process`.
function shouldUseXContentLength() {
  try {
    const flag = process.env.VERCEL_BLOB_USE_X_CONTENT_LENGTH;
    return flag === "1";
  } catch {
    return false;
  }
}
// src/put-helpers.ts
// Maps put() option names to their wire header names.
var putOptionHeaderMap = {
  cacheControlMaxAge: "x-cache-control-max-age",
  addRandomSuffix: "x-add-random-suffix",
  allowOverwrite: "x-allow-overwrite",
  contentType: "x-content-type",
  access: "x-vercel-blob-access",
  ifMatch: "x-if-match"
};
/**
 * Translates validated put() options into their wire headers, emitting only
 * the options listed in `allowedOptions` (the client and server surfaces
 * allow different sets).
 */
function createPutHeaders(allowedOptions, options) {
  const headers = { [putOptionHeaderMap.access]: options.access };
  const allows = (name) => allowedOptions.includes(name);
  if (allows("contentType") && options.contentType) {
    headers[putOptionHeaderMap.contentType] = options.contentType;
  }
  // Booleans travel as "1"/"0"; `undefined` means "not specified" and is omitted.
  if (allows("addRandomSuffix") && options.addRandomSuffix !== void 0) {
    headers[putOptionHeaderMap.addRandomSuffix] = options.addRandomSuffix ? "1" : "0";
  }
  if (allows("allowOverwrite") && options.allowOverwrite !== void 0) {
    headers[putOptionHeaderMap.allowOverwrite] = options.allowOverwrite ? "1" : "0";
  }
  if (allows("cacheControlMaxAge") && options.cacheControlMaxAge !== void 0) {
    headers[putOptionHeaderMap.cacheControlMaxAge] = options.cacheControlMaxAge.toString();
  }
  if (allows("ifMatch") && options.ifMatch) {
    headers[putOptionHeaderMap.ifMatch] = options.ifMatch;
  }
  return headers;
}
/**
 * Validates the pathname and options shared by all put-like calls, runs the
 * caller-supplied extra checks, and resolves the token when a `getToken`
 * callback is provided (client uploads). Returns the options object,
 * possibly with `token` set.
 */
async function createPutOptions({
  pathname,
  options,
  extraChecks,
  getToken
}) {
  if (!pathname) {
    throw new BlobError("pathname is required");
  }
  if (pathname.length > MAXIMUM_PATHNAME_LENGTH) {
    throw new BlobError(
      `pathname is too long, maximum length is ${MAXIMUM_PATHNAME_LENGTH}`
    );
  }
  // Reject the first disallowed sequence found in the pathname.
  const offending = disallowedPathnameCharacters.find((character) => pathname.includes(character));
  if (offending !== void 0) {
    throw new BlobError(
      `pathname cannot contain "${offending}", please encode it if needed`
    );
  }
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  if (options.access !== "public" && options.access !== "private") {
    throw new BlobError('access must be "public" or "private"');
  }
  if (extraChecks) {
    extraChecks(options);
  }
  if (getToken) {
    options.token = await getToken(pathname, options);
  }
  return options;
}
// src/multipart/complete.ts
// Factory for the standalone completeMultipartUpload() method: validates
// options, builds headers, then finalizes the upload identified by
// options.uploadId / options.key.
function createCompleteMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, parts, optionsInput) => {
    const options = await createPutOptions({ pathname, options: optionsInput, extraChecks, getToken });
    return completeMultipartUpload({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      headers: createPutHeaders(allowedOptions, options),
      options,
      parts
    });
  };
}
/**
 * Finalizes a multipart upload by POSTing the collected part descriptors.
 * Low-level fetch network failures are surfaced as BlobServiceNotAvailable.
 */
async function completeMultipartUpload({
  uploadId,
  key,
  pathname,
  parts,
  headers,
  options
}) {
  const params = new URLSearchParams({ pathname });
  try {
    const response = await requestApi(
      `/mpu?${params.toString()}`,
      {
        method: "POST",
        headers: {
          ...headers,
          "content-type": "application/json",
          "x-mpu-action": "complete",
          "x-mpu-upload-id": uploadId,
          // key can be any utf8 character so we need to encode it as HTTP headers can only be us-ascii
          // https://www.rfc-editor.org/rfc/rfc7230#section-3.2.4
          "x-mpu-key": encodeURIComponent(key)
        },
        body: JSON.stringify(parts),
        signal: options.abortSignal
      },
      options
    );
    debug("mpu: complete", response);
    return response;
  } catch (error) {
    const isFetchNetworkError = error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed");
    if (isFetchNetworkError) {
      throw new BlobServiceNotAvailable();
    }
    throw error;
  }
}
// src/multipart/create.ts
// Factory for the standalone createMultipartUpload() method: validates
// options and returns only the identifiers needed for follow-up part uploads.
function createCreateMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({ pathname, options: optionsInput, extraChecks, getToken });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(pathname, headers, options);
    return { key, uploadId };
  };
}
/**
 * Starts a multipart upload and returns the server-assigned key/uploadId.
 * Low-level fetch network failures are surfaced as BlobServiceNotAvailable.
 */
async function createMultipartUpload(pathname, headers, options) {
  debug("mpu: create", "pathname:", pathname);
  const params = new URLSearchParams({ pathname });
  try {
    const response = await requestApi(
      `/mpu?${params.toString()}`,
      {
        method: "POST",
        headers: { ...headers, "x-mpu-action": "create" },
        signal: options.abortSignal
      },
      options
    );
    debug("mpu: create", response);
    return response;
  } catch (error) {
    const isFetchNetworkError = error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed");
    if (isFetchNetworkError) {
      throw new BlobServiceNotAvailable();
    }
    throw error;
  }
}
// src/multipart/upload.ts
import throttle from "throttleit";
// Factory for the standalone uploadPart() method: validates options, rejects
// plain-object bodies, uploads one part and echoes back its etag/partNumber.
function createUploadPartMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, body, optionsInput) => {
    const options = await createPutOptions({ pathname, options: optionsInput, extraChecks, getToken });
    const headers = createPutHeaders(allowedOptions, options);
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const { etag } = await uploadPart({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      part: { blob: body, partNumber: options.partNumber },
      headers,
      options
    });
    return { etag, partNumber: options.partNumber };
  };
}
/**
 * Uploads a single part of a multipart upload.
 *
 * The caller's `options.abortSignal` is bridged onto `internalAbortController`
 * so the multipart machinery can also cancel the request.
 * Fix: the bridge listener is now removed in a `finally`, so a rejected
 * request no longer leaves a stale "abort" listener attached to the caller's
 * (potentially long-lived) signal — previously removeEventListener was only
 * reached on success.
 */
async function uploadPart({
  uploadId,
  key,
  pathname,
  headers,
  options,
  internalAbortController = new AbortController(),
  part
}) {
  const params = new URLSearchParams({ pathname });
  // Start the request first; the abort wiring below can cancel it mid-flight.
  const responsePromise = requestApi(
    `/mpu?${params.toString()}`,
    {
      signal: internalAbortController.signal,
      method: "POST",
      headers: {
        ...headers,
        "x-mpu-action": "upload",
        // key can be any utf8 character so we need to encode it as HTTP
        // headers can only be us-ascii
        "x-mpu-key": encodeURIComponent(key),
        "x-mpu-upload-id": uploadId,
        "x-mpu-part-number": part.partNumber.toString()
      },
      // weird things between undici types and native fetch types
      body: part.blob
    },
    options
  );
  function handleAbort() {
    internalAbortController.abort();
  }
  if (options.abortSignal?.aborted) {
    handleAbort();
  } else {
    options.abortSignal?.addEventListener("abort", handleAbort);
  }
  try {
    return await responsePromise;
  } finally {
    options.abortSignal?.removeEventListener("abort", handleAbort);
  }
}
// Concurrency cap: 6 in browser-like environments, 8 elsewhere — presumably
// to stay under per-host connection limits; confirm before tuning.
var maxConcurrentUploads = typeof window !== "undefined" ? 6 : 8;
// Size of each uploaded part (8 MiB), except possibly the final one.
var partSizeInBytes = 8 * 1024 * 1024;
// Upper bound on buffered-but-unsent bytes; stream reading pauses above this.
var maxBytesInMemory = maxConcurrentUploads * partSizeInBytes * 2;
function uploadAllParts({
uploadId,
key,
pathname,
stream,
headers,
options,
totalToLoad
}) {
debug("mpu: upload init", "key:", key);
const internalAbortController = new AbortController();
return new Promise((resolve, reject) => {
const partsToUpload = [];
const completedParts = [];
const reader = stream.getReader();
let activeUploads = 0;
let reading = false;
let currentPartNumber = 1;
let rejected = false;
let currentBytesInMemory = 0;
let doneReading = false;
let bytesSent = 0;
let arrayBuffers = [];
let currentPartBytesRead = 0;
let onUploadProgress;
const totalLoadedPerPartNumber = {};
if (options.onUploadProgress) {
onUploadProgress = throttle(() => {
var _a3;
const loaded = Object.values(totalLoadedPerPartNumber).reduce(
(acc, cur) => {
return acc + cur;
},
0
);
const total = totalToLoad || loaded;
const percentage = totalToLoad > 0 ? Number(((loaded / totalToLoad || loaded) * 100).toFixed(2)) : 0;
(_a3 = options.onUploadProgress) == null ? void 0 : _a3.call(options, { loaded, total, percentage });
}, 150);
}
read().catch(cancel);
async function read() {
debug(
"mpu: upload read start",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = true;
while (currentBytesInMemory < maxBytesInMemory && !rejected) {
try {
const { value, done } = await reader.read();
if (done) {
doneReading = true;
debug("mpu: upload read consumed the whole stream");
if (arrayBuffers.length > 0) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
sendParts();
}
reading = false;
return;
}
currentBytesInMemory += value.byteLength;
let valueOffset = 0;
while (valueOffset < value.byteLength) {
const remainingPartSize = partSizeInBytes - currentPartBytesRead;
const endOffset = Math.min(
valueOffset + remainingPartSize,
value.byteLength
);
const chunk = value.slice(valueOffset, endOffset);
arrayBuffers.push(chunk);
currentPartBytesRead += chunk.byteLength;
valueOffset = endOffset;
if (currentPartBytesRead === partSizeInBytes) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
arrayBuffers = [];
currentPartBytesRead = 0;
sendParts();
}
}
} catch (error) {
cancel(error);
}
}
debug(
"mpu: upload read end",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = false;
}
async function sendPart(part) {
activeUploads++;
debug(
"mpu: upload send part start",
"partNumber:",
part.partNumber,
"size:",
part.blob.size,
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
try {
const uploadProgressForPart = options.onUploadProgress ? (event) => {
totalLoadedPerPartNumber[part.partNumber] = event.loaded;
if (onUploadProgress) {
onUploadProgress();
}
} : void 0;
const completedPart = await uploadPart({
uploadId,
key,
pathname,
headers,
options: {
...options,
onUploadProgress: uploadProgressForPart
},
internalAbortController,
part
});
debug(
"mpu: upload send part end",
"partNumber:",
part.partNumber,
"activeUploads",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
if (rejected) {
return;
}
completedParts.push({
partNumber: part.partNumber,
etag: completedPart.etag
});
currentBytesInMemory -= part.blob.size;
activeUploads--;
bytesSent += part.blob.size;
if (partsToUpload.length > 0) {
sendParts();
}
if (doneReading) {
if (activeUploads === 0) {
reader.releaseLock();
resolve(completedParts);
}
return;
}
if (!reading) {
read().catch(cancel);
}
} catch (error) {
cancel(error);
}
}
// Drains the queue of fully-buffered parts, kicking off uploads until either
// the concurrency cap (maxConcurrentUploads) is reached or the queue is empty.
// Becomes a no-op once the surrounding upload has been rejected.
function sendParts() {
  if (rejected) {
    return;
  }
  debug(
    "send parts",
    "activeUploads",
    activeUploads,
    "partsToUpload",
    partsToUpload.length
  );
  for (;;) {
    if (activeUploads >= maxConcurrentUploads || partsToUpload.length === 0) {
      break;
    }
    const nextPart = partsToUpload.shift();
    if (nextPart) {
      // Fire-and-forget: sendPart handles its own errors via cancel().
      void sendPart(nextPart);
    }
  }
}
// Aborts the whole multipart upload exactly once: flips the rejected flag,
// cancels in-flight part requests, releases the stream reader, and rejects
// the surrounding promise. Low-level fetch network failures are normalized
// into BlobServiceNotAvailable so callers see a stable error type.
function cancel(error) {
  if (rejected) {
    return;
  }
  rejected = true;
  internalAbortController.abort();
  reader.releaseLock();
  const isNetworkFailure =
    error instanceof TypeError &&
    (error.message === "Failed to fetch" || error.message === "fetch failed");
  if (isNetworkFailure) {
    reject(new BlobServiceNotAvailable());
  } else {
    reject(error);
  }
}
});
}
// src/multipart/create-uploader.ts
// Factory for the "uploader object" flavor of multipart uploads: the returned
// async function initiates a multipart upload and hands back a small object
// carrying the upload's key/uploadId plus uploadPart()/complete() methods that
// close over the resolved options and headers.
function createCreateMultipartUploaderMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(
      pathname,
      headers,
      options
    );
    return {
      key,
      uploadId,
      // Uploads one part; rejects plain objects early with a helpful message.
      uploadPart: async (partNumber, body) => {
        if (isPlainObject(body)) {
          throw new BlobError(
            "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
          );
        }
        const { etag } = await uploadPart({
          uploadId,
          key,
          pathname,
          part: { partNumber, blob: body },
          headers,
          options
        });
        return { etag, partNumber };
      },
      // Finalizes the multipart upload from the collected parts.
      complete: async (parts) =>
        completeMultipartUpload({
          uploadId,
          key,
          pathname,
          parts,
          headers,
          options
        })
    };
  };
}
// src/put.ts
import throttle2 from "throttleit";
// src/multipart/uncontrolled.ts
// Runs a full "uncontrolled" multipart upload: create the upload, stream the
// body through uploadAllParts (which splits it into parts), then complete it.
// Resolves to the finished blob's metadata.
async function uncontrolledMultipartUpload(pathname, body, headers, options) {
  debug("mpu: init", "pathname:", pathname, "headers:", headers);
  // Progress is reported per part inside uploadAllParts; strip the callback
  // for the create/complete requests so those don't report progress too.
  const optionsWithoutOnUploadProgress = {
    ...options,
    onUploadProgress: void 0
  };
  const { uploadId, key } = await createMultipartUpload(
    pathname,
    headers,
    optionsWithoutOnUploadProgress
  );
  const totalToLoad = computeBodyLength(body);
  const stream = await toReadableStream(body);
  const parts = await uploadAllParts({
    uploadId,
    key,
    pathname,
    // @ts-expect-error ReadableStream<ArrayBuffer | Uint8Array> is compatible at runtime
    stream,
    headers,
    options,
    totalToLoad
  });
  return completeMultipartUpload({
    uploadId,
    key,
    pathname,
    parts,
    headers,
    options: optionsWithoutOnUploadProgress
  });
}
// src/put.ts
// Factory for put(): validates the body, resolves options/headers, and either
// delegates to the multipart path (options.multipart === true) or performs a
// single PUT request, returning the created blob's metadata.
function createPutMethod({
  allowedOptions,
  getToken,
  extraChecks
}) {
  return async function put(pathname, body, optionsInput) {
    if (!body) {
      throw new BlobError("body is required");
    }
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    if (options.multipart === true) {
      return uncontrolledMultipartUpload(pathname, body, headers, options);
    }
    // Throttle progress callbacks to at most one every 100ms.
    const onUploadProgress = options.onUploadProgress
      ? throttle2(options.onUploadProgress, 100)
      : void 0;
    const searchParams = new URLSearchParams({ pathname });
    const response = await requestApi(
      `/?${searchParams.toString()}`,
      {
        method: "PUT",
        body,
        headers,
        signal: options.abortSignal
      },
      {
        ...options,
        onUploadProgress
      }
    );
    // Surface only the documented PutBlobResult fields.
    return {
      url: response.url,
      downloadUrl: response.downloadUrl,
      pathname: response.pathname,
      contentType: response.contentType,
      contentDisposition: response.contentDisposition,
      etag: response.etag
    };
  };
}
// src/create-folder.ts
// Creates a "folder" marker blob. Vercel Blob has no real folders; a pathname
// ending in "/" is rendered as a folder by the dashboard, so this normalizes
// the pathname to a trailing slash and PUTs an empty object with a fixed
// access header and random suffixes disabled.
async function createFolder(pathname, options) {
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  if (options.access !== "public" && options.access !== "private") {
    throw new BlobError('access must be "public" or "private"');
  }
  const folderPathname = pathname.endsWith("/") ? pathname : `${pathname}/`;
  const headers = {
    [putOptionHeaderMap.access]: options.access,
    // Folder markers must keep their exact name — never add a random suffix.
    [putOptionHeaderMap.addRandomSuffix]: "0"
  };
  const searchParams = new URLSearchParams({ pathname: folderPathname });
  const response = await requestApi(
    `/?${searchParams.toString()}`,
    {
      method: "PUT",
      headers,
      signal: options.abortSignal
    },
    options
  );
  return {
    url: response.url,
    pathname: response.pathname
  };
}
export {
getTokenFromOptionsOrEnv,
BlobError,
getDownloadUrl,
disallowedPathnameCharacters,
MAXIMUM_PATHNAME_LENGTH,
BlobAccessError,
BlobContentTypeNotAllowedError,
BlobPathnameMismatchError,
BlobClientTokenExpiredError,
BlobFileTooLargeError,
BlobStoreNotFoundError,
BlobStoreSuspendedError,
BlobUnknownError,
BlobNotFoundError,
BlobServiceNotAvailable,
BlobServiceRateLimited,
BlobRequestAbortedError,
BlobPreconditionFailedError,
requestApi,
createCompleteMultipartUploadMethod,
createCreateMultipartUploadMethod,
createUploadPartMethod,
createCreateMultipartUploaderMethod,
createPutMethod,
createFolder
};
/*!
* bytes
* Copyright(c) 2012-2014 TJ Holowaychuk
* Copyright(c) 2015 Jed Watson
* MIT Licensed
*/
//# sourceMappingURL=chunk-JX6ZYJUG.js.map

Sorry, the diff of this file is too big to display

import { Readable } from 'stream';
import { File } from 'undici';
interface BlobCommandOptions {
/**
* Define your blob API token.
* @defaultvalue process.env.BLOB_READ_WRITE_TOKEN
*/
token?: string;
/**
* `AbortSignal` to cancel the running request. See https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
*/
abortSignal?: AbortSignal;
}
/**
* The access level of a blob.
* - 'public': The blob is publicly accessible via its URL.
* - 'private': The blob requires authentication to access.
*/
type BlobAccessType = 'public' | 'private';
interface CommonCreateBlobOptions extends BlobCommandOptions {
/**
* Whether the blob should be publicly accessible.
* - 'public': The blob will be publicly accessible via its URL.
* - 'private': The blob will require authentication to access.
*/
access: BlobAccessType;
/**
* Adds a random suffix to the filename.
* @defaultvalue false
*/
addRandomSuffix?: boolean;
/**
* Allow overwriting an existing blob. By default this is set to false and will throw an error if the blob already exists.
* @defaultvalue false
*/
allowOverwrite?: boolean;
/**
* Defines the content type of the blob. By default, this value is inferred from the pathname. Sent as the 'content-type' header when downloading a blob.
*/
contentType?: string;
/**
* Number in seconds to configure the edge and browser cache. The minimum is 1 minute. There's no maximum but keep in mind that browser and edge caches will do a best effort to respect this value.
* Detailed documentation can be found here: https://vercel.com/docs/storage/vercel-blob#caching
* @defaultvalue 30 * 24 * 60 * 60 (1 Month)
*/
cacheControlMaxAge?: number;
/**
* Only perform the operation if the blob's current ETag matches this value.
* Use this for optimistic concurrency control to prevent overwriting changes made by others.
* If the ETag doesn't match, a `BlobPreconditionFailedError` will be thrown.
*/
ifMatch?: string;
}
/**
* Event object passed to the onUploadProgress callback.
*/
interface UploadProgressEvent {
/**
* The number of bytes uploaded.
*/
loaded: number;
/**
* The total number of bytes to upload.
*/
total: number;
/**
* The percentage of the upload that has been completed.
*/
percentage: number;
}
/**
* Callback type for tracking upload progress.
*/
type OnUploadProgressCallback = (progressEvent: UploadProgressEvent) => void;
/**
* Interface for including upload progress tracking capabilities.
*/
interface WithUploadProgress {
/**
* Callback to track the upload progress. You will receive an object with the following properties:
* - `loaded`: The number of bytes uploaded
* - `total`: The total number of bytes to upload
* - `percentage`: The percentage of the upload that has been completed
*/
onUploadProgress?: OnUploadProgressCallback;
}
declare class BlobError extends Error {
constructor(message: string);
}
/**
* Generates a download URL for a blob.
* The download URL includes a ?download=1 parameter which causes browsers to download
* the file instead of displaying it inline.
*
* @param blobUrl - The URL of the blob to generate a download URL for
* @returns A string containing the download URL with the download parameter appended
*/
declare function getDownloadUrl(blobUrl: string): string;
/**
* Result of a successful put or copy operation.
*/
interface PutBlobResult {
/**
* The URL of the blob.
*/
url: string;
/**
* A URL that will cause browsers to download the file instead of displaying it inline.
*/
downloadUrl: string;
/**
* The pathname of the blob within the store.
*/
pathname: string;
/**
* The content-type of the blob.
*/
contentType: string;
/**
* The content disposition header value.
*/
contentDisposition: string;
/**
* The ETag of the blob. Can be used with `ifMatch` for conditional writes.
*/
etag: string;
}
/**
* Represents the body content for a put operation.
* Can be one of several supported types.
*/
type PutBody = string | Readable | Buffer | Blob | ArrayBuffer | ReadableStream | File;
/**
* Input format for a multipart upload part.
* Used internally for processing multipart uploads.
*/
interface PartInput {
/**
* The part number (1-based index).
*/
partNumber: number;
/**
* The content of the part.
*/
blob: PutBody;
}
/**
* Represents a single part of a multipart upload.
* This structure is used when completing a multipart upload to specify the
* uploaded parts and their order.
*/
interface Part {
/**
* The ETag value returned when the part was uploaded.
* This value is used to verify the integrity of the uploaded part.
*/
etag: string;
/**
* The part number of this part (1-based).
* This number is used to order the parts when completing the multipart upload.
*/
partNumber: number;
}
/**
* Options for completing a multipart upload.
* Used with the completeMultipartUpload method.
*/
interface CommonCompleteMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to identify which multipart upload is being completed.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
}
type CompleteMultipartUploadCommandOptions = CommonCompleteMultipartUploadOptions & CommonCreateBlobOptions;
/**
* Options for uploading a part in a multipart upload process.
* Used with the uploadPart method.
*/
interface CommonMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to associate all uploaded parts with the same multipart upload.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
/**
* A number identifying which part is being uploaded (1-based).
* This number is used to order the parts when completing the multipart upload.
* Parts must be uploaded with consecutive part numbers starting from 1.
*/
partNumber: number;
}
type UploadPartCommandOptions = CommonMultipartUploadOptions & CommonCreateBlobOptions;
type CreateFolderCommandOptions = Pick<CommonCreateBlobOptions, 'access' | 'token' | 'abortSignal'>;
interface CreateFolderResult {
pathname: string;
url: string;
}
/**
* Creates a folder in your store. Vercel Blob has no real concept of folders, our file browser on Vercel.com displays folders based on the presence of trailing slashes in the pathname. Unless you are building a file browser system, you probably don't need to use this method.
*
* Use the resulting `url` to delete the folder, just like you would delete a blob.
* @param pathname - Can be user1/ or user1/avatars/
* @param options - Additional options including required `access` ('public' or 'private') and optional `token`
*/
declare function createFolder(pathname: string, options: CreateFolderCommandOptions): Promise<CreateFolderResult>;
export { type BlobAccessType as B, type CommonCompleteMultipartUploadOptions as C, type OnUploadProgressCallback as O, type PutBlobResult as P, type UploadPartCommandOptions as U, type WithUploadProgress as W, type BlobCommandOptions as a, type Part as b, type PutBody as c, type CommonMultipartUploadOptions as d, createFolder as e, type CommonCreateBlobOptions as f, BlobError as g, type CompleteMultipartUploadCommandOptions as h, type CreateFolderCommandOptions as i, type CreateFolderResult as j, type PartInput as k, type UploadProgressEvent as l, getDownloadUrl as m };
import { Readable } from 'stream';
import { File } from 'undici';
interface BlobCommandOptions {
/**
* Define your blob API token.
* @defaultvalue process.env.BLOB_READ_WRITE_TOKEN
*/
token?: string;
/**
* `AbortSignal` to cancel the running request. See https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
*/
abortSignal?: AbortSignal;
}
/**
* The access level of a blob.
* - 'public': The blob is publicly accessible via its URL.
* - 'private': The blob requires authentication to access.
*/
type BlobAccessType = 'public' | 'private';
interface CommonCreateBlobOptions extends BlobCommandOptions {
/**
* Whether the blob should be publicly accessible.
* - 'public': The blob will be publicly accessible via its URL.
* - 'private': The blob will require authentication to access.
*/
access: BlobAccessType;
/**
* Adds a random suffix to the filename.
* @defaultvalue false
*/
addRandomSuffix?: boolean;
/**
* Allow overwriting an existing blob. By default this is set to false and will throw an error if the blob already exists.
* @defaultvalue false
*/
allowOverwrite?: boolean;
/**
* Defines the content type of the blob. By default, this value is inferred from the pathname. Sent as the 'content-type' header when downloading a blob.
*/
contentType?: string;
/**
* Number in seconds to configure the edge and browser cache. The minimum is 1 minute. There's no maximum but keep in mind that browser and edge caches will do a best effort to respect this value.
* Detailed documentation can be found here: https://vercel.com/docs/storage/vercel-blob#caching
* @defaultvalue 30 * 24 * 60 * 60 (1 Month)
*/
cacheControlMaxAge?: number;
/**
* Only perform the operation if the blob's current ETag matches this value.
* Use this for optimistic concurrency control to prevent overwriting changes made by others.
* If the ETag doesn't match, a `BlobPreconditionFailedError` will be thrown.
*/
ifMatch?: string;
}
/**
* Event object passed to the onUploadProgress callback.
*/
interface UploadProgressEvent {
/**
* The number of bytes uploaded.
*/
loaded: number;
/**
* The total number of bytes to upload.
*/
total: number;
/**
* The percentage of the upload that has been completed.
*/
percentage: number;
}
/**
* Callback type for tracking upload progress.
*/
type OnUploadProgressCallback = (progressEvent: UploadProgressEvent) => void;
/**
* Interface for including upload progress tracking capabilities.
*/
interface WithUploadProgress {
/**
* Callback to track the upload progress. You will receive an object with the following properties:
* - `loaded`: The number of bytes uploaded
* - `total`: The total number of bytes to upload
* - `percentage`: The percentage of the upload that has been completed
*/
onUploadProgress?: OnUploadProgressCallback;
}
declare class BlobError extends Error {
constructor(message: string);
}
/**
* Generates a download URL for a blob.
* The download URL includes a ?download=1 parameter which causes browsers to download
* the file instead of displaying it inline.
*
* @param blobUrl - The URL of the blob to generate a download URL for
* @returns A string containing the download URL with the download parameter appended
*/
declare function getDownloadUrl(blobUrl: string): string;
/**
* Result of a successful put or copy operation.
*/
interface PutBlobResult {
/**
* The URL of the blob.
*/
url: string;
/**
* A URL that will cause browsers to download the file instead of displaying it inline.
*/
downloadUrl: string;
/**
* The pathname of the blob within the store.
*/
pathname: string;
/**
* The content-type of the blob.
*/
contentType: string;
/**
* The content disposition header value.
*/
contentDisposition: string;
/**
* The ETag of the blob. Can be used with `ifMatch` for conditional writes.
*/
etag: string;
}
/**
* Represents the body content for a put operation.
* Can be one of several supported types.
*/
type PutBody = string | Readable | Buffer | Blob | ArrayBuffer | ReadableStream | File;
/**
* Input format for a multipart upload part.
* Used internally for processing multipart uploads.
*/
interface PartInput {
/**
* The part number (1-based index).
*/
partNumber: number;
/**
* The content of the part.
*/
blob: PutBody;
}
/**
* Represents a single part of a multipart upload.
* This structure is used when completing a multipart upload to specify the
* uploaded parts and their order.
*/
interface Part {
/**
* The ETag value returned when the part was uploaded.
* This value is used to verify the integrity of the uploaded part.
*/
etag: string;
/**
* The part number of this part (1-based).
* This number is used to order the parts when completing the multipart upload.
*/
partNumber: number;
}
/**
* Options for completing a multipart upload.
* Used with the completeMultipartUpload method.
*/
interface CommonCompleteMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to identify which multipart upload is being completed.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
}
type CompleteMultipartUploadCommandOptions = CommonCompleteMultipartUploadOptions & CommonCreateBlobOptions;
/**
* Options for uploading a part in a multipart upload process.
* Used with the uploadPart method.
*/
interface CommonMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to associate all uploaded parts with the same multipart upload.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
/**
* A number identifying which part is being uploaded (1-based).
* This number is used to order the parts when completing the multipart upload.
* Parts must be uploaded with consecutive part numbers starting from 1.
*/
partNumber: number;
}
type UploadPartCommandOptions = CommonMultipartUploadOptions & CommonCreateBlobOptions;
type CreateFolderCommandOptions = Pick<CommonCreateBlobOptions, 'access' | 'token' | 'abortSignal'>;
interface CreateFolderResult {
pathname: string;
url: string;
}
/**
* Creates a folder in your store. Vercel Blob has no real concept of folders, our file browser on Vercel.com displays folders based on the presence of trailing slashes in the pathname. Unless you are building a file browser system, you probably don't need to use this method.
*
* Use the resulting `url` to delete the folder, just like you would delete a blob.
* @param pathname - Can be user1/ or user1/avatars/
* @param options - Additional options including required `access` ('public' or 'private') and optional `token`
*/
declare function createFolder(pathname: string, options: CreateFolderCommandOptions): Promise<CreateFolderResult>;
export { type BlobAccessType as B, type CommonCompleteMultipartUploadOptions as C, type OnUploadProgressCallback as O, type PutBlobResult as P, type UploadPartCommandOptions as U, type WithUploadProgress as W, type BlobCommandOptions as a, type Part as b, type PutBody as c, type CommonMultipartUploadOptions as d, createFolder as e, type CommonCreateBlobOptions as f, BlobError as g, type CompleteMultipartUploadCommandOptions as h, type CreateFolderCommandOptions as i, type CreateFolderResult as j, type PartInput as k, type UploadProgressEvent as l, getDownloadUrl as m };
+22
-22

@@ -10,3 +10,3 @@ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } }

var _chunk23VLASYPcjs = require('./chunk-23VLASYP.cjs');
var _chunk7IJ2KDBMcjs = require('./chunk-7IJ2KDBM.cjs');

@@ -19,3 +19,3 @@ // src/client.ts

if (!options.token.startsWith("vercel_blob_client_")) {
throw new (0, _chunk23VLASYPcjs.BlobError)(`${methodName} must be called with a client token`);
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(`${methodName} must be called with a client token`);
}

@@ -28,3 +28,3 @@ if (

) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
`${methodName} doesn't allow \`addRandomSuffix\`, \`cacheControlMaxAge\` or \`allowOverwrite\`. Configure these options at the server side when generating client tokens.`

@@ -35,11 +35,11 @@ );

}
var put = _chunk23VLASYPcjs.createPutMethod.call(void 0, {
var put = _chunk7IJ2KDBMcjs.createPutMethod.call(void 0, {
allowedOptions: ["contentType"],
extraChecks: createPutExtraChecks("client/`put`")
});
var createMultipartUpload = _chunk23VLASYPcjs.createCreateMultipartUploadMethod.call(void 0, {
var createMultipartUpload = _chunk7IJ2KDBMcjs.createCreateMultipartUploadMethod.call(void 0, {
allowedOptions: ["contentType"],
extraChecks: createPutExtraChecks("client/`createMultipartUpload`")
});
var createMultipartUploader = _chunk23VLASYPcjs.createCreateMultipartUploaderMethod.call(void 0,
var createMultipartUploader = _chunk7IJ2KDBMcjs.createCreateMultipartUploaderMethod.call(void 0,
{

@@ -50,7 +50,7 @@ allowedOptions: ["contentType"],

);
var uploadPart = _chunk23VLASYPcjs.createUploadPartMethod.call(void 0, {
var uploadPart = _chunk7IJ2KDBMcjs.createUploadPartMethod.call(void 0, {
allowedOptions: ["contentType"],
extraChecks: createPutExtraChecks("client/`multipartUpload`")
});
var completeMultipartUpload = _chunk23VLASYPcjs.createCompleteMultipartUploadMethod.call(void 0,
var completeMultipartUpload = _chunk7IJ2KDBMcjs.createCompleteMultipartUploadMethod.call(void 0,
{

@@ -61,7 +61,7 @@ allowedOptions: ["contentType"],

);
var upload = _chunk23VLASYPcjs.createPutMethod.call(void 0, {
var upload = _chunk7IJ2KDBMcjs.createPutMethod.call(void 0, {
allowedOptions: ["contentType"],
extraChecks(options) {
if (options.handleUploadUrl === void 0) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
"client/`upload` requires the 'handleUploadUrl' parameter"

@@ -77,3 +77,3 @@ );

) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
"client/`upload` doesn't allow `addRandomSuffix`, `cacheControlMaxAge`, `allowOverwrite` or `ifMatch`. Configure these options at the server side when generating client tokens."

@@ -163,3 +163,3 @@ );

var _a, _b, _c, _d;
const resolvedToken = _chunk23VLASYPcjs.getTokenFromOptionsOrEnv.call(void 0, { token });
const resolvedToken = _chunk7IJ2KDBMcjs.getTokenFromOptionsOrEnv.call(void 0, { token });
const type = body.type;

@@ -206,3 +206,3 @@ switch (type) {

if (!signature) {
throw new (0, _chunk23VLASYPcjs.BlobError)("Missing callback signature");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Missing callback signature");
}

@@ -215,3 +215,3 @@ const isVerified = await verifyCallbackSignature({

if (!isVerified) {
throw new (0, _chunk23VLASYPcjs.BlobError)("Invalid callback signature");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Invalid callback signature");
}

@@ -224,3 +224,3 @@ if (onUploadCompleted) {

default:
throw new (0, _chunk23VLASYPcjs.BlobError)("Invalid event type");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Invalid event type");
}

@@ -249,3 +249,3 @@ }

if (!res.ok) {
throw new (0, _chunk23VLASYPcjs.BlobError)("Failed to retrieve the client token");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Failed to retrieve the client token");
}

@@ -256,3 +256,3 @@ try {

} catch (e) {
throw new (0, _chunk23VLASYPcjs.BlobError)("Failed to retrieve the client token");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Failed to retrieve the client token");
}

@@ -276,3 +276,3 @@ }

if (typeof window !== "undefined") {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
'"generateClientTokenFromReadWriteToken" must be called from a server environment'

@@ -283,6 +283,6 @@ );

timestamp.setSeconds(timestamp.getSeconds() + 30);
const readWriteToken = _chunk23VLASYPcjs.getTokenFromOptionsOrEnv.call(void 0, { token });
const readWriteToken = _chunk7IJ2KDBMcjs.getTokenFromOptionsOrEnv.call(void 0, { token });
const [, , , storeId = null] = readWriteToken.split("_");
if (!storeId) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
token ? "Invalid `token` parameter" : "Invalid `BLOB_READ_WRITE_TOKEN`"

@@ -299,3 +299,3 @@ );

if (!securedKey) {
throw new (0, _chunk23VLASYPcjs.BlobError)("Unable to sign client token");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Unable to sign client token");
}

@@ -355,3 +355,3 @@ return `vercel_blob_client_${storeId}_${Buffer.from(

exports.completeMultipartUpload = completeMultipartUpload; exports.createFolder = _chunk23VLASYPcjs.createFolder; exports.createMultipartUpload = createMultipartUpload; exports.createMultipartUploader = createMultipartUploader; exports.generateClientTokenFromReadWriteToken = generateClientTokenFromReadWriteToken; exports.getPayloadFromClientToken = getPayloadFromClientToken; exports.handleUpload = handleUpload; exports.put = put; exports.upload = upload; exports.uploadPart = uploadPart;
exports.completeMultipartUpload = completeMultipartUpload; exports.createFolder = _chunk7IJ2KDBMcjs.createFolder; exports.createMultipartUpload = createMultipartUpload; exports.createMultipartUploader = createMultipartUploader; exports.generateClientTokenFromReadWriteToken = generateClientTokenFromReadWriteToken; exports.getPayloadFromClientToken = getPayloadFromClientToken; exports.handleUpload = handleUpload; exports.put = put; exports.upload = upload; exports.uploadPart = uploadPart;
//# sourceMappingURL=client.cjs.map

@@ -1,3 +0,3 @@

import { W as WithUploadProgress, B as BlobCommandOptions, P as PutBlobResult, a as Part, C as CommonCompleteMultipartUploadOptions, b as PutBody, c as CommonMultipartUploadOptions } from './create-folder-Bmttf9ub.cjs';
export { d as createFolder } from './create-folder-Bmttf9ub.cjs';
import { B as BlobAccessType, W as WithUploadProgress, a as BlobCommandOptions, P as PutBlobResult, b as Part, C as CommonCompleteMultipartUploadOptions, c as PutBody, d as CommonMultipartUploadOptions } from './create-folder-CdCq7xeS.cjs';
export { e as createFolder } from './create-folder-CdCq7xeS.cjs';
import { IncomingMessage } from 'node:http';

@@ -14,4 +14,6 @@ import 'stream';

* Whether the blob should be publicly accessible.
* - 'public': The blob will be publicly accessible via its URL.
* - 'private': The blob will require authentication to access.
*/
access: 'public';
access: BlobAccessType;
/**

@@ -57,3 +59,3 @@ * Defines the content type of the blob. By default, this value is inferred from the pathname.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -76,3 +78,3 @@ * - contentType - (Optional) The media type for the blob. By default, it's derived from the pathname.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -95,3 +97,3 @@ * - contentType - (Optional) The media type for the file. If not specified, it's derived from the file extension.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -126,3 +128,3 @@ * - contentType - (Optional) The media type for the file. If not specified, it's derived from the file extension.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -149,3 +151,3 @@ * - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -192,3 +194,3 @@ * - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - handleUploadUrl - (Required) A string specifying the route to call for generating client tokens for client uploads.

@@ -195,0 +197,0 @@ * - clientPayload - (Optional) A string to be sent to your handleUpload server code. Example use-case: attaching the post id an image relates to.

@@ -1,3 +0,3 @@

import { W as WithUploadProgress, B as BlobCommandOptions, P as PutBlobResult, a as Part, C as CommonCompleteMultipartUploadOptions, b as PutBody, c as CommonMultipartUploadOptions } from './create-folder-Bmttf9ub.js';
export { d as createFolder } from './create-folder-Bmttf9ub.js';
import { B as BlobAccessType, W as WithUploadProgress, a as BlobCommandOptions, P as PutBlobResult, b as Part, C as CommonCompleteMultipartUploadOptions, c as PutBody, d as CommonMultipartUploadOptions } from './create-folder-CdCq7xeS.js';
export { e as createFolder } from './create-folder-CdCq7xeS.js';
import { IncomingMessage } from 'node:http';

@@ -14,4 +14,6 @@ import 'stream';

* Whether the blob should be publicly accessible.
* - 'public': The blob will be publicly accessible via its URL.
* - 'private': The blob will require authentication to access.
*/
access: 'public';
access: BlobAccessType;
/**

@@ -57,3 +59,3 @@ * Defines the content type of the blob. By default, this value is inferred from the pathname.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -76,3 +78,3 @@ * - contentType - (Optional) The media type for the blob. By default, it's derived from the pathname.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -95,3 +97,3 @@ * - contentType - (Optional) The media type for the file. If not specified, it's derived from the file extension.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -126,3 +128,3 @@ * - contentType - (Optional) The media type for the file. If not specified, it's derived from the file extension.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -149,3 +151,3 @@ * - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - token - (Required) A client token generated by your server using the generateClientTokenFromReadWriteToken method.

@@ -192,3 +194,3 @@ * - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - handleUploadUrl - (Required) A string specifying the route to call for generating client tokens for client uploads.

@@ -195,0 +197,0 @@ * - clientPayload - (Optional) A string to be sent to your handleUpload server code. Example use-case: attaching the post id an image relates to.

@@ -10,3 +10,3 @@ import {

getTokenFromOptionsOrEnv
} from "./chunk-NUG4TPYD.js";
} from "./chunk-JX6ZYJUG.js";

@@ -13,0 +13,0 @@ // src/client.ts

@@ -26,7 +26,8 @@ "use strict";Object.defineProperty(exports, "__esModule", {value: true});

var _chunk23VLASYPcjs = require('./chunk-23VLASYP.cjs');
var _chunk7IJ2KDBMcjs = require('./chunk-7IJ2KDBM.cjs');
// src/del.ts
async function del(urlOrPathname, options) {
await _chunk23VLASYPcjs.requestApi.call(void 0,
await _chunk7IJ2KDBMcjs.requestApi.call(void 0,
"/delete",

@@ -48,3 +49,3 @@ {

const searchParams = new URLSearchParams({ url: urlOrPathname });
const response = await _chunk23VLASYPcjs.requestApi.call(void 0,
const response = await _chunk7IJ2KDBMcjs.requestApi.call(void 0,
`?${searchParams.toString()}`,

@@ -71,2 +72,95 @@ // HEAD can't have body as a response, so we use GET

// src/get.ts
var _undici = require('undici');
// True when the input is an absolute http(s) URL rather than a bare blob
// pathname. Case-sensitive, matching URL schemes as the SDK emits them.
function isUrl(urlOrPathname) {
  return /^https?:\/\//.test(urlOrPathname);
}
// Derives the blob pathname from a full URL by dropping the leading "/".
// Input that is not a parsable absolute URL is returned unchanged.
function extractPathnameFromUrl(url) {
  try {
    return new URL(url).pathname.slice(1);
  } catch (e) {
    // Not a valid URL — treat the whole input as a pathname.
    return url;
  }
}
// Tokens look like "vercel_blob_rw_<storeId>_<secret>"; the store ID is the
// fourth underscore-separated segment. Returns "" when that segment is absent.
function getStoreIdFromToken(token) {
  const segments = token.split("_");
  return segments[3] === undefined ? "" : segments[3];
}
// Builds the canonical blob URL; both the store ID and the access level
// ("public" | "private") are encoded in the hostname.
function constructBlobUrl(storeId, pathname, access) {
  const host = storeId + "." + access + ".blob.vercel-storage.com";
  return "https://" + host + "/" + pathname;
}
// Fetches a blob (by full URL or bare pathname) and returns its content
// stream plus metadata, or null when the blob does not exist (HTTP 404).
// Generated CommonJS build: shared helpers are reached through the chunk
// module, hence the `_chunk7IJ2KDBMcjs.*` / `new (0, ...)` call patterns.
async function get(urlOrPathname, options) {
// Validate required arguments up front so callers get precise errors.
if (!urlOrPathname) {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("url or pathname is required");
}
if (!options) {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("missing options, see usage");
}
if (options.access !== "public" && options.access !== "private") {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)('access must be "public" or "private"');
}
// Token comes from options.token or the environment (BLOB_READ_WRITE_TOKEN).
const token = _chunk7IJ2KDBMcjs.getTokenFromOptionsOrEnv.call(void 0, options);
let blobUrl;
let pathname;
const access = options.access;
if (isUrl(urlOrPathname)) {
// Full URL given: use it directly and recover the pathname from it.
blobUrl = urlOrPathname;
pathname = extractPathnameFromUrl(urlOrPathname);
} else {
// Bare pathname given: the store ID embedded in the token
// ("vercel_blob_rw_<storeId>_<secret>") is needed to build the URL.
const storeId = getStoreIdFromToken(token);
if (!storeId) {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Invalid token: unable to extract store ID");
}
pathname = urlOrPathname;
blobUrl = constructBlobUrl(storeId, pathname, access);
}
// Caller headers are spread first so the authorization header cannot be
// overridden by them.
const requestHeaders = {
...options.headers,
authorization: `Bearer ${token}`
};
let fetchUrl = blobUrl;
if (options.useCache === false) {
// cache=0 asks the service to bypass the CDN cache (per the typings,
// only effective for private blobs).
const url = new URL(blobUrl);
url.searchParams.set("cache", "0");
fetchUrl = url.toString();
}
const response = await _undici.fetch.call(void 0, fetchUrl, {
method: "GET",
headers: requestHeaders,
signal: options.abortSignal
});
if (!response.ok) {
// A missing blob (404) is an expected outcome and maps to null; every
// other failure status is surfaced as a BlobError.
if (response.status === 404) {
return null;
}
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
`Failed to fetch blob: ${response.status} ${response.statusText}`
);
}
const stream = response.body;
if (!stream) {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("Response body is null");
}
// Assemble HeadBlobResult-shaped metadata from the response headers,
// falling back to neutral defaults where a header is missing.
const contentLength = response.headers.get("content-length");
const lastModified = response.headers.get("last-modified");
const downloadUrl = new URL(blobUrl);
downloadUrl.searchParams.set("download", "1");
return {
stream,
headers: response.headers,
blob: {
url: blobUrl,
downloadUrl: downloadUrl.toString(),
pathname,
contentType: response.headers.get("content-type") || "application/octet-stream",
contentDisposition: response.headers.get("content-disposition") || "",
cacheControl: response.headers.get("cache-control") || "",
size: contentLength ? parseInt(contentLength, 10) : 0,
// NOTE(review): falls back to "now" when last-modified is absent —
// presumably intentional; verify this matches head() behavior.
uploadedAt: lastModified ? new Date(lastModified) : /* @__PURE__ */ new Date(),
etag: response.headers.get("etag") || ""
}
};
}
// src/list.ts

@@ -88,3 +182,3 @@ async function list(options) {

}
const response = await _chunk23VLASYPcjs.requestApi.call(void 0,
const response = await _chunk7IJ2KDBMcjs.requestApi.call(void 0,
`?${searchParams.toString()}`,

@@ -125,15 +219,15 @@ {

if (!options) {
throw new (0, _chunk23VLASYPcjs.BlobError)("missing options, see usage");
throw new (0, _chunk7IJ2KDBMcjs.BlobError)("missing options, see usage");
}
if (options.access !== "public") {
throw new (0, _chunk23VLASYPcjs.BlobError)('access must be "public"');
if (options.access !== "public" && options.access !== "private") {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)('access must be "public" or "private"');
}
if (toPathname.length > _chunk23VLASYPcjs.MAXIMUM_PATHNAME_LENGTH) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
`pathname is too long, maximum length is ${_chunk23VLASYPcjs.MAXIMUM_PATHNAME_LENGTH}`
if (toPathname.length > _chunk7IJ2KDBMcjs.MAXIMUM_PATHNAME_LENGTH) {
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
`pathname is too long, maximum length is ${_chunk7IJ2KDBMcjs.MAXIMUM_PATHNAME_LENGTH}`
);
}
for (const invalidCharacter of _chunk23VLASYPcjs.disallowedPathnameCharacters) {
for (const invalidCharacter of _chunk7IJ2KDBMcjs.disallowedPathnameCharacters) {
if (toPathname.includes(invalidCharacter)) {
throw new (0, _chunk23VLASYPcjs.BlobError)(
throw new (0, _chunk7IJ2KDBMcjs.BlobError)(
`pathname cannot contain "${invalidCharacter}", please encode it if needed`

@@ -144,2 +238,3 @@ );

const headers = {};
headers["x-vercel-blob-access"] = options.access;
if (options.addRandomSuffix !== void 0) {

@@ -164,3 +259,3 @@ headers["x-add-random-suffix"] = options.addRandomSuffix ? "1" : "0";

});
const response = await _chunk23VLASYPcjs.requestApi.call(void 0,
const response = await _chunk7IJ2KDBMcjs.requestApi.call(void 0,
`?${params.toString()}`,

@@ -185,3 +280,3 @@ {

// src/index.ts
var put = _chunk23VLASYPcjs.createPutMethod.call(void 0, {
var put = _chunk7IJ2KDBMcjs.createPutMethod.call(void 0, {
allowedOptions: [

@@ -195,3 +290,3 @@ "cacheControlMaxAge",

});
var createMultipartUpload = _chunk23VLASYPcjs.createCreateMultipartUploadMethod.call(void 0, {
var createMultipartUpload = _chunk7IJ2KDBMcjs.createCreateMultipartUploadMethod.call(void 0, {
allowedOptions: [

@@ -205,3 +300,3 @@ "cacheControlMaxAge",

});
var createMultipartUploader = _chunk23VLASYPcjs.createCreateMultipartUploaderMethod.call(void 0, {
var createMultipartUploader = _chunk7IJ2KDBMcjs.createCreateMultipartUploaderMethod.call(void 0, {
allowedOptions: [

@@ -215,3 +310,3 @@ "cacheControlMaxAge",

});
var uploadPart = _chunk23VLASYPcjs.createUploadPartMethod.call(void 0, {
var uploadPart = _chunk7IJ2KDBMcjs.createUploadPartMethod.call(void 0, {
allowedOptions: [

@@ -224,3 +319,3 @@ "cacheControlMaxAge",

});
var completeMultipartUpload = _chunk23VLASYPcjs.createCompleteMultipartUploadMethod.call(void 0, {
var completeMultipartUpload = _chunk7IJ2KDBMcjs.createCompleteMultipartUploadMethod.call(void 0, {
allowedOptions: [

@@ -259,3 +354,4 @@ "cacheControlMaxAge",

exports.BlobAccessError = _chunk23VLASYPcjs.BlobAccessError; exports.BlobClientTokenExpiredError = _chunk23VLASYPcjs.BlobClientTokenExpiredError; exports.BlobContentTypeNotAllowedError = _chunk23VLASYPcjs.BlobContentTypeNotAllowedError; exports.BlobError = _chunk23VLASYPcjs.BlobError; exports.BlobFileTooLargeError = _chunk23VLASYPcjs.BlobFileTooLargeError; exports.BlobNotFoundError = _chunk23VLASYPcjs.BlobNotFoundError; exports.BlobPathnameMismatchError = _chunk23VLASYPcjs.BlobPathnameMismatchError; exports.BlobPreconditionFailedError = _chunk23VLASYPcjs.BlobPreconditionFailedError; exports.BlobRequestAbortedError = _chunk23VLASYPcjs.BlobRequestAbortedError; exports.BlobServiceNotAvailable = _chunk23VLASYPcjs.BlobServiceNotAvailable; exports.BlobServiceRateLimited = _chunk23VLASYPcjs.BlobServiceRateLimited; exports.BlobStoreNotFoundError = _chunk23VLASYPcjs.BlobStoreNotFoundError; exports.BlobStoreSuspendedError = _chunk23VLASYPcjs.BlobStoreSuspendedError; exports.BlobUnknownError = _chunk23VLASYPcjs.BlobUnknownError; exports.completeMultipartUpload = completeMultipartUpload; exports.copy = copy; exports.createFolder = _chunk23VLASYPcjs.createFolder; exports.createMultipartUpload = createMultipartUpload; exports.createMultipartUploader = createMultipartUploader; exports.del = del; exports.getDownloadUrl = _chunk23VLASYPcjs.getDownloadUrl; exports.head = head; exports.list = list; exports.put = put; exports.uploadPart = uploadPart;
exports.BlobAccessError = _chunk7IJ2KDBMcjs.BlobAccessError; exports.BlobClientTokenExpiredError = _chunk7IJ2KDBMcjs.BlobClientTokenExpiredError; exports.BlobContentTypeNotAllowedError = _chunk7IJ2KDBMcjs.BlobContentTypeNotAllowedError; exports.BlobError = _chunk7IJ2KDBMcjs.BlobError; exports.BlobFileTooLargeError = _chunk7IJ2KDBMcjs.BlobFileTooLargeError; exports.BlobNotFoundError = _chunk7IJ2KDBMcjs.BlobNotFoundError; exports.BlobPathnameMismatchError = _chunk7IJ2KDBMcjs.BlobPathnameMismatchError; exports.BlobPreconditionFailedError = _chunk7IJ2KDBMcjs.BlobPreconditionFailedError; exports.BlobRequestAbortedError = _chunk7IJ2KDBMcjs.BlobRequestAbortedError; exports.BlobServiceNotAvailable = _chunk7IJ2KDBMcjs.BlobServiceNotAvailable; exports.BlobServiceRateLimited = _chunk7IJ2KDBMcjs.BlobServiceRateLimited; exports.BlobStoreNotFoundError = _chunk7IJ2KDBMcjs.BlobStoreNotFoundError; exports.BlobStoreSuspendedError = _chunk7IJ2KDBMcjs.BlobStoreSuspendedError; exports.BlobUnknownError = _chunk7IJ2KDBMcjs.BlobUnknownError; exports.completeMultipartUpload = completeMultipartUpload; exports.copy = copy; exports.createFolder = _chunk7IJ2KDBMcjs.createFolder; exports.createMultipartUpload = createMultipartUpload; exports.createMultipartUploader = createMultipartUploader; exports.del = del; exports.get = get; exports.getDownloadUrl = _chunk7IJ2KDBMcjs.getDownloadUrl; exports.head = head; exports.list = list; exports.put = put; exports.uploadPart = uploadPart;
//# sourceMappingURL=index.cjs.map

@@ -1,5 +0,5 @@

import { e as CommonCreateBlobOptions, W as WithUploadProgress, f as BlobError, B as BlobCommandOptions, a as Part, g as CompleteMultipartUploadCommandOptions, P as PutBlobResult, b as PutBody, U as UploadPartCommandOptions } from './create-folder-Bmttf9ub.cjs';
export { O as OnUploadProgressCallback, h as PartInput, i as UploadProgressEvent, d as createFolder, j as getDownloadUrl } from './create-folder-Bmttf9ub.cjs';
import { f as CommonCreateBlobOptions, W as WithUploadProgress, g as BlobError, a as BlobCommandOptions, B as BlobAccessType, b as Part, h as CompleteMultipartUploadCommandOptions, P as PutBlobResult, c as PutBody, U as UploadPartCommandOptions } from './create-folder-CdCq7xeS.cjs';
export { i as CreateFolderCommandOptions, j as CreateFolderResult, O as OnUploadProgressCallback, k as PartInput, l as UploadProgressEvent, e as createFolder, m as getDownloadUrl } from './create-folder-CdCq7xeS.cjs';
import { Headers } from 'undici';
import 'stream';
import 'undici';

@@ -115,2 +115,75 @@ interface PutCommandOptions extends CommonCreateBlobOptions, WithUploadProgress {

/**
* Options for the get method.
*/
interface GetCommandOptions extends BlobCommandOptions {
/**
* Whether the blob is publicly accessible or private.
* - 'public': The blob is publicly accessible via its URL.
* - 'private': The blob requires authentication to access.
*/
access: BlobAccessType;
/**
* Whether to allow the blob to be served from CDN cache.
* When false, fetches directly from origin storage.
* Only effective for private blobs (ignored for public blobs).
* @defaultValue true
*/
useCache?: boolean;
/**
* Advanced: Additional headers to include in the fetch request.
* You probably don't need this. The authorization header is automatically set.
*/
headers?: HeadersInit;
}
/**
* Result of the get method containing the stream and blob metadata.
*/
interface GetBlobResult {
/**
* The readable stream from the fetch response.
* This is the raw stream with no automatic buffering, allowing efficient
* streaming of large files without loading them entirely into memory.
*/
stream: ReadableStream<Uint8Array>;
/**
* The raw headers from the fetch response.
* Useful for accessing additional response metadata like ETag, x-vercel-* headers, etc.
*/
headers: Headers;
/**
* The blob metadata object containing url, pathname, contentType, size,
* downloadUrl, contentDisposition, cacheControl, and uploadedAt.
*/
blob: HeadBlobResult;
}
/**
* Fetches blob content by URL or pathname.
* - If a URL is provided, fetches the blob directly.
* - If a pathname is provided, constructs the URL from the token's store ID.
*
* Returns a stream (no automatic buffering) and blob metadata.
*
* @example
* ```ts
* // Basic usage
* const { stream, headers, blob } = await get('user123/avatar.png', { access: 'private' });
*
* // Bypass cache for private blobs (always fetch fresh from storage)
* const { stream, headers, blob } = await get('user123/data.json', { access: 'private', useCache: false });
* ```
*
* Detailed documentation can be found here: https://vercel.com/docs/vercel-blob/using-blob-sdk
*
* @param urlOrPathname - The URL or pathname of the blob to fetch.
* @param options - Configuration options including:
* - access - (Required) Must be 'public' or 'private'. Determines the access level of the blob.
* - useCache - (Optional) When false, fetches directly from origin storage instead of CDN cache. Only effective for private blobs. Defaults to true.
* - token - (Optional) A string specifying the token to use when making requests. It defaults to process.env.BLOB_READ_WRITE_TOKEN when deployed on Vercel.
* - abortSignal - (Optional) AbortSignal to cancel the operation.
* - headers - (Optional, advanced) Additional headers to include in the fetch request. You probably don't need this.
* @returns A promise that resolves to { stream, headers, blob } or null if not found.
*/
declare function get(urlOrPathname: string, options: GetCommandOptions): Promise<GetBlobResult | null>;
/**
* Basic blob object information returned by the list method.

@@ -249,3 +322,3 @@ */

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to false. We recommend using this option to ensure there are no conflicts in your blob filenames.

@@ -268,3 +341,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to true.

@@ -290,3 +363,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to true.

@@ -321,3 +394,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

@@ -344,3 +417,3 @@ * - key - (Required) A string returned from createMultipartUpload which identifies the blob object.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

@@ -358,2 +431,2 @@ * - key - (Required) A string returned from createMultipartUpload which identifies the blob object.

export { BlobAccessError, BlobClientTokenExpiredError, BlobContentTypeNotAllowedError, BlobError, BlobFileTooLargeError, BlobNotFoundError, BlobPathnameMismatchError, BlobPreconditionFailedError, BlobRequestAbortedError, BlobServiceNotAvailable, BlobServiceRateLimited, BlobStoreNotFoundError, BlobStoreSuspendedError, BlobUnknownError, CompleteMultipartUploadCommandOptions, type CopyBlobResult, type CopyCommandOptions, type HeadBlobResult, type ListBlobResult, type ListBlobResultBlob, type ListCommandOptions, type ListFoldedBlobResult, Part, PutBlobResult, type PutCommandOptions, UploadPartCommandOptions, completeMultipartUpload, copy, createMultipartUpload, createMultipartUploader, del, head, list, put, uploadPart };
export { BlobAccessError, BlobAccessType, BlobClientTokenExpiredError, BlobContentTypeNotAllowedError, BlobError, BlobFileTooLargeError, BlobNotFoundError, BlobPathnameMismatchError, BlobPreconditionFailedError, BlobRequestAbortedError, BlobServiceNotAvailable, BlobServiceRateLimited, BlobStoreNotFoundError, BlobStoreSuspendedError, BlobUnknownError, CompleteMultipartUploadCommandOptions, type CopyBlobResult, type CopyCommandOptions, type GetBlobResult, type GetCommandOptions, type HeadBlobResult, type ListBlobResult, type ListBlobResultBlob, type ListCommandOptions, type ListFoldedBlobResult, Part, PutBlobResult, type PutCommandOptions, UploadPartCommandOptions, completeMultipartUpload, copy, createMultipartUpload, createMultipartUploader, del, get, head, list, put, uploadPart };

@@ -1,5 +0,5 @@

import { e as CommonCreateBlobOptions, W as WithUploadProgress, f as BlobError, B as BlobCommandOptions, a as Part, g as CompleteMultipartUploadCommandOptions, P as PutBlobResult, b as PutBody, U as UploadPartCommandOptions } from './create-folder-Bmttf9ub.js';
export { O as OnUploadProgressCallback, h as PartInput, i as UploadProgressEvent, d as createFolder, j as getDownloadUrl } from './create-folder-Bmttf9ub.js';
import { f as CommonCreateBlobOptions, W as WithUploadProgress, g as BlobError, a as BlobCommandOptions, B as BlobAccessType, b as Part, h as CompleteMultipartUploadCommandOptions, P as PutBlobResult, c as PutBody, U as UploadPartCommandOptions } from './create-folder-CdCq7xeS.js';
export { i as CreateFolderCommandOptions, j as CreateFolderResult, O as OnUploadProgressCallback, k as PartInput, l as UploadProgressEvent, e as createFolder, m as getDownloadUrl } from './create-folder-CdCq7xeS.js';
import { Headers } from 'undici';
import 'stream';
import 'undici';

@@ -115,2 +115,75 @@ interface PutCommandOptions extends CommonCreateBlobOptions, WithUploadProgress {

/**
* Options for the get method.
*/
interface GetCommandOptions extends BlobCommandOptions {
/**
* Whether the blob is publicly accessible or private.
* - 'public': The blob is publicly accessible via its URL.
* - 'private': The blob requires authentication to access.
*/
access: BlobAccessType;
/**
* Whether to allow the blob to be served from CDN cache.
* When false, fetches directly from origin storage.
* Only effective for private blobs (ignored for public blobs).
* @defaultValue true
*/
useCache?: boolean;
/**
* Advanced: Additional headers to include in the fetch request.
* You probably don't need this. The authorization header is automatically set.
*/
headers?: HeadersInit;
}
/**
* Result of the get method containing the stream and blob metadata.
*/
interface GetBlobResult {
/**
* The readable stream from the fetch response.
* This is the raw stream with no automatic buffering, allowing efficient
* streaming of large files without loading them entirely into memory.
*/
stream: ReadableStream<Uint8Array>;
/**
* The raw headers from the fetch response.
* Useful for accessing additional response metadata like ETag, x-vercel-* headers, etc.
*/
headers: Headers;
/**
* The blob metadata object containing url, pathname, contentType, size,
* downloadUrl, contentDisposition, cacheControl, and uploadedAt.
*/
blob: HeadBlobResult;
}
/**
* Fetches blob content by URL or pathname.
* - If a URL is provided, fetches the blob directly.
* - If a pathname is provided, constructs the URL from the token's store ID.
*
* Returns a stream (no automatic buffering) and blob metadata.
*
* @example
* ```ts
* // Basic usage
* const { stream, headers, blob } = await get('user123/avatar.png', { access: 'private' });
*
* // Bypass cache for private blobs (always fetch fresh from storage)
* const { stream, headers, blob } = await get('user123/data.json', { access: 'private', useCache: false });
* ```
*
* Detailed documentation can be found here: https://vercel.com/docs/vercel-blob/using-blob-sdk
*
* @param urlOrPathname - The URL or pathname of the blob to fetch.
* @param options - Configuration options including:
* - access - (Required) Must be 'public' or 'private'. Determines the access level of the blob.
* - useCache - (Optional) When false, fetches directly from origin storage instead of CDN cache. Only effective for private blobs. Defaults to true.
* - token - (Optional) A string specifying the token to use when making requests. It defaults to process.env.BLOB_READ_WRITE_TOKEN when deployed on Vercel.
* - abortSignal - (Optional) AbortSignal to cancel the operation.
* - headers - (Optional, advanced) Additional headers to include in the fetch request. You probably don't need this.
* @returns A promise that resolves to { stream, headers, blob } or null if not found.
*/
declare function get(urlOrPathname: string, options: GetCommandOptions): Promise<GetBlobResult | null>;
/**
* Basic blob object information returned by the list method.

@@ -249,3 +322,3 @@ */

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to false. We recommend using this option to ensure there are no conflicts in your blob filenames.

@@ -268,3 +341,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to true.

@@ -290,3 +363,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - addRandomSuffix - (Optional) A boolean specifying whether to add a random suffix to the pathname. It defaults to true.

@@ -321,3 +394,3 @@ * - allowOverwrite - (Optional) A boolean to allow overwriting blobs. By default an error will be thrown if you try to overwrite a blob by using the same pathname for multiple blobs.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

@@ -344,3 +417,3 @@ * - key - (Required) A string returned from createMultipartUpload which identifies the blob object.

* @param options - Configuration options including:
* - access - (Required) Must be 'public' as blobs are publicly accessible.
* - access - (Required) Must be 'public' or 'private'. Public blobs are accessible via URL, private blobs require authentication.
* - uploadId - (Required) A string returned from createMultipartUpload which identifies the multipart upload.

@@ -358,2 +431,2 @@ * - key - (Required) A string returned from createMultipartUpload which identifies the blob object.

export { BlobAccessError, BlobClientTokenExpiredError, BlobContentTypeNotAllowedError, BlobError, BlobFileTooLargeError, BlobNotFoundError, BlobPathnameMismatchError, BlobPreconditionFailedError, BlobRequestAbortedError, BlobServiceNotAvailable, BlobServiceRateLimited, BlobStoreNotFoundError, BlobStoreSuspendedError, BlobUnknownError, CompleteMultipartUploadCommandOptions, type CopyBlobResult, type CopyCommandOptions, type HeadBlobResult, type ListBlobResult, type ListBlobResultBlob, type ListCommandOptions, type ListFoldedBlobResult, Part, PutBlobResult, type PutCommandOptions, UploadPartCommandOptions, completeMultipartUpload, copy, createMultipartUpload, createMultipartUploader, del, head, list, put, uploadPart };
export { BlobAccessError, BlobAccessType, BlobClientTokenExpiredError, BlobContentTypeNotAllowedError, BlobError, BlobFileTooLargeError, BlobNotFoundError, BlobPathnameMismatchError, BlobPreconditionFailedError, BlobRequestAbortedError, BlobServiceNotAvailable, BlobServiceRateLimited, BlobStoreNotFoundError, BlobStoreSuspendedError, BlobUnknownError, CompleteMultipartUploadCommandOptions, type CopyBlobResult, type CopyCommandOptions, type GetBlobResult, type GetCommandOptions, type HeadBlobResult, type ListBlobResult, type ListBlobResultBlob, type ListCommandOptions, type ListFoldedBlobResult, Part, PutBlobResult, type PutCommandOptions, UploadPartCommandOptions, completeMultipartUpload, copy, createMultipartUpload, createMultipartUploader, del, get, head, list, put, uploadPart };

@@ -25,4 +25,5 @@ import {

getDownloadUrl,
getTokenFromOptionsOrEnv,
requestApi
} from "./chunk-NUG4TPYD.js";
} from "./chunk-JX6ZYJUG.js";

@@ -70,2 +71,95 @@ // src/del.ts

// src/get.ts
import { fetch } from "undici";
// Distinguishes an absolute http(s) URL from a bare blob pathname.
function isUrl(urlOrPathname) {
  return ["http://", "https://"].some((prefix) => urlOrPathname.startsWith(prefix));
}
// Strips scheme and host from a URL, yielding the blob pathname without the
// leading slash; input that is not a valid URL is returned untouched.
function extractPathnameFromUrl(url) {
  let parsed;
  try {
    parsed = new URL(url);
  } catch {
    return url;
  }
  return parsed.pathname.slice(1);
}
// The store ID is the fourth underscore-separated segment of a token shaped
// like "vercel_blob_rw_<storeId>_<secret>"; "" when the segment is missing.
function getStoreIdFromToken(token) {
  const parts = token.split("_");
  return parts[3] ?? "";
}
// Composes the blob endpoint URL; the access level ("public" | "private")
// selects the hostname alongside the store ID.
function constructBlobUrl(storeId, pathname, access) {
  return ["https://", storeId, ".", access, ".blob.vercel-storage.com/", pathname].join("");
}
// Fetches a blob's content by full URL or bare pathname. Resolves to
// { stream, headers, blob } on success, or null when the blob is absent
// (HTTP 404). Throws BlobError on invalid arguments, an unusable token,
// any other non-OK response, or a null response body.
async function get(urlOrPathname, options) {
  // Argument validation — precise errors before any network work.
  if (!urlOrPathname) {
    throw new BlobError("url or pathname is required");
  }
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  const access = options.access;
  if (access !== "public" && access !== "private") {
    throw new BlobError('access must be "public" or "private"');
  }
  const token = getTokenFromOptionsOrEnv(options);

  // Resolve the canonical blob URL and its pathname from the input form.
  let blobUrl;
  let pathname;
  if (isUrl(urlOrPathname)) {
    blobUrl = urlOrPathname;
    pathname = extractPathnameFromUrl(urlOrPathname);
  } else {
    // Bare pathname: the store ID embedded in the token
    // ("vercel_blob_rw_<storeId>_<secret>") is required to build the URL.
    const storeId = getStoreIdFromToken(token);
    if (!storeId) {
      throw new BlobError("Invalid token: unable to extract store ID");
    }
    pathname = urlOrPathname;
    blobUrl = constructBlobUrl(storeId, pathname, access);
  }

  // cache=0 asks the service to bypass the CDN cache (per the typings,
  // only effective for private blobs).
  let fetchUrl = blobUrl;
  if (options.useCache === false) {
    const bypass = new URL(blobUrl);
    bypass.searchParams.set("cache", "0");
    fetchUrl = bypass.toString();
  }

  const response = await fetch(fetchUrl, {
    method: "GET",
    // Caller headers are spread first so authorization cannot be overridden.
    headers: { ...options.headers, authorization: `Bearer ${token}` },
    signal: options.abortSignal
  });

  if (response.status === 404) {
    // A missing blob is an expected outcome, not an error.
    return null;
  }
  if (!response.ok) {
    throw new BlobError(
      `Failed to fetch blob: ${response.status} ${response.statusText}`
    );
  }
  const stream = response.body;
  if (!stream) {
    throw new BlobError("Response body is null");
  }

  // Build HeadBlobResult-shaped metadata from the response headers, with
  // neutral defaults where a header is missing.
  const header = (name) => response.headers.get(name);
  const contentLength = header("content-length");
  const lastModified = header("last-modified");
  const downloadUrl = new URL(blobUrl);
  downloadUrl.searchParams.set("download", "1");

  return {
    stream,
    headers: response.headers,
    blob: {
      url: blobUrl,
      downloadUrl: downloadUrl.toString(),
      pathname,
      contentType: header("content-type") || "application/octet-stream",
      contentDisposition: header("content-disposition") || "",
      cacheControl: header("cache-control") || "",
      size: contentLength ? parseInt(contentLength, 10) : 0,
      // Falls back to "now" when last-modified is absent, mirroring the
      // original implementation.
      uploadedAt: lastModified ? new Date(lastModified) : new Date(),
      etag: header("etag") || ""
    }
  };
}
// src/list.ts

@@ -125,4 +219,4 @@ async function list(options) {

}
if (options.access !== "public") {
throw new BlobError('access must be "public"');
if (options.access !== "public" && options.access !== "private") {
throw new BlobError('access must be "public" or "private"');
}

@@ -142,2 +236,3 @@ if (toPathname.length > MAXIMUM_PATHNAME_LENGTH) {

const headers = {};
headers["x-vercel-blob-access"] = options.access;
if (options.addRandomSuffix !== void 0) {

@@ -246,2 +341,3 @@ headers["x-add-random-suffix"] = options.addRandomSuffix ? "1" : "0";

del,
get,
getDownloadUrl,

@@ -248,0 +344,0 @@ head,

{
"name": "@vercel/blob",
"version": "2.1.0",
"version": "2.2.0-f6bb7b4-20260204165325",
"description": "The Vercel Blob JavaScript API client",

@@ -5,0 +5,0 @@ "homepage": "https://vercel.com/storage/blob",

"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }// src/helpers.ts
var _isnodeprocess = require('is-node-process');
// src/multipart/helpers.ts
var _isbuffer = require('is-buffer'); var _isbuffer2 = _interopRequireDefault(_isbuffer);
var _stream = require('stream');
// Feature probe: some runtimes cannot construct a Blob directly from a typed
// array. Resolves true when `new Blob([Uint8Array])` round-trips correctly;
// never rejects (failures resolve false).
var supportsNewBlobFromArrayBuffer = (async () => {
  try {
    const probeBytes = new Uint8Array([104, 101, 108, 108, 111]); // "hello"
    const probeBlob = new Blob([probeBytes]);
    return (await probeBlob.text()) === "hello";
  } catch {
    return false;
  }
})();
// Normalizes any supported body type (web ReadableStream, Blob, Node.js
// Readable, ArrayBuffer, Buffer or string) into a web ReadableStream.
async function toReadableStream(value) {
  // Already a web stream: pass through untouched.
  if (value instanceof ReadableStream) {
    return value;
  }
  // Blobs know how to stream themselves.
  if (value instanceof Blob) {
    return value.stream();
  }
  // Node.js Readable: convert via the built-in adapter.
  if (isNodeJsReadableStream(value)) {
    return _stream.Readable.toWeb(value);
  }
  // Everything else is materialized as a single chunk of bytes.
  let chunkBytes;
  if (value instanceof ArrayBuffer) {
    chunkBytes = new Uint8Array(value);
  } else if (isNodeJsBuffer(value)) {
    chunkBytes = value;
  } else {
    chunkBytes = stringToUint8Array(value);
  }
  // Prefer Blob.stream() when the runtime supports Blob-from-typed-array.
  if (await supportsNewBlobFromArrayBuffer) {
    return new Blob([chunkBytes]).stream();
  }
  // Fallback: hand-rolled single-chunk stream.
  return new ReadableStream({
    start(controller) {
      controller.enqueue(chunkBytes);
      controller.close();
    }
  });
}
// Duck-type check for a Node.js Readable: pipe(), a truthy `readable` flag and
// the internal _read/_readableState members must all be present. The raw &&
// chain is kept so falsy short-circuit values propagate exactly as before.
function isNodeJsReadableStream(value) {
  return (
    typeof value === "object" &&
    typeof value.pipe === "function" &&
    value.readable &&
    typeof value._read === "function" &&
    // @ts-expect-error _readableState does exists on Readable
    typeof value._readableState === "object"
  );
}
// Encodes a JS string into its UTF-8 byte representation.
function stringToUint8Array(s) {
  return new TextEncoder().encode(s);
}
// Returns true when `value` is a Node.js Buffer. Delegates to the `is-buffer`
// package so the check also works in browser bundles where the global Buffer
// constructor is absent.
function isNodeJsBuffer(value) {
  return _isbuffer2.default.call(void 0, value);
}
// src/bytes.ts
// Matches "<number><optional spaces><unit>", unit required, case-insensitive.
var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i;
// Unit multipliers (binary: 1kb = 1024).
var map = {
  b: 1,
  kb: 1 << 10,
  mb: 1 << 20,
  gb: 1 << 30,
  tb: 1024 ** 4,
  pb: 1024 ** 5
};
// Converts a human-readable size ("8mb", "1.5 kb") or a plain number into a
// byte count. Returns null when the input cannot be parsed.
function bytes(val) {
  if (typeof val === "number" && !Number.isNaN(val)) {
    return val;
  }
  if (typeof val !== "string") {
    return null;
  }
  const match = parseRegExp.exec(val);
  let amount;
  let unit = "b";
  if (match) {
    const numericPart = match[1];
    if (!numericPart) {
      return null;
    }
    amount = parseFloat(numericPart);
    const unitPart = match[4];
    if (unitPart) {
      unit = unitPart.toLowerCase();
    }
  } else {
    // No recognized unit suffix: fall back to a plain base-10 integer parse.
    amount = parseInt(val, 10);
  }
  if (Number.isNaN(amount)) {
    return null;
  }
  return Math.floor(map[unit] * amount);
}
// src/helpers.ts
var defaultVercelBlobApiUrl = "https://vercel.com/api/blob";
// Resolves the blob read-write token: an explicit `token` option wins, then
// the BLOB_READ_WRITE_TOKEN environment variable; otherwise throws BlobError.
function getTokenFromOptionsOrEnv(options) {
  const explicitToken = options == null ? void 0 : options.token;
  if (explicitToken) {
    return explicitToken;
  }
  const envToken = process.env.BLOB_READ_WRITE_TOKEN;
  if (envToken) {
    return envToken;
  }
  throw new BlobError(
    "No token found. Either configure the `BLOB_READ_WRITE_TOKEN` environment variable, or pass a `token` option to your calls."
  );
}
// Base error for every failure surfaced by this SDK; prefixes messages with
// "Vercel Blob: " so they are easy to attribute in logs.
var BlobError = class extends Error {
  constructor(message) {
    super("Vercel Blob: " + message);
  }
};
// Returns `blobUrl` with `download=1` appended to the query string, which
// forces browsers to download the blob instead of rendering it inline.
function getDownloadUrl(blobUrl) {
  const parsed = new URL(blobUrl);
  parsed.searchParams.set("download", "1");
  return parsed.toString();
}
// True for plain data objects ({} literals or Object.create(null)); false for
// arrays, class instances, iterables and anything tagged via Symbol.toStringTag.
function isPlainObject(value) {
  if (typeof value !== "object" || value === null) {
    return false;
  }
  const proto = Object.getPrototypeOf(value);
  const hasPlainPrototype =
    proto === null ||
    proto === Object.prototype ||
    Object.getPrototypeOf(proto) === null;
  return hasPlainPrototype && !(Symbol.toStringTag in value) && !(Symbol.iterator in value);
}
// Pathname fragments rejected by createPutOptions.
var disallowedPathnameCharacters = ["//"];
// Detects whether the runtime supports streaming request bodies. Node.js
// always does. Browsers are probed: when Request supports stream bodies it
// reads the `duplex` option and does NOT serialize the stream (which would
// add a Content-Type header).
var supportsRequestStreams = (() => {
  if (_isnodeprocess.isNodeProcess.call(void 0, )) {
    return true;
  }
  const apiUrl = getApiUrl();
  // NOTE(review): streaming appears deliberately disabled against a localhost
  // API -- confirm intent before changing.
  if (apiUrl.startsWith("http://localhost")) {
    return false;
  }
  let duplexAccessed = false;
  const hasContentType = new Request(getApiUrl(), {
    body: new ReadableStream(),
    method: "POST",
    // @ts-expect-error -- TypeScript doesn't yet have duplex but it's in the spec: https://github.com/microsoft/TypeScript-DOM-lib-generator/pull/1729
    get duplex() {
      duplexAccessed = true;
      return "half";
    }
  }).headers.has("Content-Type");
  return duplexAccessed && !hasContentType;
})();
// Builds an absolute API URL: env override (VERCEL_BLOB_API_URL or
// NEXT_PUBLIC_VERCEL_BLOB_API_URL) or the default host, plus `pathname`.
function getApiUrl(pathname = "") {
  let baseUrl = null;
  try {
    // `process` may be undefined in some browser contexts; swallow that.
    baseUrl = process.env.VERCEL_BLOB_API_URL || process.env.NEXT_PUBLIC_VERCEL_BLOB_API_URL;
  } catch {
    // no process/env available: use the default host
  }
  return (baseUrl || defaultVercelBlobApiUrl) + pathname;
}
// Shared encoder instance; null on runtimes without TextEncoder.
var TEXT_ENCODER = typeof TextEncoder === "function" ? new TextEncoder() : null;
// Best-effort byte length of a request body; returns 0 when it cannot be
// determined (e.g. streams).
function computeBodyLength(body) {
  if (!body) {
    return 0;
  }
  if (typeof body === "string") {
    // TextEncoder gives the accurate UTF-8 length; Blob is the fallback.
    return TEXT_ENCODER ? TEXT_ENCODER.encode(body).byteLength : new Blob([body]).size;
  }
  // ArrayBuffer / typed arrays / Buffer expose byteLength.
  if ("byteLength" in body && typeof body.byteLength === "number") {
    return body.byteLength;
  }
  // Blob / File expose size.
  if ("size" in body && typeof body.size === "number") {
    return body.size;
  }
  // Opaque bodies (streams): length unknown.
  return 0;
}
// Re-chunks a byte stream into fixed `chunkSize` pieces, invoking `onProgress`
// with the byte count of each chunk forwarded downstream. Used to emit upload
// progress at a stable granularity.
var createChunkTransformStream = (chunkSize, onProgress) => {
  let buffer = new Uint8Array(0);
  return new TransformStream({
    transform(chunk, controller) {
      // NOTE(review): the work is deferred to a microtask, presumably to avoid
      // re-entrancy with stream backpressure -- confirm before restructuring.
      queueMicrotask(() => {
        // Append the incoming chunk to the pending buffer.
        const newBuffer = new Uint8Array(buffer.length + chunk.byteLength);
        newBuffer.set(buffer);
        newBuffer.set(new Uint8Array(chunk), buffer.length);
        buffer = newBuffer;
        // Emit as many full-size chunks as the buffer now holds.
        while (buffer.length >= chunkSize) {
          const newChunk = buffer.slice(0, chunkSize);
          controller.enqueue(newChunk);
          onProgress == null ? void 0 : onProgress(newChunk.byteLength);
          buffer = buffer.slice(chunkSize);
        }
      });
    },
    flush(controller) {
      // Emit the final partial chunk, if any.
      queueMicrotask(() => {
        if (buffer.length > 0) {
          controller.enqueue(buffer);
          onProgress == null ? void 0 : onProgress(buffer.byteLength);
        }
      });
    }
  });
};
// True when `value` is a WHATWG ReadableStream, guarding against runtimes
// where the global constructor is missing.
function isReadableStream(value) {
  // TODO: Can be removed once Node.js 16 is no more required internally
  const webStreamCtor = globalThis.ReadableStream;
  return webStreamCtor && value instanceof webStreamCtor;
}
// True when `value` is either a web ReadableStream or a Node.js Readable.
// Boolean() guarantees a strict true/false even though the underlying duck
// checks can short-circuit to falsy non-boolean values.
function isStream(value) {
  return Boolean(isReadableStream(value) || isNodeJsReadableStream(value));
}
// src/api.ts
var _asyncretry = require('async-retry'); var _asyncretry2 = _interopRequireDefault(_asyncretry);
// src/debug.ts
var debugIsActive = false;
var _a, _b;
// Debug logging is opt-in via DEBUG=blob or NEXT_PUBLIC_DEBUG=blob.
try {
  if (process.env.DEBUG?.includes("blob") || process.env.NEXT_PUBLIC_DEBUG?.includes("blob")) {
    debugIsActive = true;
  }
} catch {
  // `process` may not exist (browser); debug stays disabled.
}
// Logs through console.debug with a "vercel-blob:" prefix when active.
function debug(message, ...args) {
  if (debugIsActive) {
    console.debug("vercel-blob: " + message, ...args);
  }
}
// src/dom-exception.ts
var _a2;
// DOMException reference with a fallback for older Node versions that lack
// the global: `atob("~")` throws a DOMException, so its constructor can be
// recovered from the thrown instance's prototype.
var DOMException2 = globalThis.DOMException ?? (() => {
  try {
    atob("~");
  } catch (err) {
    return Object.getPrototypeOf(err).constructor;
  }
})();
// src/is-network-error.ts
var objectToString = Object.prototype.toString;
var isError = (value) => objectToString.call(value) === "[object Error]";
// fetch implementations report network failures as TypeErrors with
// engine-specific messages; this is the union of known ones.
var errorMessages = /* @__PURE__ */ new Set([
  "network error",
  // Chrome
  "Failed to fetch",
  // Chrome
  "NetworkError when attempting to fetch resource.",
  // Firefox
  "The Internet connection appears to be offline.",
  // Safari 16
  "Load failed",
  // Safari 17+
  "Network request failed",
  // `cross-fetch`
  "fetch failed",
  // Undici (Node.js)
  "terminated"
  // Undici (Node.js)
]);
// Heuristic: does `error` look like a fetch-level network failure?
function isNetworkError(error) {
  if (!error || !isError(error)) {
    return false;
  }
  if (error.name !== "TypeError" || typeof error.message !== "string") {
    return false;
  }
  // Safari 17 reports "Load failed" without a stack for genuine network
  // failures; with a stack it is some other TypeError.
  if (error.message === "Load failed") {
    return error.stack === void 0;
  }
  return errorMessages.has(error.message);
}
// src/fetch.ts
var _undici = require('undici');
// Whether the bundled undici exposes a fetch implementation.
var hasFetch = typeof _undici.fetch === "function";
// Upload progress over fetch additionally requires streaming request bodies.
var hasFetchWithUploadProgress = hasFetch && supportsRequestStreams;
// Granularity of progress events: the body is re-chunked into 64 KiB pieces.
var CHUNK_SIZE = 64 * 1024;
// fetch-based transport. When `onUploadProgress` is supplied the body is
// converted to a stream and piped through a chunking TransformStream that
// reports cumulative bytes read.
var blobFetch = async ({
  input,
  init,
  onUploadProgress
}) => {
  debug("using fetch");
  let body;
  if (init.body) {
    if (onUploadProgress) {
      // Re-chunk the body so cumulative progress can be reported per chunk.
      let loaded = 0;
      const progressStream = createChunkTransformStream(
        CHUNK_SIZE,
        (chunkBytes) => {
          loaded += chunkBytes;
          onUploadProgress(loaded);
        }
      );
      const sourceStream = await toReadableStream(init.body);
      body = sourceStream.pipeThrough(progressStream);
    } else {
      body = init.body;
    }
  }
  // Streaming request bodies require duplex: "half" where supported.
  const duplex = supportsRequestStreams && body && isStream(body) ? "half" : void 0;
  return _undici.fetch.call(void 0,
    input,
    // @ts-expect-error -- Blob and Nodejs Blob are triggering type errors, fine with it
    {
      ...init,
      ...init.body ? { body } : {},
      duplex
    }
  );
};
// src/xhr.ts
var hasXhr = typeof XMLHttpRequest !== "undefined";
// fetch-compatible transport built on XMLHttpRequest. Used in browsers without
// streaming request bodies so upload progress events still work; resolves with
// a standard Response so callers stay transport-agnostic.
var blobXhr = async ({
  input,
  init,
  onUploadProgress
}) => {
  debug("using xhr");
  let body = null;
  if (init.body) {
    if (isReadableStream(init.body)) {
      // XHR cannot send a ReadableStream; buffer it into a Blob first.
      body = await new Response(init.body).blob();
    } else {
      body = init.body;
    }
  }
  return new Promise((resolve, reject) => {
    const xhr = new XMLHttpRequest();
    xhr.open(init.method || "GET", input.toString(), true);
    if (onUploadProgress) {
      xhr.upload.addEventListener("progress", (event) => {
        if (event.lengthComputable) {
          onUploadProgress(event.loaded);
        }
      });
    }
    xhr.onload = () => {
      var _a3;
      // A load event can still fire after an abort; report it as AbortError.
      if ((_a3 = init.signal) == null ? void 0 : _a3.aborted) {
        reject(new DOMException("The user aborted the request.", "AbortError"));
        return;
      }
      // Rebuild a fetch-style Headers object from the raw header block.
      const headers = new Headers();
      const rawHeaders = xhr.getAllResponseHeaders().trim().split(/[\r\n]+/);
      rawHeaders.forEach((line) => {
        const parts = line.split(": ");
        const key = parts.shift();
        const value = parts.join(": ");
        if (key) headers.set(key.toLowerCase(), value);
      });
      const response = new Response(xhr.response, {
        status: xhr.status,
        statusText: xhr.statusText,
        headers
      });
      resolve(response);
    };
    xhr.onerror = () => {
      // Mirror fetch semantics: network failures reject with a TypeError.
      reject(new TypeError("Network request failed"));
    };
    xhr.ontimeout = () => {
      reject(new TypeError("Network request timed out"));
    };
    xhr.onabort = () => {
      reject(new DOMException("The user aborted a request.", "AbortError"));
    };
    if (init.headers) {
      const headers = new Headers(init.headers);
      headers.forEach((value, key) => {
        xhr.setRequestHeader(key, value);
      });
    }
    if (init.signal) {
      init.signal.addEventListener("abort", () => {
        xhr.abort();
      });
      // Already aborted before send: abort now; onabort rejects the promise.
      if (init.signal.aborted) {
        xhr.abort();
        return;
      }
    }
    xhr.send(body);
  });
};
// src/request.ts
// Picks the best available transport. With progress requested: streaming
// fetch first, then XHR (browsers). Otherwise plain fetch, then XHR.
var blobRequest = async ({ input, init, onUploadProgress }) => {
  if (onUploadProgress) {
    if (hasFetchWithUploadProgress) return blobFetch({ input, init, onUploadProgress });
    if (hasXhr) return blobXhr({ input, init, onUploadProgress });
  }
  if (hasFetch) return blobFetch({ input, init });
  if (hasXhr) return blobXhr({ input, init });
  throw new Error("No request implementation available");
};
// src/api.ts
var MAXIMUM_PATHNAME_LENGTH = 950;
// One BlobError subclass per API error code so callers can catch specifically.
class BlobAccessError extends BlobError {
  constructor() {
    super("Access denied, please provide a valid token for this resource.");
  }
}
class BlobContentTypeNotAllowedError extends BlobError {
  constructor(message) {
    super(`Content type mismatch, ${message}.`);
  }
}
class BlobPathnameMismatchError extends BlobError {
  constructor(message) {
    super(
      `Pathname mismatch, ${message}. Check the pathname used in upload() or put() matches the one from the client token.`
    );
  }
}
class BlobClientTokenExpiredError extends BlobError {
  constructor() {
    super("Client token has expired.");
  }
}
class BlobFileTooLargeError extends BlobError {
  constructor(message) {
    super(`File is too large, ${message}.`);
  }
}
class BlobStoreNotFoundError extends BlobError {
  constructor() {
    super("This store does not exist.");
  }
}
class BlobStoreSuspendedError extends BlobError {
  constructor() {
    super("This store has been suspended.");
  }
}
class BlobUnknownError extends BlobError {
  constructor() {
    super("Unknown error, please visit https://vercel.com/help.");
  }
}
class BlobNotFoundError extends BlobError {
  constructor() {
    super("The requested blob does not exist");
  }
}
class BlobServiceNotAvailable extends BlobError {
  constructor() {
    super("The blob service is currently not available. Please try again.");
  }
}
class BlobServiceRateLimited extends BlobError {
  constructor(seconds) {
    super(
      `Too many requests please lower the number of concurrent requests ${seconds ? ` - try again in ${seconds} seconds` : ""}.`
    );
    // Exposed so callers can implement their own backoff.
    this.retryAfter = seconds ?? 0;
  }
}
class BlobRequestAbortedError extends BlobError {
  constructor() {
    super("The request was aborted.");
  }
}
class BlobPreconditionFailedError extends BlobError {
  constructor() {
    super("Precondition failed: ETag mismatch.");
  }
}
var BLOB_API_VERSION = 12;
// Returns the x-api-version header value as a string, honoring env overrides
// (used by internal tests/canaries).
function getApiVersion() {
  let versionOverride = null;
  try {
    versionOverride = process.env.VERCEL_BLOB_API_VERSION_OVERRIDE || process.env.NEXT_PUBLIC_VERCEL_BLOB_API_VERSION_OVERRIDE;
  } catch {
    // `process` unavailable: fall through to the default version.
  }
  return String(versionOverride ?? BLOB_API_VERSION);
}
// Number of retry attempts for API requests, configurable via the
// VERCEL_BLOB_RETRIES environment variable. Defaults to 10.
function getRetries() {
  try {
    const retries = parseInt(process.env.VERCEL_BLOB_RETRIES || "10", 10);
    // Guard against malformed values ("abc", "") which parse to NaN and would
    // silently misconfigure async-retry.
    return Number.isNaN(retries) ? 10 : retries;
  } catch {
    // `process` unavailable (browser): use the default.
    return 10;
  }
}
// Builds a BlobServiceRateLimited error, propagating the server-provided
// Retry-After header (in seconds) when present.
function createBlobServiceRateLimited(response) {
  const retryAfterHeader = response.headers.get("retry-after");
  const seconds = retryAfterHeader ? parseInt(retryAfterHeader, 10) : void 0;
  return new BlobServiceRateLimited(seconds);
}
// Maps a failed API response to { code, error }: parses the JSON error body,
// normalizes codes that are only distinguishable by message text, then picks
// the matching typed error.
async function getBlobError(response) {
  let code;
  let message;
  try {
    const data = await response.json();
    code = data.error?.code ?? "unknown_error";
    message = data.error?.message;
  } catch {
    // Body was not valid JSON.
    code = "unknown_error";
  }
  // Some backend errors come through with generic codes; recover the specific
  // code from the message text.
  if (message?.includes("contentType") && message.includes("is not allowed")) {
    code = "content_type_not_allowed";
  }
  if (message?.includes('"pathname"') && message.includes("does not match the token payload")) {
    code = "client_token_pathname_mismatch";
  }
  if (message === "Token expired") {
    code = "client_token_expired";
  }
  if (message?.includes("the file length cannot be greater than")) {
    code = "file_too_large";
  }
  // Error factories per code; anything unrecognized (including
  // "unknown_error" and "not_allowed") maps to the generic error.
  const factories = {
    store_suspended: () => new BlobStoreSuspendedError(),
    forbidden: () => new BlobAccessError(),
    content_type_not_allowed: () => new BlobContentTypeNotAllowedError(message),
    client_token_pathname_mismatch: () => new BlobPathnameMismatchError(message),
    client_token_expired: () => new BlobClientTokenExpiredError(),
    file_too_large: () => new BlobFileTooLargeError(message),
    not_found: () => new BlobNotFoundError(),
    store_not_found: () => new BlobStoreNotFoundError(),
    bad_request: () => new BlobError(message ?? "Bad request"),
    service_unavailable: () => new BlobServiceNotAvailable(),
    rate_limited: () => createBlobServiceRateLimited(response),
    precondition_failed: () => new BlobPreconditionFailedError()
  };
  const factory = factories[code];
  const error = factory ? factory() : new BlobUnknownError();
  return { code, error };
}
// Core API request helper: wraps blobRequest with auth/version headers,
// retries (async-retry), optional upload-progress reporting and typed error
// mapping. Resolves with the parsed JSON response body.
async function requestApi(pathname, init, commandOptions) {
  const apiVersion = getApiVersion();
  const token = getTokenFromOptionsOrEnv(commandOptions);
  const extraHeaders = getProxyThroughAlternativeApiHeaderFromEnv();
  // Tokens are underscore-delimited; segment 3 carries the store id
  // (empty string when absent).
  const [, , , storeId = ""] = token.split("_");
  // Correlation id so all retry attempts of one logical call can be grouped.
  const requestId = `${storeId}:${Date.now()}:${Math.random().toString(16).slice(2)}`;
  let retryCount = 0;
  let bodyLength = 0;
  let totalLoaded = 0;
  const sendBodyLength = (commandOptions == null ? void 0 : commandOptions.onUploadProgress) || shouldUseXContentLength();
  if (init.body && // 1. For upload progress we always need to know the total size of the body
  // 2. In development we need the header for put() to work correctly when passing a stream
  sendBodyLength) {
    bodyLength = computeBodyLength(init.body);
  }
  // Emit an initial 0% progress event before the first byte is sent.
  if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
    commandOptions.onUploadProgress({
      loaded: 0,
      total: bodyLength,
      percentage: 0
    });
  }
  const apiResponse = await _asyncretry2.default.call(void 0,
    async (bail) => {
      let res;
      try {
        res = await blobRequest({
          input: getApiUrl(pathname),
          init: {
            ...init,
            headers: {
              "x-api-blob-request-id": requestId,
              "x-api-blob-request-attempt": String(retryCount),
              "x-api-version": apiVersion,
              ...sendBodyLength ? { "x-content-length": String(bodyLength) } : {},
              authorization: `Bearer ${token}`,
              ...extraHeaders,
              // caller-provided headers win over the defaults above
              ...init.headers
            }
          },
          onUploadProgress: (commandOptions == null ? void 0 : commandOptions.onUploadProgress) ? (loaded) => {
            var _a3;
            const total = bodyLength !== 0 ? bodyLength : loaded;
            totalLoaded = loaded;
            const percentage = bodyLength > 0 ? Number((loaded / total * 100).toFixed(2)) : 0;
            // The final 100% event is emitted only after the server responds
            // (see the bottom of this function), so suppress it here.
            if (percentage === 100 && bodyLength > 0) {
              return;
            }
            (_a3 = commandOptions.onUploadProgress) == null ? void 0 : _a3.call(commandOptions, {
              loaded,
              // When passing a stream to put(), we have no way to know the total size of the body.
              // Instead of defining total as total?: number we decided to set the total to the currently
              // loaded number. This is not inaccurate and way more practical for DX.
              // Passing down a stream to put() is very rare
              total,
              percentage
            });
          } : void 0
        });
      } catch (error2) {
        // Caller aborted: stop retrying and surface a typed error.
        if (error2 instanceof DOMException2 && error2.name === "AbortError") {
          bail(new BlobRequestAbortedError());
          return;
        }
        // Transient network failures are worth retrying.
        if (isNetworkError(error2)) {
          throw error2;
        }
        // Other TypeErrors are programming errors; do not retry.
        if (error2 instanceof TypeError) {
          bail(error2);
          return;
        }
        throw error2;
      }
      if (res.ok) {
        return res;
      }
      const { code, error } = await getBlobError(res);
      // Only opaque or server-side failures are retried; everything else
      // bails immediately with the typed error.
      if (code === "unknown_error" || code === "service_unavailable" || code === "internal_server_error") {
        throw error;
      }
      bail(error);
    },
    {
      retries: getRetries(),
      onRetry: (error) => {
        if (error instanceof Error) {
          debug(`retrying API request to ${pathname}`, error.message);
        }
        retryCount = retryCount + 1;
      }
    }
  );
  // Defensive: retry wrapper resolved without a response (bail path).
  if (!apiResponse) {
    throw new BlobUnknownError();
  }
  // Final 100% progress event once the server has acknowledged the upload.
  if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
    commandOptions.onUploadProgress({
      loaded: totalLoaded,
      total: totalLoaded,
      percentage: 100
    });
  }
  return await apiResponse.json();
}
// Adds the x-proxy-through-alternative-api header when one of the
// (NEXT_PUBLIC_) env vars is defined; the non-public variable wins.
function getProxyThroughAlternativeApiHeaderFromEnv() {
  const extraHeaders = {};
  try {
    for (const envName of [
      "VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API",
      "NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API"
    ]) {
      if (envName in process.env && process.env[envName] !== void 0) {
        extraHeaders["x-proxy-through-alternative-api"] = process.env[envName];
        break;
      }
    }
  } catch {
    // `process` unavailable: no extra headers.
  }
  return extraHeaders;
}
// Development flag: when VERCEL_BLOB_USE_X_CONTENT_LENGTH=1 the client sends
// an explicit x-content-length header alongside uploads.
function shouldUseXContentLength() {
  try {
    const flag = process.env.VERCEL_BLOB_USE_X_CONTENT_LENGTH;
    return flag === "1";
  } catch {
    // `process` unavailable: feature off.
    return false;
  }
}
// src/put-helpers.ts
// Option name -> request header translation table for put-like calls.
var putOptionHeaderMap = {
  cacheControlMaxAge: "x-cache-control-max-age",
  addRandomSuffix: "x-add-random-suffix",
  allowOverwrite: "x-allow-overwrite",
  contentType: "x-content-type",
  ifMatch: "x-if-match"
};
// Builds the request headers from `options`, emitting only the options that
// are both listed in `allowedOptions` and actually provided by the caller.
// Booleans are serialized as "1"/"0".
function createPutHeaders(allowedOptions, options) {
  const headers = {};
  const allows = (name) => allowedOptions.includes(name);
  if (allows("contentType") && options.contentType) {
    headers[putOptionHeaderMap.contentType] = options.contentType;
  }
  if (allows("addRandomSuffix") && options.addRandomSuffix !== void 0) {
    headers[putOptionHeaderMap.addRandomSuffix] = options.addRandomSuffix ? "1" : "0";
  }
  if (allows("allowOverwrite") && options.allowOverwrite !== void 0) {
    headers[putOptionHeaderMap.allowOverwrite] = options.allowOverwrite ? "1" : "0";
  }
  if (allows("cacheControlMaxAge") && options.cacheControlMaxAge !== void 0) {
    headers[putOptionHeaderMap.cacheControlMaxAge] = String(options.cacheControlMaxAge);
  }
  if (allows("ifMatch") && options.ifMatch) {
    headers[putOptionHeaderMap.ifMatch] = options.ifMatch;
  }
  return headers;
}
// Shared validation for put()/upload()/multipart helpers: checks the pathname
// (presence, length, forbidden fragments) and options (presence, access),
// runs any method-specific extra checks, then optionally resolves a client
// token via `getToken`. Returns the (possibly token-augmented) options.
async function createPutOptions({
  pathname,
  options,
  extraChecks,
  getToken
}) {
  if (!pathname) {
    throw new BlobError("pathname is required");
  }
  if (pathname.length > MAXIMUM_PATHNAME_LENGTH) {
    throw new BlobError(
      `pathname is too long, maximum length is ${MAXIMUM_PATHNAME_LENGTH}`
    );
  }
  for (const forbidden of disallowedPathnameCharacters) {
    if (pathname.includes(forbidden)) {
      throw new BlobError(
        `pathname cannot contain "${forbidden}", please encode it if needed`
      );
    }
  }
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  if (options.access !== "public") {
    throw new BlobError('access must be "public"');
  }
  if (extraChecks) {
    extraChecks(options);
  }
  if (getToken) {
    options.token = await getToken(pathname, options);
  }
  return options;
}
// src/multipart/complete.ts
// Factory for the public `completeMultipartUpload` method; wires shared option
// validation and header construction around the raw API call.
function createCompleteMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, parts, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    return completeMultipartUpload({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      headers: createPutHeaders(allowedOptions, options),
      options,
      parts
    });
  };
}
// Finalizes a multipart upload by POSTing the collected part descriptors
// ({ partNumber, etag }). Low-level fetch failures are translated into
// BlobServiceNotAvailable.
async function completeMultipartUpload({
  uploadId,
  key,
  pathname,
  parts,
  headers,
  options
}) {
  const params = new URLSearchParams({ pathname });
  try {
    const response = await requestApi(
      `/mpu?${params.toString()}`,
      {
        method: "POST",
        headers: {
          ...headers,
          "content-type": "application/json",
          "x-mpu-action": "complete",
          "x-mpu-upload-id": uploadId,
          // key can be any utf8 character so we need to encode it as HTTP headers can only be us-ascii
          // https://www.rfc-editor.org/rfc/rfc7230#section-3.2.4
          "x-mpu-key": encodeURIComponent(key)
        },
        body: JSON.stringify(parts),
        signal: options.abortSignal
      },
      options
    );
    debug("mpu: complete", response);
    return response;
  } catch (error) {
    // Network-level failures surface as TypeErrors from fetch/undici.
    if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
      throw new BlobServiceNotAvailable();
    }
    throw error;
  }
}
// src/multipart/create.ts
// Factory for the public `createMultipartUpload` method: validates options,
// initiates the upload and returns the server-assigned { key, uploadId }.
function createCreateMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(pathname, headers, options);
    return { key, uploadId };
  };
}
// Initiates a multipart upload on the server and returns its response
// (carries the key and uploadId for subsequent part uploads).
async function createMultipartUpload(pathname, headers, options) {
  debug("mpu: create", "pathname:", pathname);
  const params = new URLSearchParams({ pathname });
  try {
    const response = await requestApi(
      `/mpu?${params.toString()}`,
      {
        method: "POST",
        headers: { ...headers, "x-mpu-action": "create" },
        signal: options.abortSignal
      },
      options
    );
    debug("mpu: create", response);
    return response;
  } catch (error) {
    // Network-level failures surface as TypeErrors from fetch/undici.
    if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
      throw new BlobServiceNotAvailable();
    }
    throw error;
  }
}
// src/multipart/upload.ts
var _throttleit = require('throttleit'); var _throttleit2 = _interopRequireDefault(_throttleit);
// Factory for the standalone public `uploadPart` method.
function createUploadPartMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, body, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    // A plain object is almost always an accidental JSON payload.
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const result = await uploadPart({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      part: { blob: body, partNumber: options.partNumber },
      headers,
      options
    });
    return {
      etag: result.etag,
      partNumber: options.partNumber
    };
  };
}
// Uploads a single multipart part. An internal AbortController is linked to
// the caller's abortSignal so that cancelling the overall upload also aborts
// this in-flight part request.
async function uploadPart({
  uploadId,
  key,
  pathname,
  headers,
  options,
  internalAbortController = new AbortController(),
  part
}) {
  var _a3, _b2, _c;
  const params = new URLSearchParams({ pathname });
  const responsePromise = requestApi(
    `/mpu?${params.toString()}`,
    {
      signal: internalAbortController.signal,
      method: "POST",
      headers: {
        ...headers,
        "x-mpu-action": "upload",
        // key may contain arbitrary utf8; HTTP headers must stay us-ascii.
        "x-mpu-key": encodeURIComponent(key),
        "x-mpu-upload-id": uploadId,
        "x-mpu-part-number": part.partNumber.toString()
      },
      // weird things between undici types and native fetch types
      body: part.blob
    },
    options
  );
  // Bridge: forward an external abort to the internal controller.
  function handleAbort() {
    internalAbortController.abort();
  }
  if ((_a3 = options.abortSignal) == null ? void 0 : _a3.aborted) {
    handleAbort();
  } else {
    (_b2 = options.abortSignal) == null ? void 0 : _b2.addEventListener("abort", handleAbort);
  }
  const response = await responsePromise;
  // Remove the listener so long-lived signals don't accumulate one per part.
  (_c = options.abortSignal) == null ? void 0 : _c.removeEventListener("abort", handleAbort);
  return response;
}
// Concurrency cap: browsers limit per-host connections more than Node.js.
var maxConcurrentUploads = typeof window !== "undefined" ? 6 : 8;
// Fixed part size: 8 MiB.
var partSizeInBytes = 8 * 1024 * 1024;
// Memory budget for read-ahead: twice a full round of concurrent parts.
var maxBytesInMemory = maxConcurrentUploads * partSizeInBytes * 2;
// Reads `stream`, slices it into fixed-size parts and uploads up to
// maxConcurrentUploads parts in parallel while keeping buffered bytes under
// maxBytesInMemory. Resolves with the completed { partNumber, etag } list.
function uploadAllParts({
  uploadId,
  key,
  pathname,
  stream,
  headers,
  options,
  totalToLoad
}) {
  debug("mpu: upload init", "key:", key);
  const internalAbortController = new AbortController();
  return new Promise((resolve, reject) => {
    // Shared mutable state between the reader loop and the uploaders.
    const partsToUpload = [];
    const completedParts = [];
    const reader = stream.getReader();
    let activeUploads = 0;
    let reading = false;
    let currentPartNumber = 1;
    let rejected = false;
    let currentBytesInMemory = 0;
    let doneReading = false;
    let bytesSent = 0;
    // Chunks accumulated for the part currently being assembled.
    let arrayBuffers = [];
    let currentPartBytesRead = 0;
    let onUploadProgress;
    const totalLoadedPerPartNumber = {};
    if (options.onUploadProgress) {
      // Aggregate per-part progress into one throttled callback (150ms).
      onUploadProgress = _throttleit2.default.call(void 0, () => {
        var _a3;
        const loaded = Object.values(totalLoadedPerPartNumber).reduce(
          (acc, cur) => {
            return acc + cur;
          },
          0
        );
        const total = totalToLoad || loaded;
        // NOTE(review): `(loaded / totalToLoad || loaded)` falls back to the
        // raw byte count when the ratio is 0 -- looks suspect; confirm intent.
        const percentage = totalToLoad > 0 ? Number(((loaded / totalToLoad || loaded) * 100).toFixed(2)) : 0;
        (_a3 = options.onUploadProgress) == null ? void 0 : _a3.call(options, { loaded, total, percentage });
      }, 150);
    }
    read().catch(cancel);
    // Pulls from the stream while under the memory budget, slicing bytes into
    // fixed-size parts and queueing each finished part for upload.
    async function read() {
      debug(
        "mpu: upload read start",
        "activeUploads:",
        activeUploads,
        "currentBytesInMemory:",
        `${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
        "bytesSent:",
        bytes(bytesSent)
      );
      reading = true;
      while (currentBytesInMemory < maxBytesInMemory && !rejected) {
        try {
          const { value, done } = await reader.read();
          if (done) {
            doneReading = true;
            debug("mpu: upload read consumed the whole stream");
            // Flush the final, possibly short, part.
            if (arrayBuffers.length > 0) {
              partsToUpload.push({
                partNumber: currentPartNumber++,
                blob: new Blob(arrayBuffers, {
                  type: "application/octet-stream"
                })
              });
              sendParts();
            }
            reading = false;
            return;
          }
          currentBytesInMemory += value.byteLength;
          // A single read() result may span multiple parts; split it.
          let valueOffset = 0;
          while (valueOffset < value.byteLength) {
            const remainingPartSize = partSizeInBytes - currentPartBytesRead;
            const endOffset = Math.min(
              valueOffset + remainingPartSize,
              value.byteLength
            );
            const chunk = value.slice(valueOffset, endOffset);
            arrayBuffers.push(chunk);
            currentPartBytesRead += chunk.byteLength;
            valueOffset = endOffset;
            // A full part has been assembled: queue it and start fresh.
            if (currentPartBytesRead === partSizeInBytes) {
              partsToUpload.push({
                partNumber: currentPartNumber++,
                blob: new Blob(arrayBuffers, {
                  type: "application/octet-stream"
                })
              });
              arrayBuffers = [];
              currentPartBytesRead = 0;
              sendParts();
            }
          }
        } catch (error) {
          cancel(error);
        }
      }
      debug(
        "mpu: upload read end",
        "activeUploads:",
        activeUploads,
        "currentBytesInMemory:",
        `${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
        "bytesSent:",
        bytes(bytesSent)
      );
      reading = false;
    }
    // Uploads one queued part, then triggers more uploads, resumes reading,
    // or resolves the overall promise when everything is done.
    async function sendPart(part) {
      activeUploads++;
      debug(
        "mpu: upload send part start",
        "partNumber:",
        part.partNumber,
        "size:",
        part.blob.size,
        "activeUploads:",
        activeUploads,
        "currentBytesInMemory:",
        `${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
        "bytesSent:",
        bytes(bytesSent)
      );
      try {
        // Track per-part progress so the aggregate callback can sum it.
        const uploadProgressForPart = options.onUploadProgress ? (event) => {
          totalLoadedPerPartNumber[part.partNumber] = event.loaded;
          if (onUploadProgress) {
            onUploadProgress();
          }
        } : void 0;
        const completedPart = await uploadPart({
          uploadId,
          key,
          pathname,
          headers,
          options: {
            ...options,
            onUploadProgress: uploadProgressForPart
          },
          internalAbortController,
          part
        });
        debug(
          "mpu: upload send part end",
          "partNumber:",
          part.partNumber,
          "activeUploads",
          activeUploads,
          "currentBytesInMemory:",
          `${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
          "bytesSent:",
          bytes(bytesSent)
        );
        if (rejected) {
          return;
        }
        completedParts.push({
          partNumber: part.partNumber,
          etag: completedPart.etag
        });
        // The part's bytes are no longer held in memory; free the budget.
        currentBytesInMemory -= part.blob.size;
        activeUploads--;
        bytesSent += part.blob.size;
        if (partsToUpload.length > 0) {
          sendParts();
        }
        if (doneReading) {
          // Last upload finished after the stream was fully consumed: done.
          if (activeUploads === 0) {
            reader.releaseLock();
            resolve(completedParts);
          }
          return;
        }
        // Memory was freed: resume reading if the reader loop had stopped.
        if (!reading) {
          read().catch(cancel);
        }
      } catch (error) {
        cancel(error);
      }
    }
    // Drains the queue up to the concurrency limit.
    function sendParts() {
      if (rejected) {
        return;
      }
      debug(
        "send parts",
        "activeUploads",
        activeUploads,
        "partsToUpload",
        partsToUpload.length
      );
      while (activeUploads < maxConcurrentUploads && partsToUpload.length > 0) {
        const partToSend = partsToUpload.shift();
        if (partToSend) {
          void sendPart(partToSend);
        }
      }
    }
    // Aborts all in-flight part uploads and rejects exactly once, translating
    // low-level fetch failures into BlobServiceNotAvailable.
    function cancel(error) {
      if (rejected) {
        return;
      }
      rejected = true;
      internalAbortController.abort();
      reader.releaseLock();
      if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
        reject(new BlobServiceNotAvailable());
      } else {
        reject(error);
      }
    }
  });
}
// src/multipart/create-uploader.ts
// Factory for `createMultipartUploader`: returns an object that pins the
// key/uploadId so callers only pass part numbers and bodies.
function createCreateMultipartUploaderMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(pathname, headers, options);
    return {
      key,
      uploadId,
      // Uploads one part; returns its { etag, partNumber } descriptor.
      async uploadPart(partNumber, body) {
        // A plain object is almost always an accidental JSON payload.
        if (isPlainObject(body)) {
          throw new BlobError(
            "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
          );
        }
        const result = await uploadPart({
          uploadId,
          key,
          pathname,
          part: { partNumber, blob: body },
          headers,
          options
        });
        return { etag: result.etag, partNumber };
      },
      // Finalizes the upload with the collected part descriptors.
      async complete(parts) {
        return completeMultipartUpload({
          uploadId,
          key,
          pathname,
          parts,
          headers,
          options
        });
      }
    };
  };
}
// src/put.ts
// src/multipart/uncontrolled.ts
// SDK-driven multipart flow: create -> upload all parts -> complete. Progress
// is reported from uploadAllParts only, so the create/complete calls run with
// onUploadProgress stripped.
async function uncontrolledMultipartUpload(pathname, body, headers, options) {
  debug("mpu: init", "pathname:", pathname, "headers:", headers);
  const optionsWithoutOnUploadProgress = {
    ...options,
    onUploadProgress: void 0
  };
  const { key, uploadId } = await createMultipartUpload(
    pathname,
    headers,
    optionsWithoutOnUploadProgress
  );
  const totalToLoad = computeBodyLength(body);
  const stream = await toReadableStream(body);
  const parts = await uploadAllParts({
    uploadId,
    key,
    pathname,
    // @ts-expect-error ReadableStream<ArrayBuffer | Uint8Array> is compatible at runtime
    stream,
    headers,
    options,
    totalToLoad
  });
  return completeMultipartUpload({
    uploadId,
    key,
    pathname,
    parts,
    headers,
    options: optionsWithoutOnUploadProgress
  });
}
// src/put.ts
// Factory producing the public put() method. allowedOptions restricts which
// put options become headers; extraChecks/getToken are supplied by the
// client or server entry points.
function createPutMethod({
allowedOptions,
getToken,
extraChecks
}) {
return async function put(pathname, body, optionsInput) {
if (!body) {
throw new BlobError("body is required");
}
// Plain objects are almost always a caller mistake (forgot JSON.stringify).
if (isPlainObject(body)) {
throw new BlobError(
"Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
);
}
const options = await createPutOptions({
pathname,
options: optionsInput,
extraChecks,
getToken
});
const headers = createPutHeaders(allowedOptions, options);
// multipart: true delegates to the automatic multipart machinery.
if (options.multipart === true) {
return uncontrolledMultipartUpload(pathname, body, headers, options);
}
// Throttle progress callbacks to at most one every 100ms.
const onUploadProgress = options.onUploadProgress ? _throttleit2.default.call(void 0, options.onUploadProgress, 100) : void 0;
const params = new URLSearchParams({ pathname });
const response = await requestApi(
`/?${params.toString()}`,
{
method: "PUT",
body,
headers,
signal: options.abortSignal
},
{
...options,
onUploadProgress
}
);
// Only the documented public fields are forwarded to the caller.
return {
url: response.url,
downloadUrl: response.downloadUrl,
pathname: response.pathname,
contentType: response.contentType,
contentDisposition: response.contentDisposition,
etag: response.etag
};
};
}
// src/create-folder.ts
// Creates an "empty folder" marker blob. Folders are emulated by a pathname
// ending in "/" — the slash is appended when missing.
async function createFolder(pathname, options = {}) {
  const folderPathname = pathname.endsWith("/") ? pathname : `${pathname}/`;
  // Folders must keep their exact name, so random suffixing is disabled.
  const headers = { [putOptionHeaderMap.addRandomSuffix]: "0" };
  const search = new URLSearchParams({ pathname: folderPathname });
  const response = await requestApi(
    `/?${search.toString()}`,
    {
      method: "PUT",
      headers,
      signal: options.abortSignal
    },
    options
  );
  return { url: response.url, pathname: response.pathname };
}
// Public API surface of this generated CJS chunk (bundler-emitted re-exports).
exports.getTokenFromOptionsOrEnv = getTokenFromOptionsOrEnv; exports.BlobError = BlobError; exports.getDownloadUrl = getDownloadUrl; exports.disallowedPathnameCharacters = disallowedPathnameCharacters; exports.MAXIMUM_PATHNAME_LENGTH = MAXIMUM_PATHNAME_LENGTH; exports.BlobAccessError = BlobAccessError; exports.BlobContentTypeNotAllowedError = BlobContentTypeNotAllowedError; exports.BlobPathnameMismatchError = BlobPathnameMismatchError; exports.BlobClientTokenExpiredError = BlobClientTokenExpiredError; exports.BlobFileTooLargeError = BlobFileTooLargeError; exports.BlobStoreNotFoundError = BlobStoreNotFoundError; exports.BlobStoreSuspendedError = BlobStoreSuspendedError; exports.BlobUnknownError = BlobUnknownError; exports.BlobNotFoundError = BlobNotFoundError; exports.BlobServiceNotAvailable = BlobServiceNotAvailable; exports.BlobServiceRateLimited = BlobServiceRateLimited; exports.BlobRequestAbortedError = BlobRequestAbortedError; exports.BlobPreconditionFailedError = BlobPreconditionFailedError; exports.requestApi = requestApi; exports.createCompleteMultipartUploadMethod = createCompleteMultipartUploadMethod; exports.createCreateMultipartUploadMethod = createCreateMultipartUploadMethod; exports.createUploadPartMethod = createUploadPartMethod; exports.createCreateMultipartUploaderMethod = createCreateMultipartUploaderMethod; exports.createPutMethod = createPutMethod; exports.createFolder = createFolder;
/*!
* bytes
* Copyright(c) 2012-2014 TJ Holowaychuk
* Copyright(c) 2015 Jed Watson
* MIT Licensed
*/
//# sourceMappingURL=chunk-23VLASYP.cjs.map

Sorry, the diff of this file is too big to display

// src/helpers.ts
import { isNodeProcess } from "is-node-process";
// src/multipart/helpers.ts
import isBuffer from "is-buffer";
import { Readable } from "stream";
// Feature detection: resolves true when the runtime can construct a Blob
// from a Uint8Array and read the bytes back (a "hello" round-trip); resolves
// false on any failure, synchronous or asynchronous.
var supportsNewBlobFromArrayBuffer = new Promise((resolve) => {
try {
const helloAsArrayBuffer = new Uint8Array([104, 101, 108, 108, 111]);
const blob = new Blob([helloAsArrayBuffer]);
blob.text().then((text) => {
resolve(text === "hello");
}).catch(() => {
resolve(false);
});
} catch {
resolve(false);
}
});
// Normalizes any supported body type (web/Node streams, Blob, ArrayBuffer,
// Buffer, string) into a web ReadableStream.
async function toReadableStream(value) {
if (value instanceof ReadableStream) {
return value;
}
if (value instanceof Blob) {
return value.stream();
}
if (isNodeJsReadableStream(value)) {
return Readable.toWeb(value);
}
// Remaining cases are byte-ish values: convert to a Uint8Array/Buffer.
let streamValue;
if (value instanceof ArrayBuffer) {
streamValue = new Uint8Array(value);
} else if (isNodeJsBuffer(value)) {
streamValue = value;
} else {
streamValue = stringToUint8Array(value);
}
// Prefer Blob.stream() when the runtime supports Blob-from-bytes; otherwise
// emit the bytes through a single-chunk ReadableStream.
if (await supportsNewBlobFromArrayBuffer) {
return new Blob([streamValue]).stream();
}
return new ReadableStream({
start(controller) {
controller.enqueue(streamValue);
controller.close();
}
});
}
// Duck-types a Node.js Readable stream without importing the class: checks
// the public pipe/readable surface plus the private _read/_readableState
// internals that real Readables carry.
// Fix: `typeof null === "object"`, so the original threw a TypeError when
// handed null; now null/undefined (and all non-objects) return false, and
// the result is always a strict boolean.
function isNodeJsReadableStream(value) {
  if (value === null || typeof value !== "object") {
    return false;
  }
  return (
    typeof value.pipe === "function" &&
    Boolean(value.readable) &&
    typeof value._read === "function" &&
    // @ts-expect-error _readableState does exists on Readable
    typeof value._readableState === "object"
  );
}
// Encodes a JS string to its UTF-8 bytes.
function stringToUint8Array(s) {
  return new TextEncoder().encode(s);
}
// Detects Node.js Buffer instances via the is-buffer package (robust across
// realms where `value instanceof Buffer` would fail).
function isNodeJsBuffer(value) {
return isBuffer(value);
}
// src/bytes.ts
// Matches "<number><unit>" byte strings, e.g. "1.5mb" or "-2 kb" (case-insensitive).
var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb|pb)$/i;
// Multiplier for each supported unit (powers of 1024).
var map = {
  b: 1,
  kb: 1 << 10,
  mb: 1 << 20,
  gb: 1 << 30,
  tb: 1024 ** 4,
  pb: 1024 ** 5
};
// Parses a byte-size value. Numbers pass through unchanged; strings like
// "10mb" become a (floored) byte count. Returns null for unparsable values.
function bytes(val) {
  if (typeof val === "number" && !Number.isNaN(val)) {
    return val;
  }
  if (typeof val !== "string") {
    return null;
  }
  const match = parseRegExp.exec(val);
  let unit = "b";
  let amount;
  if (match) {
    const [, numericPart, , , unitPart] = match;
    if (!numericPart) {
      return null;
    }
    amount = parseFloat(numericPart);
    if (unitPart) {
      unit = unitPart.toLowerCase();
    }
  } else {
    // No recognized unit suffix: fall back to a plain integer parse.
    amount = parseInt(val, 10);
  }
  if (Number.isNaN(amount)) {
    return null;
  }
  return Math.floor(map[unit] * amount);
}
// src/helpers.ts
var defaultVercelBlobApiUrl = "https://vercel.com/api/blob";
// Resolves the read-write token: an explicit `token` option wins, then the
// BLOB_READ_WRITE_TOKEN environment variable; otherwise a BlobError is thrown.
function getTokenFromOptionsOrEnv(options) {
  const explicitToken = options == null ? void 0 : options.token;
  if (explicitToken) {
    return explicitToken;
  }
  const envToken = process.env.BLOB_READ_WRITE_TOKEN;
  if (envToken) {
    return envToken;
  }
  throw new BlobError(
    "No token found. Either configure the `BLOB_READ_WRITE_TOKEN` environment variable, or pass a `token` option to your calls."
  );
}
// Base class for every SDK error; prefixes all messages with "Vercel Blob: ".
var BlobError = class extends Error {
constructor(message) {
super(`Vercel Blob: ${message}`);
}
};
// Returns a variant of a blob URL that forces a browser download by setting
// the `download=1` query parameter.
function getDownloadUrl(blobUrl) {
  const parsed = new URL(blobUrl);
  parsed.searchParams.set("download", "1");
  return parsed.toString();
}
// True only for "plain" objects (object literals and Object.create(null)
// results). Class instances, arrays/iterables, and values carrying
// Symbol.toStringTag are all rejected.
function isPlainObject(value) {
  if (value === null || typeof value !== "object") {
    return false;
  }
  const proto = Object.getPrototypeOf(value);
  const hasPlainPrototype =
    proto === null ||
    proto === Object.prototype ||
    Object.getPrototypeOf(proto) === null;
  if (!hasPlainPrototype) {
    return false;
  }
  return !(Symbol.toStringTag in value) && !(Symbol.iterator in value);
}
// Substrings rejected in user-supplied pathnames ("//" would create ambiguous
// empty path segments).
var disallowedPathnameCharacters = ["//"];
// Detects streaming request-body support (fetch `duplex: "half"`). When the
// runtime supports it, constructing the Request reads the duplex getter and
// does not synthesize a Content-Type header for the stream body.
var supportsRequestStreams = (() => {
if (isNodeProcess()) {
return true;
}
// Streamed bodies are disabled against localhost API URLs.
const apiUrl = getApiUrl();
if (apiUrl.startsWith("http://localhost")) {
return false;
}
let duplexAccessed = false;
const hasContentType = new Request(getApiUrl(), {
body: new ReadableStream(),
method: "POST",
// @ts-expect-error -- TypeScript doesn't yet have duplex but it's in the spec: https://github.com/microsoft/TypeScript-DOM-lib-generator/pull/1729
get duplex() {
duplexAccessed = true;
return "half";
}
}).headers.has("Content-Type");
return duplexAccessed && !hasContentType;
})();
// Builds an absolute API URL. VERCEL_BLOB_API_URL (or its NEXT_PUBLIC_ twin)
// overrides the default host; the try/catch guards environments where
// `process` is not defined.
function getApiUrl(pathname = "") {
  let baseUrl = null;
  try {
    baseUrl =
      process.env.VERCEL_BLOB_API_URL ||
      process.env.NEXT_PUBLIC_VERCEL_BLOB_API_URL;
  } catch {
    // no `process` available (browser build); fall back to the default host
  }
  return `${baseUrl || defaultVercelBlobApiUrl}${pathname}`;
}
// Cached encoder; TextEncoder is missing in some very old runtimes.
var TEXT_ENCODER = typeof TextEncoder === "function" ? new TextEncoder() : null;
// Best-effort byte length of a request body: strings are measured as UTF-8,
// buffer-likes report byteLength, Blob-likes report size. Returns 0 when the
// length cannot be determined (e.g. streams, empty/absent bodies).
function computeBodyLength(body) {
  if (!body) {
    return 0;
  }
  if (typeof body === "string") {
    // Blob is the fallback when TextEncoder is unavailable.
    return TEXT_ENCODER ? TEXT_ENCODER.encode(body).byteLength : new Blob([body]).size;
  }
  if ("byteLength" in body && typeof body.byteLength === "number") {
    return body.byteLength;
  }
  if ("size" in body && typeof body.size === "number") {
    return body.size;
  }
  return 0;
}
// Re-chunks an arbitrary byte stream into fixed-size chunks, reporting each
// emitted chunk's size through onProgress. The trailing partial chunk is
// flushed at stream end.
// NOTE(review): the reason for queueMicrotask here is not visible in this
// file — presumably to defer enqueueing out of the transform call; confirm
// before changing the scheduling.
var createChunkTransformStream = (chunkSize, onProgress) => {
let buffer = new Uint8Array(0);
return new TransformStream({
transform(chunk, controller) {
queueMicrotask(() => {
// Append the incoming bytes to the pending buffer.
const newBuffer = new Uint8Array(buffer.length + chunk.byteLength);
newBuffer.set(buffer);
newBuffer.set(new Uint8Array(chunk), buffer.length);
buffer = newBuffer;
// Emit as many full-size chunks as are buffered.
while (buffer.length >= chunkSize) {
const newChunk = buffer.slice(0, chunkSize);
controller.enqueue(newChunk);
onProgress == null ? void 0 : onProgress(newChunk.byteLength);
buffer = buffer.slice(chunkSize);
}
});
},
flush(controller) {
queueMicrotask(() => {
// Emit whatever is left as a final, possibly smaller, chunk.
if (buffer.length > 0) {
controller.enqueue(buffer);
onProgress == null ? void 0 : onProgress(buffer.byteLength);
}
});
}
});
};
// True when value is a web ReadableStream. The globalThis guard protects
// runtimes where ReadableStream is not defined.
function isReadableStream(value) {
  if (!globalThis.ReadableStream) {
    return false;
  }
  return value instanceof ReadableStream;
}
// True for either a web ReadableStream or a Node.js Readable stream.
function isStream(value) {
  return Boolean(isReadableStream(value) || isNodeJsReadableStream(value));
}
// src/api.ts
import retry from "async-retry";
// src/debug.ts
// Debug-logging toggle, resolved once at module load: active when DEBUG or
// NEXT_PUBLIC_DEBUG contains "blob". The try/catch guards environments
// without a `process` global.
var debugIsActive = false;
var _a, _b;
try {
if (((_a = process.env.DEBUG) == null ? void 0 : _a.includes("blob")) || ((_b = process.env.NEXT_PUBLIC_DEBUG) == null ? void 0 : _b.includes("blob"))) {
debugIsActive = true;
}
} catch {
}
// Emits a namespaced debug message, but only when debugIsActive was enabled
// at module load.
function debug(message, ...args) {
  if (!debugIsActive) {
    return;
  }
  console.debug(`vercel-blob: ${message}`, ...args);
}
// src/dom-exception.ts
var _a2;
// DOMException, with a fallback for runtimes lacking the global: atob("~")
// throws a native DOMException, and its constructor is recovered from the
// thrown error's prototype chain.
var DOMException2 = (_a2 = globalThis.DOMException) != null ? _a2 : (() => {
try {
atob("~");
} catch (err) {
return Object.getPrototypeOf(err).constructor;
}
})();
// src/is-network-error.ts
var objectToString = Object.prototype.toString;
// Cross-realm Error check (instanceof fails across iframe/vm boundaries).
var isError = (value) => objectToString.call(value) === "[object Error]";
// Known network-failure messages from the various fetch implementations.
var errorMessages = /* @__PURE__ */ new Set([
  "network error",
  // Chrome
  "Failed to fetch",
  // Chrome
  "NetworkError when attempting to fetch resource.",
  // Firefox
  "The Internet connection appears to be offline.",
  // Safari 16
  "Load failed",
  // Safari 17+
  "Network request failed",
  // `cross-fetch`
  "fetch failed",
  // Undici (Node.js)
  "terminated"
  // Undici (Node.js)
]);
// Heuristically detects a network-level fetch failure: a TypeError whose
// message matches one of the well-known runtime network-error strings.
function isNetworkError(error) {
  if (!error || !isError(error) || error.name !== "TypeError" || typeof error.message !== "string") {
    return false;
  }
  // Safari 17's network "Load failed" errors carry no stack; a "Load failed"
  // TypeError with a stack is something else.
  if (error.message === "Load failed") {
    return error.stack === void 0;
  }
  return errorMessages.has(error.message);
}
// src/fetch.ts
import { fetch } from "undici";
// Transport feature flags: whether fetch exists at all, and whether it can
// stream request bodies (required for fetch-based upload progress).
var hasFetch = typeof fetch === "function";
var hasFetchWithUploadProgress = hasFetch && supportsRequestStreams;
// Chunk size fed through the progress TransformStream (64 KiB).
var CHUNK_SIZE = 64 * 1024;
// Issues the request via fetch. With onUploadProgress set, the body is
// normalized to a stream and piped through a chunking TransformStream that
// reports the cumulative byte count as chunks are pulled.
var blobFetch = async ({
input,
init,
onUploadProgress
}) => {
debug("using fetch");
let body;
if (init.body) {
if (onUploadProgress) {
// NOTE(review): progress reflects bytes pulled from the stream by fetch,
// not bytes acknowledged by the server.
const stream = await toReadableStream(init.body);
let loaded = 0;
const chunkTransformStream = createChunkTransformStream(
CHUNK_SIZE,
(newLoaded) => {
loaded += newLoaded;
onUploadProgress(loaded);
}
);
body = stream.pipeThrough(chunkTransformStream);
} else {
body = init.body;
}
}
// Streamed request bodies require duplex: "half" where supported.
const duplex = supportsRequestStreams && body && isStream(body) ? "half" : void 0;
return fetch(
input,
// @ts-expect-error -- Blob and Nodejs Blob are triggering type errors, fine with it
{
...init,
...init.body ? { body } : {},
duplex
}
);
};
// src/xhr.ts
// XMLHttpRequest only exists in browser-like environments.
var hasXhr = typeof XMLHttpRequest !== "undefined";
// fetch-compatible wrapper around XMLHttpRequest. Used for upload progress in
// browsers whose fetch cannot stream request bodies; resolves with a real
// Response object so callers are transport-agnostic.
var blobXhr = async ({
input,
init,
onUploadProgress
}) => {
debug("using xhr");
let body = null;
if (init.body) {
// XHR cannot send a web ReadableStream; buffer it into a Blob first.
if (isReadableStream(init.body)) {
body = await new Response(init.body).blob();
} else {
body = init.body;
}
}
return new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.open(init.method || "GET", input.toString(), true);
if (onUploadProgress) {
xhr.upload.addEventListener("progress", (event) => {
if (event.lengthComputable) {
onUploadProgress(event.loaded);
}
});
}
xhr.onload = () => {
var _a3;
// A load event can still fire for a request whose signal was aborted.
if ((_a3 = init.signal) == null ? void 0 : _a3.aborted) {
reject(new DOMException("The user aborted the request.", "AbortError"));
return;
}
// Rebuild a fetch Response from the raw XHR result: parse the header
// block line by line (keys lower-cased, multi-colon values rejoined).
const headers = new Headers();
const rawHeaders = xhr.getAllResponseHeaders().trim().split(/[\r\n]+/);
rawHeaders.forEach((line) => {
const parts = line.split(": ");
const key = parts.shift();
const value = parts.join(": ");
if (key) headers.set(key.toLowerCase(), value);
});
const response = new Response(xhr.response, {
status: xhr.status,
statusText: xhr.statusText,
headers
});
resolve(response);
};
// Network and timeout failures are surfaced as TypeErrors, mirroring fetch.
xhr.onerror = () => {
reject(new TypeError("Network request failed"));
};
xhr.ontimeout = () => {
reject(new TypeError("Network request timed out"));
};
xhr.onabort = () => {
reject(new DOMException("The user aborted a request.", "AbortError"));
};
if (init.headers) {
const headers = new Headers(init.headers);
headers.forEach((value, key) => {
xhr.setRequestHeader(key, value);
});
}
if (init.signal) {
init.signal.addEventListener("abort", () => {
xhr.abort();
});
// The signal may already be aborted before send().
if (init.signal.aborted) {
xhr.abort();
return;
}
}
xhr.send(body);
});
};
// src/request.ts
// Picks the best available transport. Upload progress prefers streaming
// fetch, then XHR; without progress, plain fetch is used with XHR as the
// final fallback for legacy browsers.
var blobRequest = async ({ input, init, onUploadProgress }) => {
  if (onUploadProgress && hasFetchWithUploadProgress) {
    return blobFetch({ input, init, onUploadProgress });
  }
  if (onUploadProgress && hasXhr) {
    return blobXhr({ input, init, onUploadProgress });
  }
  if (hasFetch) {
    return blobFetch({ input, init });
  }
  if (hasXhr) {
    return blobXhr({ input, init });
  }
  throw new Error("No request implementation available");
};
// src/api.ts
// Maximum pathname length (in characters) accepted by the API.
var MAXIMUM_PATHNAME_LENGTH = 950;
// One typed error per API error code; all extend BlobError so callers can
// catch the whole family or an individual case.
var BlobAccessError = class extends BlobError {
constructor() {
super("Access denied, please provide a valid token for this resource.");
}
};
var BlobContentTypeNotAllowedError = class extends BlobError {
constructor(message) {
super(`Content type mismatch, ${message}.`);
}
};
var BlobPathnameMismatchError = class extends BlobError {
constructor(message) {
super(
`Pathname mismatch, ${message}. Check the pathname used in upload() or put() matches the one from the client token.`
);
}
};
var BlobClientTokenExpiredError = class extends BlobError {
constructor() {
super("Client token has expired.");
}
};
var BlobFileTooLargeError = class extends BlobError {
constructor(message) {
super(`File is too large, ${message}.`);
}
};
var BlobStoreNotFoundError = class extends BlobError {
constructor() {
super("This store does not exist.");
}
};
var BlobStoreSuspendedError = class extends BlobError {
constructor() {
super("This store has been suspended.");
}
};
var BlobUnknownError = class extends BlobError {
constructor() {
super("Unknown error, please visit https://vercel.com/help.");
}
};
var BlobNotFoundError = class extends BlobError {
constructor() {
super("The requested blob does not exist");
}
};
var BlobServiceNotAvailable = class extends BlobError {
constructor() {
super("The blob service is currently not available. Please try again.");
}
};
var BlobServiceRateLimited = class extends BlobError {
// seconds: optional server-provided retry-after delay.
constructor(seconds) {
super(
`Too many requests please lower the number of concurrent requests ${seconds ? ` - try again in ${seconds} seconds` : ""}.`
);
// Seconds to wait before retrying; 0 when the server gave no retry-after.
this.retryAfter = seconds != null ? seconds : 0;
}
};
var BlobRequestAbortedError = class extends BlobError {
constructor() {
super("The request was aborted.");
}
};
var BlobPreconditionFailedError = class extends BlobError {
constructor() {
super("Precondition failed: ETag mismatch.");
}
};
// Wire-protocol version sent in the x-api-version header.
var BLOB_API_VERSION = 12;
// Returns the API version as a string, honoring the
// VERCEL_BLOB_API_VERSION_OVERRIDE env vars when set. The try/catch guards
// environments without a `process` global.
function getApiVersion() {
  let versionOverride = null;
  try {
    versionOverride =
      process.env.VERCEL_BLOB_API_VERSION_OVERRIDE ||
      process.env.NEXT_PUBLIC_VERCEL_BLOB_API_VERSION_OVERRIDE;
  } catch {
    // no `process` available; use the built-in version
  }
  return `${versionOverride != null ? versionOverride : BLOB_API_VERSION}`;
}
// Number of times requestApi retries retryable failures. Override with the
// VERCEL_BLOB_RETRIES env var; defaults to 10.
// Fix: parseInt never throws, so a non-numeric override used to return NaN
// (the catch only guards a missing `process` global), silently breaking the
// retry configuration — an unparsable value now falls back to the default.
function getRetries() {
  const DEFAULT_RETRIES = 10;
  try {
    const parsed = parseInt(process.env.VERCEL_BLOB_RETRIES || "10", 10);
    return Number.isNaN(parsed) ? DEFAULT_RETRIES : parsed;
  } catch {
    // `process` unavailable (non-Node environment)
    return DEFAULT_RETRIES;
  }
}
// Builds a BlobServiceRateLimited error, forwarding the server's retry-after
// delay (seconds) when the header is present.
function createBlobServiceRateLimited(response) {
  const retryAfterHeader = response.headers.get("retry-after");
  const seconds = retryAfterHeader ? parseInt(retryAfterHeader, 10) : void 0;
  return new BlobServiceRateLimited(seconds);
}
// Maps a failed API response to a typed BlobError subclass. Returns both the
// resolved error code and the error instance so callers can decide whether
// the failure is retryable.
async function getBlobError(response) {
  let code;
  let message;
  try {
    const data = await response.json();
    const apiError = data.error;
    code = (apiError == null ? void 0 : apiError.code) != null ? apiError.code : "unknown_error";
    message = apiError == null ? void 0 : apiError.message;
  } catch {
    // Non-JSON error body (e.g. an HTML error page)
    code = "unknown_error";
  }
  // Some failures arrive under generic codes with a descriptive message;
  // re-classify them so callers get a precise error type.
  if (message != null && message.includes("contentType") && message.includes("is not allowed")) {
    code = "content_type_not_allowed";
  }
  if (message != null && message.includes('"pathname"') && message.includes("does not match the token payload")) {
    code = "client_token_pathname_mismatch";
  }
  if (message === "Token expired") {
    code = "client_token_expired";
  }
  if (message != null && message.includes("the file length cannot be greater than")) {
    code = "file_too_large";
  }
  let error;
  if (code === "store_suspended") {
    error = new BlobStoreSuspendedError();
  } else if (code === "forbidden") {
    error = new BlobAccessError();
  } else if (code === "content_type_not_allowed") {
    error = new BlobContentTypeNotAllowedError(message);
  } else if (code === "client_token_pathname_mismatch") {
    error = new BlobPathnameMismatchError(message);
  } else if (code === "client_token_expired") {
    error = new BlobClientTokenExpiredError();
  } else if (code === "file_too_large") {
    error = new BlobFileTooLargeError(message);
  } else if (code === "not_found") {
    error = new BlobNotFoundError();
  } else if (code === "store_not_found") {
    error = new BlobStoreNotFoundError();
  } else if (code === "bad_request") {
    error = new BlobError(message != null ? message : "Bad request");
  } else if (code === "service_unavailable") {
    error = new BlobServiceNotAvailable();
  } else if (code === "rate_limited") {
    error = createBlobServiceRateLimited(response);
  } else if (code === "precondition_failed") {
    error = new BlobPreconditionFailedError();
  } else {
    // covers "unknown_error", "not_allowed" and anything unexpected
    error = new BlobUnknownError();
  }
  return { code, error };
}
// Core HTTP wrapper used by every SDK command: adds auth/version headers,
// wires upload progress, retries retryable failures and parses the JSON
// response body.
async function requestApi(pathname, init, commandOptions) {
const apiVersion = getApiVersion();
const token = getTokenFromOptionsOrEnv(commandOptions);
const extraHeaders = getProxyThroughAlternativeApiHeaderFromEnv();
// The 4th underscore-separated token segment seeds a request id that lets
// the server correlate retries of the same logical request.
const [, , , storeId = ""] = token.split("_");
const requestId = `${storeId}:${Date.now()}:${Math.random().toString(16).slice(2)}`;
let retryCount = 0;
let bodyLength = 0;
let totalLoaded = 0;
const sendBodyLength = (commandOptions == null ? void 0 : commandOptions.onUploadProgress) || shouldUseXContentLength();
if (init.body && // 1. For upload progress we always need to know the total size of the body
// 2. In development we need the header for put() to work correctly when passing a stream
sendBodyLength) {
bodyLength = computeBodyLength(init.body);
}
// Emit an initial 0% progress event before the request starts.
if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
commandOptions.onUploadProgress({
loaded: 0,
total: bodyLength,
percentage: 0
});
}
const apiResponse = await retry(
async (bail) => {
let res;
try {
res = await blobRequest({
input: getApiUrl(pathname),
init: {
...init,
headers: {
"x-api-blob-request-id": requestId,
"x-api-blob-request-attempt": String(retryCount),
"x-api-version": apiVersion,
...sendBodyLength ? { "x-content-length": String(bodyLength) } : {},
authorization: `Bearer ${token}`,
...extraHeaders,
// caller-provided headers win over the defaults above
...init.headers
}
},
onUploadProgress: (commandOptions == null ? void 0 : commandOptions.onUploadProgress) ? (loaded) => {
var _a3;
const total = bodyLength !== 0 ? bodyLength : loaded;
totalLoaded = loaded;
const percentage = bodyLength > 0 ? Number((loaded / total * 100).toFixed(2)) : 0;
// The final 100% event is emitted once the server has answered (below),
// so suppress it here.
if (percentage === 100 && bodyLength > 0) {
return;
}
(_a3 = commandOptions.onUploadProgress) == null ? void 0 : _a3.call(commandOptions, {
loaded,
// When passing a stream to put(), we have no way to know the total size of the body.
// Instead of defining total as total?: number we decided to set the total to the currently
// loaded number. This is not inaccurate and way more practical for DX.
// Passing down a stream to put() is very rare
total,
percentage
});
} : void 0
});
} catch (error2) {
// User aborts are final: stop retrying immediately.
if (error2 instanceof DOMException2 && error2.name === "AbortError") {
bail(new BlobRequestAbortedError());
return;
}
// Network-level failures are retryable (throw lets retry() try again).
if (isNetworkError(error2)) {
throw error2;
}
// Any other TypeError is a programming error: fail fast.
if (error2 instanceof TypeError) {
bail(error2);
return;
}
throw error2;
}
if (res.ok) {
return res;
}
const { code, error } = await getBlobError(res);
// Only unknown/unavailable server failures are retried; everything else is
// surfaced to the caller immediately via bail().
if (code === "unknown_error" || code === "service_unavailable" || code === "internal_server_error") {
throw error;
}
bail(error);
},
{
retries: getRetries(),
onRetry: (error) => {
if (error instanceof Error) {
debug(`retrying API request to ${pathname}`, error.message);
}
retryCount = retryCount + 1;
}
}
);
// bail() resolves retry() with undefined; treat that as an unknown failure.
if (!apiResponse) {
throw new BlobUnknownError();
}
// Final 100% progress event once the response has been received.
if (commandOptions == null ? void 0 : commandOptions.onUploadProgress) {
commandOptions.onUploadProgress({
loaded: totalLoaded,
total: totalLoaded,
percentage: 100
});
}
return await apiResponse.json();
}
// Returns the header that routes traffic through an alternative API
// deployment when VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API (or its
// NEXT_PUBLIC_ twin) is set; an empty object otherwise.
function getProxyThroughAlternativeApiHeaderFromEnv() {
  const extraHeaders = {};
  try {
    const env = process.env;
    let value;
    if ("VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API" in env && env.VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API !== void 0) {
      value = env.VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    } else if ("NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API" in env && env.NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API !== void 0) {
      value = env.NEXT_PUBLIC_VERCEL_BLOB_PROXY_THROUGH_ALTERNATIVE_API;
    }
    if (value !== void 0) {
      extraHeaders["x-proxy-through-alternative-api"] = value;
    }
  } catch {
    // `process` unavailable; no extra headers
  }
  return extraHeaders;
}
// Escape hatch: when VERCEL_BLOB_USE_X_CONTENT_LENGTH=1, the x-content-length
// header is always sent. The try/catch guards environments without `process`.
function shouldUseXContentLength() {
  let flag;
  try {
    flag = process.env.VERCEL_BLOB_USE_X_CONTENT_LENGTH;
  } catch {
    return false;
  }
  return flag === "1";
}
// src/put-helpers.ts
// Maps put() option names to their wire header names.
var putOptionHeaderMap = {
  cacheControlMaxAge: "x-cache-control-max-age",
  addRandomSuffix: "x-add-random-suffix",
  allowOverwrite: "x-allow-overwrite",
  contentType: "x-content-type",
  ifMatch: "x-if-match"
};
// Translates user options into request headers, but only for the options the
// calling method explicitly allows; booleans are encoded as "1"/"0".
function createPutHeaders(allowedOptions, options) {
  const headers = {};
  const allowed = (name) => allowedOptions.includes(name);
  if (allowed("contentType") && options.contentType) {
    headers[putOptionHeaderMap.contentType] = options.contentType;
  }
  if (allowed("addRandomSuffix") && options.addRandomSuffix !== void 0) {
    headers[putOptionHeaderMap.addRandomSuffix] = options.addRandomSuffix ? "1" : "0";
  }
  if (allowed("allowOverwrite") && options.allowOverwrite !== void 0) {
    headers[putOptionHeaderMap.allowOverwrite] = options.allowOverwrite ? "1" : "0";
  }
  if (allowed("cacheControlMaxAge") && options.cacheControlMaxAge !== void 0) {
    headers[putOptionHeaderMap.cacheControlMaxAge] = options.cacheControlMaxAge.toString();
  }
  if (allowed("ifMatch") && options.ifMatch) {
    headers[putOptionHeaderMap.ifMatch] = options.ifMatch;
  }
  return headers;
}
// Validates the pathname/options shared by every put-like method, runs the
// method-specific extraChecks, and resolves a client token when a getToken
// callback is supplied. Returns the (possibly token-augmented) options.
async function createPutOptions({ pathname, options, extraChecks, getToken }) {
  if (!pathname) {
    throw new BlobError("pathname is required");
  }
  if (pathname.length > MAXIMUM_PATHNAME_LENGTH) {
    throw new BlobError(`pathname is too long, maximum length is ${MAXIMUM_PATHNAME_LENGTH}`);
  }
  const offending = disallowedPathnameCharacters.find((invalidCharacter) => pathname.includes(invalidCharacter));
  if (offending !== void 0) {
    throw new BlobError(`pathname cannot contain "${offending}", please encode it if needed`);
  }
  if (!options) {
    throw new BlobError("missing options, see usage");
  }
  if (options.access !== "public") {
    throw new BlobError('access must be "public"');
  }
  if (extraChecks) {
    extraChecks(options);
  }
  if (getToken) {
    options.token = await getToken(pathname, options);
  }
  return options;
}
// src/multipart/complete.ts
// Factory for the standalone completeMultipartUpload() method: validates
// options, builds headers, then finalizes the upload identified by
// options.uploadId / options.key.
function createCompleteMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, parts, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    return completeMultipartUpload({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      parts,
      headers: createPutHeaders(allowedOptions, options),
      options
    });
  };
}
// Finalizes a multipart upload by sending the collected {partNumber, etag}
// list. Low-level fetch failures are normalized to BlobServiceNotAvailable.
async function completeMultipartUpload({
uploadId,
key,
pathname,
parts,
headers,
options
}) {
const params = new URLSearchParams({ pathname });
try {
const response = await requestApi(
`/mpu?${params.toString()}`,
{
method: "POST",
headers: {
...headers,
"content-type": "application/json",
"x-mpu-action": "complete",
"x-mpu-upload-id": uploadId,
// key can be any utf8 character so we need to encode it as HTTP headers can only be us-ascii
// https://www.rfc-editor.org/rfc/rfc7230#section-3.2.4
"x-mpu-key": encodeURIComponent(key)
},
body: JSON.stringify(parts),
signal: options.abortSignal
},
options
);
debug("mpu: complete", response);
return response;
} catch (error) {
// "Failed to fetch"/"fetch failed" are the Chrome/undici network-failure
// messages; map them to the SDK's service-unavailable error.
if (error instanceof TypeError && (error.message === "Failed to fetch" || error.message === "fetch failed")) {
throw new BlobServiceNotAvailable();
} else {
throw error;
}
}
}
// src/multipart/create.ts
// Factory for createMultipartUpload(): validates options and returns only
// the identifiers (key/uploadId) needed for subsequent part uploads.
function createCreateMultipartUploadMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(pathname, headers, options);
    return { key, uploadId };
  };
}
// Calls the MPU "create" action and returns the server-assigned identifiers.
// Low-level fetch failures are normalized to BlobServiceNotAvailable.
async function createMultipartUpload(pathname, headers, options) {
  debug("mpu: create", "pathname:", pathname);
  const params = new URLSearchParams({ pathname });
  try {
    const response = await requestApi(
      `/mpu?${params.toString()}`,
      {
        method: "POST",
        headers: { ...headers, "x-mpu-action": "create" },
        signal: options.abortSignal
      },
      options
    );
    debug("mpu: create", response);
    return response;
  } catch (error) {
    const isFetchFailure =
      error instanceof TypeError &&
      (error.message === "Failed to fetch" || error.message === "fetch failed");
    if (isFetchFailure) {
      throw new BlobServiceNotAvailable();
    }
    throw error;
  }
}
// src/multipart/upload.ts
import throttle from "throttleit";
// Factory for the standalone uploadPart() method: validates options, builds
// headers, and uploads one part identified by options.uploadId/key/partNumber.
function createUploadPartMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, body, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    // Plain objects are almost always a caller mistake (forgot JSON.stringify).
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const { partNumber } = options;
    const result = await uploadPart({
      uploadId: options.uploadId,
      key: options.key,
      pathname,
      part: { blob: body, partNumber },
      headers,
      options
    });
    return { etag: result.etag, partNumber };
  };
}
// Uploads one part of a multipart upload. An internal AbortController is
// chained to the caller's abortSignal so one external abort cancels the
// in-flight part request; the listener is removed once the request settles.
async function uploadPart({
uploadId,
key,
pathname,
headers,
options,
internalAbortController = new AbortController(),
part
}) {
var _a3, _b2, _c;
const params = new URLSearchParams({ pathname });
const responsePromise = requestApi(
`/mpu?${params.toString()}`,
{
signal: internalAbortController.signal,
method: "POST",
headers: {
...headers,
"x-mpu-action": "upload",
// key can contain arbitrary utf8; headers must stay us-ascii
"x-mpu-key": encodeURIComponent(key),
"x-mpu-upload-id": uploadId,
"x-mpu-part-number": part.partNumber.toString()
},
// weird things between undici types and native fetch types
body: part.blob
},
options
);
function handleAbort() {
internalAbortController.abort();
}
// If the caller aborted before we even started, cancel immediately;
// otherwise forward a future abort to the internal controller.
if ((_a3 = options.abortSignal) == null ? void 0 : _a3.aborted) {
handleAbort();
} else {
(_b2 = options.abortSignal) == null ? void 0 : _b2.addEventListener("abort", handleAbort);
}
const response = await responsePromise;
(_c = options.abortSignal) == null ? void 0 : _c.removeEventListener("abort", handleAbort);
return response;
}
// Concurrency cap: browsers limit parallel connections more than Node.
var maxConcurrentUploads = typeof window !== "undefined" ? 6 : 8;
// Fixed part size: 8 MiB per uploaded part.
var partSizeInBytes = 8 * 1024 * 1024;
// Backpressure cap on buffered bytes (two rounds of full concurrent parts).
var maxBytesInMemory = maxConcurrentUploads * partSizeInBytes * 2;
function uploadAllParts({
uploadId,
key,
pathname,
stream,
headers,
options,
totalToLoad
}) {
debug("mpu: upload init", "key:", key);
const internalAbortController = new AbortController();
return new Promise((resolve, reject) => {
const partsToUpload = [];
const completedParts = [];
const reader = stream.getReader();
let activeUploads = 0;
let reading = false;
let currentPartNumber = 1;
let rejected = false;
let currentBytesInMemory = 0;
let doneReading = false;
let bytesSent = 0;
let arrayBuffers = [];
let currentPartBytesRead = 0;
let onUploadProgress;
const totalLoadedPerPartNumber = {};
if (options.onUploadProgress) {
onUploadProgress = throttle(() => {
var _a3;
const loaded = Object.values(totalLoadedPerPartNumber).reduce(
(acc, cur) => {
return acc + cur;
},
0
);
const total = totalToLoad || loaded;
const percentage = totalToLoad > 0 ? Number(((loaded / totalToLoad || loaded) * 100).toFixed(2)) : 0;
(_a3 = options.onUploadProgress) == null ? void 0 : _a3.call(options, { loaded, total, percentage });
}, 150);
}
read().catch(cancel);
async function read() {
debug(
"mpu: upload read start",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = true;
while (currentBytesInMemory < maxBytesInMemory && !rejected) {
try {
const { value, done } = await reader.read();
if (done) {
doneReading = true;
debug("mpu: upload read consumed the whole stream");
if (arrayBuffers.length > 0) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
sendParts();
}
reading = false;
return;
}
currentBytesInMemory += value.byteLength;
let valueOffset = 0;
while (valueOffset < value.byteLength) {
const remainingPartSize = partSizeInBytes - currentPartBytesRead;
const endOffset = Math.min(
valueOffset + remainingPartSize,
value.byteLength
);
const chunk = value.slice(valueOffset, endOffset);
arrayBuffers.push(chunk);
currentPartBytesRead += chunk.byteLength;
valueOffset = endOffset;
if (currentPartBytesRead === partSizeInBytes) {
partsToUpload.push({
partNumber: currentPartNumber++,
blob: new Blob(arrayBuffers, {
type: "application/octet-stream"
})
});
arrayBuffers = [];
currentPartBytesRead = 0;
sendParts();
}
}
} catch (error) {
cancel(error);
}
}
debug(
"mpu: upload read end",
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
reading = false;
}
async function sendPart(part) {
activeUploads++;
debug(
"mpu: upload send part start",
"partNumber:",
part.partNumber,
"size:",
part.blob.size,
"activeUploads:",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
try {
const uploadProgressForPart = options.onUploadProgress ? (event) => {
totalLoadedPerPartNumber[part.partNumber] = event.loaded;
if (onUploadProgress) {
onUploadProgress();
}
} : void 0;
const completedPart = await uploadPart({
uploadId,
key,
pathname,
headers,
options: {
...options,
onUploadProgress: uploadProgressForPart
},
internalAbortController,
part
});
debug(
"mpu: upload send part end",
"partNumber:",
part.partNumber,
"activeUploads",
activeUploads,
"currentBytesInMemory:",
`${bytes(currentBytesInMemory)}/${bytes(maxBytesInMemory)}`,
"bytesSent:",
bytes(bytesSent)
);
if (rejected) {
return;
}
completedParts.push({
partNumber: part.partNumber,
etag: completedPart.etag
});
currentBytesInMemory -= part.blob.size;
activeUploads--;
bytesSent += part.blob.size;
if (partsToUpload.length > 0) {
sendParts();
}
if (doneReading) {
if (activeUploads === 0) {
reader.releaseLock();
resolve(completedParts);
}
return;
}
if (!reading) {
read().catch(cancel);
}
} catch (error) {
cancel(error);
}
}
function sendParts() {
if (rejected) {
return;
}
debug(
"send parts",
"activeUploads",
activeUploads,
"partsToUpload",
partsToUpload.length
);
while (activeUploads < maxConcurrentUploads && partsToUpload.length > 0) {
const partToSend = partsToUpload.shift();
if (partToSend) {
void sendPart(partToSend);
}
}
}
// Rejects the whole multipart upload on the first error. Subsequent calls
// are no-ops; in-flight requests are aborted and the reader lock released.
function cancel(error) {
  if (rejected) {
    return;
  }
  rejected = true;
  internalAbortController.abort();
  reader.releaseLock();
  // Map low-level fetch network failures to a service-availability error.
  const isNetworkFailure =
    error instanceof TypeError &&
    (error.message === "Failed to fetch" || error.message === "fetch failed");
  reject(isNetworkFailure ? new BlobServiceNotAvailable() : error);
}
});
}
// src/multipart/create-uploader.ts
// Factory for the `createMultipartUploader` SDK method: binds the allowed
// option set, token source and extra validations, and returns an uploader
// object exposing uploadPart/complete for a single multipart session.
function createCreateMultipartUploaderMethod({ allowedOptions, getToken, extraChecks }) {
  return async (pathname, optionsInput) => {
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    const { key, uploadId } = await createMultipartUpload(
      pathname,
      headers,
      options
    );
    return {
      key,
      uploadId,
      // Uploads one part of the session; rejects plain-object bodies early.
      async uploadPart(partNumber, body) {
        if (isPlainObject(body)) {
          throw new BlobError(
            "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
          );
        }
        const { etag } = await uploadPart({
          uploadId,
          key,
          pathname,
          part: { partNumber, blob: body },
          headers,
          options
        });
        return { etag, partNumber };
      },
      // Finalizes the session from the collected { etag, partNumber } list.
      async complete(parts) {
        return completeMultipartUpload({
          uploadId,
          key,
          pathname,
          parts,
          headers,
          options
        });
      }
    };
  };
}
// src/put.ts
import throttle2 from "throttleit";
// src/multipart/uncontrolled.ts
// Performs a fully managed multipart upload: create session, stream the body
// as parts, then complete. Progress is reported at the part level only, so
// the onUploadProgress callback is stripped from create/complete requests.
async function uncontrolledMultipartUpload(pathname, body, headers, options) {
  debug("mpu: init", "pathname:", pathname, "headers:", headers);
  const optionsWithoutOnUploadProgress = {
    ...options,
    onUploadProgress: void 0
  };
  const { uploadId, key } = await createMultipartUpload(
    pathname,
    headers,
    optionsWithoutOnUploadProgress
  );
  const totalToLoad = computeBodyLength(body);
  const stream = await toReadableStream(body);
  const parts = await uploadAllParts({
    uploadId,
    key,
    pathname,
    // @ts-expect-error ReadableStream<ArrayBuffer | Uint8Array> is compatible at runtime
    stream,
    headers,
    options,
    totalToLoad
  });
  return completeMultipartUpload({
    uploadId,
    key,
    pathname,
    parts,
    headers,
    options: optionsWithoutOnUploadProgress
  });
}
// src/put.ts
// Factory for the `put` SDK method: validates the body, builds options and
// headers, and either delegates to the managed multipart path or performs a
// single PUT request against the Blob API.
function createPutMethod({
  allowedOptions,
  getToken,
  extraChecks
}) {
  return async function put(pathname, body, optionsInput) {
    if (!body) {
      throw new BlobError("body is required");
    }
    if (isPlainObject(body)) {
      throw new BlobError(
        "Body must be a string, buffer or stream. You sent a plain JavaScript object, double check what you're trying to upload."
      );
    }
    const options = await createPutOptions({
      pathname,
      options: optionsInput,
      extraChecks,
      getToken
    });
    const headers = createPutHeaders(allowedOptions, options);
    if (options.multipart === true) {
      return uncontrolledMultipartUpload(pathname, body, headers, options);
    }
    // Throttle progress callbacks to at most one invocation per 100ms.
    const onUploadProgress = options.onUploadProgress
      ? throttle2(options.onUploadProgress, 100)
      : void 0;
    const params = new URLSearchParams({ pathname });
    const response = await requestApi(
      `/?${params.toString()}`,
      {
        method: "PUT",
        body,
        headers,
        signal: options.abortSignal
      },
      {
        ...options,
        onUploadProgress
      }
    );
    // Project the API response onto the public PutBlobResult shape.
    const { url, downloadUrl, contentType, contentDisposition, etag } = response;
    return {
      url,
      downloadUrl,
      pathname: response.pathname,
      contentType,
      contentDisposition,
      etag
    };
  };
}
// src/create-folder.ts
// Creates a "folder" blob. Folders are just blobs whose pathname ends with
// a trailing slash, so the pathname is normalized and the random suffix is
// disabled to keep the folder name verbatim.
async function createFolder(pathname, options = {}) {
  const folderPathname = pathname.endsWith("/") ? pathname : `${pathname}/`;
  const headers = { [putOptionHeaderMap.addRandomSuffix]: "0" };
  const params = new URLSearchParams({ pathname: folderPathname });
  const response = await requestApi(
    `/?${params.toString()}`,
    {
      method: "PUT",
      headers,
      signal: options.abortSignal
    },
    options
  );
  return {
    url: response.url,
    pathname: response.pathname
  };
}
export {
getTokenFromOptionsOrEnv,
BlobError,
getDownloadUrl,
disallowedPathnameCharacters,
MAXIMUM_PATHNAME_LENGTH,
BlobAccessError,
BlobContentTypeNotAllowedError,
BlobPathnameMismatchError,
BlobClientTokenExpiredError,
BlobFileTooLargeError,
BlobStoreNotFoundError,
BlobStoreSuspendedError,
BlobUnknownError,
BlobNotFoundError,
BlobServiceNotAvailable,
BlobServiceRateLimited,
BlobRequestAbortedError,
BlobPreconditionFailedError,
requestApi,
createCompleteMultipartUploadMethod,
createCreateMultipartUploadMethod,
createUploadPartMethod,
createCreateMultipartUploaderMethod,
createPutMethod,
createFolder
};
/*!
* bytes
* Copyright(c) 2012-2014 TJ Holowaychuk
* Copyright(c) 2015 Jed Watson
* MIT Licensed
*/
//# sourceMappingURL=chunk-NUG4TPYD.js.map

Sorry, the diff of this file is too big to display

import { Readable } from 'stream';
import { File } from 'undici';
interface BlobCommandOptions {
/**
* Define your blob API token.
* @defaultvalue process.env.BLOB_READ_WRITE_TOKEN
*/
token?: string;
/**
* `AbortSignal` to cancel the running request. See https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
*/
abortSignal?: AbortSignal;
}
interface CommonCreateBlobOptions extends BlobCommandOptions {
/**
* Whether the blob should be publicly accessible. The only currently allowed value is `public`.
*/
access: 'public';
/**
* Adds a random suffix to the filename.
* @defaultvalue false
*/
addRandomSuffix?: boolean;
/**
* Allow overwriting an existing blob. By default this is set to false and will throw an error if the blob already exists.
* @defaultvalue false
*/
allowOverwrite?: boolean;
/**
* Defines the content type of the blob. By default, this value is inferred from the pathname. Sent as the 'content-type' header when downloading a blob.
*/
contentType?: string;
/**
* Number in seconds to configure the edge and browser cache. The minimum is 1 minute. There's no maximum but keep in mind that browser and edge caches will do a best effort to respect this value.
* Detailed documentation can be found here: https://vercel.com/docs/storage/vercel-blob#caching
* @defaultvalue 30 * 24 * 60 * 60 (1 Month)
*/
cacheControlMaxAge?: number;
/**
* Only perform the operation if the blob's current ETag matches this value.
* Use this for optimistic concurrency control to prevent overwriting changes made by others.
* If the ETag doesn't match, a `BlobPreconditionFailedError` will be thrown.
*/
ifMatch?: string;
}
/**
* Event object passed to the onUploadProgress callback.
*/
interface UploadProgressEvent {
/**
* The number of bytes uploaded.
*/
loaded: number;
/**
* The total number of bytes to upload.
*/
total: number;
/**
* The percentage of the upload that has been completed.
*/
percentage: number;
}
/**
* Callback type for tracking upload progress.
*/
type OnUploadProgressCallback = (progressEvent: UploadProgressEvent) => void;
/**
* Interface for including upload progress tracking capabilities.
*/
interface WithUploadProgress {
/**
* Callback to track the upload progress. You will receive an object with the following properties:
* - `loaded`: The number of bytes uploaded
* - `total`: The total number of bytes to upload
* - `percentage`: The percentage of the upload that has been completed
*/
onUploadProgress?: OnUploadProgressCallback;
}
/**
 * Error thrown by Vercel Blob SDK methods, e.g. when `body` is missing or a
 * plain JavaScript object is passed as the upload body.
 */
declare class BlobError extends Error {
constructor(message: string);
}
/**
* Generates a download URL for a blob.
* The download URL includes a ?download=1 parameter which causes browsers to download
* the file instead of displaying it inline.
*
* @param blobUrl - The URL of the blob to generate a download URL for
* @returns A string containing the download URL with the download parameter appended
*/
declare function getDownloadUrl(blobUrl: string): string;
/**
* Result of a successful put or copy operation.
*/
interface PutBlobResult {
/**
* The URL of the blob.
*/
url: string;
/**
* A URL that will cause browsers to download the file instead of displaying it inline.
*/
downloadUrl: string;
/**
* The pathname of the blob within the store.
*/
pathname: string;
/**
* The content-type of the blob.
*/
contentType: string;
/**
* The content disposition header value.
*/
contentDisposition: string;
/**
* The ETag of the blob. Can be used with `ifMatch` for conditional writes.
*/
etag: string;
}
/**
* Represents the body content for a put operation.
* Can be one of several supported types.
*/
type PutBody = string | Readable | Buffer | Blob | ArrayBuffer | ReadableStream | File;
/**
* Input format for a multipart upload part.
* Used internally for processing multipart uploads.
*/
interface PartInput {
/**
* The part number (1-based index).
*/
partNumber: number;
/**
* The content of the part.
*/
blob: PutBody;
}
/**
* Represents a single part of a multipart upload.
* This structure is used when completing a multipart upload to specify the
* uploaded parts and their order.
*/
interface Part {
/**
* The ETag value returned when the part was uploaded.
* This value is used to verify the integrity of the uploaded part.
*/
etag: string;
/**
* The part number of this part (1-based).
* This number is used to order the parts when completing the multipart upload.
*/
partNumber: number;
}
/**
* Options for completing a multipart upload.
* Used with the completeMultipartUpload method.
*/
interface CommonCompleteMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to identify which multipart upload is being completed.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
}
type CompleteMultipartUploadCommandOptions = CommonCompleteMultipartUploadOptions & CommonCreateBlobOptions;
/**
* Options for uploading a part in a multipart upload process.
* Used with the uploadPart method.
*/
interface CommonMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to associate all uploaded parts with the same multipart upload.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
/**
* A number identifying which part is being uploaded (1-based).
* This number is used to order the parts when completing the multipart upload.
* Parts must be uploaded with consecutive part numbers starting from 1.
*/
partNumber: number;
}
type UploadPartCommandOptions = CommonMultipartUploadOptions & CommonCreateBlobOptions;
/**
 * Result of a successful createFolder operation.
 */
interface CreateFolderResult {
/**
 * The pathname of the created folder, always ending with a trailing slash.
 */
pathname: string;
/**
 * The URL of the folder blob. Use it to delete the folder, just like a blob.
 */
url: string;
}
/**
* Creates a folder in your store. Vercel Blob has no real concept of folders, our file browser on Vercel.com displays folders based on the presence of trailing slashes in the pathname. Unless you are building a file browser system, you probably don't need to use this method.
*
* Use the resulting `url` to delete the folder, just like you would delete a blob.
* @param pathname - Can be user1/ or user1/avatars/
* @param options - Additional options like `token`
*/
declare function createFolder(pathname: string, options?: BlobCommandOptions): Promise<CreateFolderResult>;
export { type BlobCommandOptions as B, type CommonCompleteMultipartUploadOptions as C, type OnUploadProgressCallback as O, type PutBlobResult as P, type UploadPartCommandOptions as U, type WithUploadProgress as W, type Part as a, type PutBody as b, type CommonMultipartUploadOptions as c, createFolder as d, type CommonCreateBlobOptions as e, BlobError as f, type CompleteMultipartUploadCommandOptions as g, type PartInput as h, type UploadProgressEvent as i, getDownloadUrl as j };
import { Readable } from 'stream';
import { File } from 'undici';
interface BlobCommandOptions {
/**
* Define your blob API token.
* @defaultvalue process.env.BLOB_READ_WRITE_TOKEN
*/
token?: string;
/**
* `AbortSignal` to cancel the running request. See https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
*/
abortSignal?: AbortSignal;
}
interface CommonCreateBlobOptions extends BlobCommandOptions {
/**
* Whether the blob should be publicly accessible. The only currently allowed value is `public`.
*/
access: 'public';
/**
* Adds a random suffix to the filename.
* @defaultvalue false
*/
addRandomSuffix?: boolean;
/**
* Allow overwriting an existing blob. By default this is set to false and will throw an error if the blob already exists.
* @defaultvalue false
*/
allowOverwrite?: boolean;
/**
* Defines the content type of the blob. By default, this value is inferred from the pathname. Sent as the 'content-type' header when downloading a blob.
*/
contentType?: string;
/**
* Number in seconds to configure the edge and browser cache. The minimum is 1 minute. There's no maximum but keep in mind that browser and edge caches will do a best effort to respect this value.
* Detailed documentation can be found here: https://vercel.com/docs/storage/vercel-blob#caching
* @defaultvalue 30 * 24 * 60 * 60 (1 Month)
*/
cacheControlMaxAge?: number;
/**
* Only perform the operation if the blob's current ETag matches this value.
* Use this for optimistic concurrency control to prevent overwriting changes made by others.
* If the ETag doesn't match, a `BlobPreconditionFailedError` will be thrown.
*/
ifMatch?: string;
}
/**
* Event object passed to the onUploadProgress callback.
*/
interface UploadProgressEvent {
/**
* The number of bytes uploaded.
*/
loaded: number;
/**
* The total number of bytes to upload.
*/
total: number;
/**
* The percentage of the upload that has been completed.
*/
percentage: number;
}
/**
* Callback type for tracking upload progress.
*/
type OnUploadProgressCallback = (progressEvent: UploadProgressEvent) => void;
/**
* Interface for including upload progress tracking capabilities.
*/
interface WithUploadProgress {
/**
* Callback to track the upload progress. You will receive an object with the following properties:
* - `loaded`: The number of bytes uploaded
* - `total`: The total number of bytes to upload
* - `percentage`: The percentage of the upload that has been completed
*/
onUploadProgress?: OnUploadProgressCallback;
}
/**
 * Error thrown by Vercel Blob SDK methods, e.g. when `body` is missing or a
 * plain JavaScript object is passed as the upload body.
 */
declare class BlobError extends Error {
constructor(message: string);
}
/**
* Generates a download URL for a blob.
* The download URL includes a ?download=1 parameter which causes browsers to download
* the file instead of displaying it inline.
*
* @param blobUrl - The URL of the blob to generate a download URL for
* @returns A string containing the download URL with the download parameter appended
*/
declare function getDownloadUrl(blobUrl: string): string;
/**
* Result of a successful put or copy operation.
*/
interface PutBlobResult {
/**
* The URL of the blob.
*/
url: string;
/**
* A URL that will cause browsers to download the file instead of displaying it inline.
*/
downloadUrl: string;
/**
* The pathname of the blob within the store.
*/
pathname: string;
/**
* The content-type of the blob.
*/
contentType: string;
/**
* The content disposition header value.
*/
contentDisposition: string;
/**
* The ETag of the blob. Can be used with `ifMatch` for conditional writes.
*/
etag: string;
}
/**
* Represents the body content for a put operation.
* Can be one of several supported types.
*/
type PutBody = string | Readable | Buffer | Blob | ArrayBuffer | ReadableStream | File;
/**
* Input format for a multipart upload part.
* Used internally for processing multipart uploads.
*/
interface PartInput {
/**
* The part number (1-based index).
*/
partNumber: number;
/**
* The content of the part.
*/
blob: PutBody;
}
/**
* Represents a single part of a multipart upload.
* This structure is used when completing a multipart upload to specify the
* uploaded parts and their order.
*/
interface Part {
/**
* The ETag value returned when the part was uploaded.
* This value is used to verify the integrity of the uploaded part.
*/
etag: string;
/**
* The part number of this part (1-based).
* This number is used to order the parts when completing the multipart upload.
*/
partNumber: number;
}
/**
* Options for completing a multipart upload.
* Used with the completeMultipartUpload method.
*/
interface CommonCompleteMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to identify which multipart upload is being completed.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
}
type CompleteMultipartUploadCommandOptions = CommonCompleteMultipartUploadOptions & CommonCreateBlobOptions;
/**
* Options for uploading a part in a multipart upload process.
* Used with the uploadPart method.
*/
interface CommonMultipartUploadOptions {
/**
* Unique upload identifier for the multipart upload, received from createMultipartUpload.
* This ID is used to associate all uploaded parts with the same multipart upload.
*/
uploadId: string;
/**
* Unique key identifying the blob object, received from createMultipartUpload.
* This key is used to identify which blob object the parts belong to.
*/
key: string;
/**
* A number identifying which part is being uploaded (1-based).
* This number is used to order the parts when completing the multipart upload.
* Parts must be uploaded with consecutive part numbers starting from 1.
*/
partNumber: number;
}
type UploadPartCommandOptions = CommonMultipartUploadOptions & CommonCreateBlobOptions;
/**
 * Result of a successful createFolder operation.
 */
interface CreateFolderResult {
/**
 * The pathname of the created folder, always ending with a trailing slash.
 */
pathname: string;
/**
 * The URL of the folder blob. Use it to delete the folder, just like a blob.
 */
url: string;
}
/**
* Creates a folder in your store. Vercel Blob has no real concept of folders, our file browser on Vercel.com displays folders based on the presence of trailing slashes in the pathname. Unless you are building a file browser system, you probably don't need to use this method.
*
* Use the resulting `url` to delete the folder, just like you would delete a blob.
* @param pathname - Can be user1/ or user1/avatars/
* @param options - Additional options like `token`
*/
declare function createFolder(pathname: string, options?: BlobCommandOptions): Promise<CreateFolderResult>;
export { type BlobCommandOptions as B, type CommonCompleteMultipartUploadOptions as C, type OnUploadProgressCallback as O, type PutBlobResult as P, type UploadPartCommandOptions as U, type WithUploadProgress as W, type Part as a, type PutBody as b, type CommonMultipartUploadOptions as c, createFolder as d, type CommonCreateBlobOptions as e, BlobError as f, type CompleteMultipartUploadCommandOptions as g, type PartInput as h, type UploadProgressEvent as i, getDownloadUrl as j };

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet