@ethersphere/bee-js
Comparing version 6.8.1 to 6.9.0
@@ -25,3 +25,3 @@ "use strict";
'content-type': 'application/octet-stream',
...(0, headers_1.extractUploadHeaders)(postageBatchId, options),
...(0, headers_1.extractRedundantUploadHeaders)(postageBatchId, options),
},
@@ -41,6 +41,7 @@ });
*/
async function download(requestOptions, hash) {
async function download(requestOptions, hash, options) {
const response = await (0, http_1.http)(requestOptions, {
responseType: 'arraybuffer',
url: `${endpoint}/${hash}`,
headers: (0, headers_1.extractDownloadHeaders)(options),
});
@@ -56,6 +57,7 @@ return (0, bytes_1.wrapBytesWithHelpers)(new Uint8Array(response.data));
*/
async function downloadReadable(requestOptions, hash) {
async function downloadReadable(requestOptions, hash, options) {
const response = await (0, http_1.http)(requestOptions, {
responseType: 'stream',
url: `${endpoint}/${hash}`,
headers: (0, headers_1.extractDownloadHeaders)(options),
});
@@ -62,0 +64,0 @@ return response.data;
@@ -13,3 +13,3 @@ "use strict";
function extractFileUploadHeaders(postageBatchId, options) {
const headers = (0, headers_1.extractUploadHeaders)(postageBatchId, options);
const headers = (0, headers_1.extractRedundantUploadHeaders)(postageBatchId, options);
if (options?.size)
@@ -32,4 +32,5 @@ headers['content-length'] = String(options.size);
if ((0, stream_1.isReadable)(data) && !options?.contentType) {
if (!options)
if (!options) {
options = {};
}
options.contentType = 'application/octet-stream';
@@ -60,3 +61,3 @@ }
*/
async function downloadFile(requestOptions, hash, path = '') {
async function downloadFile(requestOptions, hash, path = '', options) {
const response = await (0, http_1.http)(requestOptions, {
@@ -66,2 +67,3 @@ method: 'GET',
url: `${bzzEndpoint}/${hash}/${path}`,
headers: (0, headers_1.extractDownloadHeaders)(options),
});
@@ -82,3 +84,3 @@ const file = {
*/
async function downloadFileReadable(requestOptions, hash, path = '') {
async function downloadFileReadable(requestOptions, hash, path = '', options) {
const response = await (0, http_1.http)(requestOptions, {
@@ -88,2 +90,3 @@ method: 'GET',
url: `${bzzEndpoint}/${hash}/${path}`,
headers: (0, headers_1.extractDownloadHeaders)(options),
});
@@ -98,7 +101,9 @@ const file = {
function extractCollectionUploadHeaders(postageBatchId, options) {
const headers = (0, headers_1.extractUploadHeaders)(postageBatchId, options);
if (options?.indexDocument)
const headers = (0, headers_1.extractRedundantUploadHeaders)(postageBatchId, options);
if (options?.indexDocument) {
headers['swarm-index-document'] = options.indexDocument;
if (options?.errorDocument)
}
if (options?.errorDocument) {
headers['swarm-error-document'] = options.errorDocument;
}
return headers;
@@ -105,0 +110,0 @@ }
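Note how extractCollectionUploadHeaders now builds on extractRedundantUploadHeaders, so collection uploads carry a redundancy level alongside the index/error documents. A minimal sketch of the corresponding public call, relying on the widened uploadFiles signature shown later in this diff (node URL and batch ID are placeholders):

import { Bee, RedundancyLevel } from '@ethersphere/bee-js'

const bee = new Bee('http://localhost:1633')
// Placeholder batch ID -- substitute a real postage batch.
const batchId = '0000000000000000000000000000000000000000000000000000000000000000'

async function publishSite(files: File[]) {
  // indexDocument/errorDocument become the swarm-index-document and
  // swarm-error-document headers built above; redundancyLevel is threaded
  // in by extractRedundantUploadHeaders as swarm-redundancy-level.
  return bee.uploadFiles(batchId, files, {
    indexDocument: 'index.html',
    errorDocument: '404.html',
    redundancyLevel: RedundancyLevel.MEDIUM,
  })
}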
@@ -17,3 +17,3 @@ "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.SIGNATURE_BYTES_LENGTH = exports.SIGNATURE_HEX_LENGTH = exports.TOPIC_HEX_LENGTH = exports.TOPIC_BYTES_LENGTH = exports.FEED_INDEX_HEX_LENGTH = exports.TAGS_LIMIT_MAX = exports.TAGS_LIMIT_MIN = exports.STAMPS_DEPTH_MAX = exports.STAMPS_DEPTH_MIN = exports.ENCRYPTED_REFERENCE_BYTES_LENGTH = exports.REFERENCE_BYTES_LENGTH = exports.ENCRYPTED_REFERENCE_HEX_LENGTH = exports.REFERENCE_HEX_LENGTH = exports.BATCH_ID_HEX_LENGTH = exports.PUBKEY_HEX_LENGTH = exports.PSS_TARGET_HEX_LENGTH_MAX = exports.ADDRESS_HEX_LENGTH = exports.CHUNK_SIZE = exports.BRANCHES = exports.SECTION_SIZE = exports.SPAN_SIZE = void 0;
exports.SIGNATURE_BYTES_LENGTH = exports.SIGNATURE_HEX_LENGTH = exports.TOPIC_HEX_LENGTH = exports.TOPIC_BYTES_LENGTH = exports.RedundancyStrategy = exports.RedundancyLevel = exports.FEED_INDEX_HEX_LENGTH = exports.TAGS_LIMIT_MAX = exports.TAGS_LIMIT_MIN = exports.STAMPS_DEPTH_MAX = exports.STAMPS_DEPTH_MIN = exports.ENCRYPTED_REFERENCE_BYTES_LENGTH = exports.REFERENCE_BYTES_LENGTH = exports.ENCRYPTED_REFERENCE_HEX_LENGTH = exports.REFERENCE_HEX_LENGTH = exports.BATCH_ID_HEX_LENGTH = exports.PUBKEY_HEX_LENGTH = exports.PSS_TARGET_HEX_LENGTH_MAX = exports.ADDRESS_HEX_LENGTH = exports.CHUNK_SIZE = exports.BRANCHES = exports.SECTION_SIZE = exports.SPAN_SIZE = void 0;
__exportStar(require("./debug"), exports);
@@ -43,2 +43,31 @@ exports.SPAN_SIZE = 8;
exports.FEED_INDEX_HEX_LENGTH = 16;
/**
* Add redundancy to the data being uploaded so that downloaders can download it with better UX.
* 0 value is default and does not add any redundancy to the file.
*/
var RedundancyLevel;
(function (RedundancyLevel) {
RedundancyLevel[RedundancyLevel["OFF"] = 0] = "OFF";
RedundancyLevel[RedundancyLevel["MEDIUM"] = 1] = "MEDIUM";
RedundancyLevel[RedundancyLevel["STRONG"] = 2] = "STRONG";
RedundancyLevel[RedundancyLevel["INSANE"] = 3] = "INSANE";
RedundancyLevel[RedundancyLevel["PARANOID"] = 4] = "PARANOID";
})(RedundancyLevel = exports.RedundancyLevel || (exports.RedundancyLevel = {}));
/**
* Specify the retrieve strategy on redundant data.
* The possible values are NONE, DATA, PROX and RACE.
* Strategy NONE means no prefetching takes place.
* Strategy DATA means only data chunks are prefetched.
* Strategy PROX means only chunks that are close to the node are prefetched.
* Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
* Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
* The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
*/
var RedundancyStrategy;
(function (RedundancyStrategy) {
RedundancyStrategy[RedundancyStrategy["NONE"] = 0] = "NONE";
RedundancyStrategy[RedundancyStrategy["DATA"] = 1] = "DATA";
RedundancyStrategy[RedundancyStrategy["PROX"] = 2] = "PROX";
RedundancyStrategy[RedundancyStrategy["RACE"] = 3] = "RACE";
})(RedundancyStrategy = exports.RedundancyStrategy || (exports.RedundancyStrategy = {}));
/*********************************************************
@@ -45,0 +74,0 @@ * Writers and Readers interfaces
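These two enums are the heart of the 6.9.0 feature: RedundancyLevel is chosen at upload time, RedundancyStrategy at download time. A round-trip sketch, assuming Bee.downloadData forwards DownloadRedundancyOptions the same way the module-level download shown earlier does (node URL and batch ID are placeholders):

import { Bee, RedundancyLevel, RedundancyStrategy } from '@ethersphere/bee-js'

const bee = new Bee('http://localhost:1633')
const batchId = '0000000000000000000000000000000000000000000000000000000000000000' // placeholder

async function roundTrip(payload: Uint8Array): Promise<Uint8Array> {
  // MEDIUM adds parity chunks at upload time (header swarm-redundancy-level: 1).
  const { reference } = await bee.uploadData(batchId, payload, {
    redundancyLevel: RedundancyLevel.MEDIUM,
  })

  // RACE prefetches data and parity chunks and reconstructs the payload
  // from whichever n chunks arrive first.
  return bee.downloadData(reference, {
    redundancyStrategy: RedundancyStrategy.RACE,
  })
}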
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.getStampUsage = exports.getStampTtlSeconds = exports.getStampEffectiveBytes = exports.getStampMaximumCapacityBytes = exports.getStampCostInPlur = exports.getStampCostInBzz = exports.getDepthForCapacity = exports.getAmountForTtl = exports.makeMaxTarget = exports.keccak256Hash = exports.readableWebToNode = exports.readableNodeToWeb = exports.normalizeToReadableStream = exports.isReadableStream = exports.isReadable = exports.isNodeReadable = exports.toLittleEndian = exports.makeHexEthAddress = exports.makeEthereumWalletSigner = exports.makeEthAddress = exports.isHexEthAddress = exports.fromLittleEndian = exports.ethToSwarmAddress = exports.makeHexString = exports.isHexString = exports.intToHex = exports.hexToBytes = exports.bytesToHex = exports.assertPrefixedHexString = exports.assertHexString = exports.isFlexBytes = exports.isBytes = exports.flexBytesAtOffset = exports.bytesEqual = exports.bytesAtOffset = exports.assertFlexBytes = exports.assertBytes = exports.getFolderSize = exports.getCollectionSize = void 0; | ||
exports.getRedundancyStats = exports.getRedundancyStat = exports.approximateOverheadForRedundancyLevel = exports.getStampUsage = exports.getStampTtlSeconds = exports.getStampMaximumCapacityBytes = exports.getStampEffectiveBytes = exports.getStampCostInPlur = exports.getStampCostInBzz = exports.getDepthForCapacity = exports.getAmountForTtl = exports.makeMaxTarget = exports.keccak256Hash = exports.readableWebToNode = exports.readableNodeToWeb = exports.normalizeToReadableStream = exports.isReadableStream = exports.isReadable = exports.isNodeReadable = exports.toLittleEndian = exports.makeHexEthAddress = exports.makeEthereumWalletSigner = exports.makeEthAddress = exports.isHexEthAddress = exports.fromLittleEndian = exports.ethToSwarmAddress = exports.makeHexString = exports.isHexString = exports.intToHex = exports.hexToBytes = exports.bytesToHex = exports.assertPrefixedHexString = exports.assertHexString = exports.isFlexBytes = exports.isBytes = exports.flexBytesAtOffset = exports.bytesEqual = exports.bytesAtOffset = exports.assertFlexBytes = exports.assertBytes = exports.getFolderSize = exports.getCollectionSize = void 0; | ||
var collection_1 = require("./collection"); | ||
@@ -48,5 +48,9 @@ Object.defineProperty(exports, "getCollectionSize", { enumerable: true, get: function () { return collection_1.getCollectionSize; } }); | ||
Object.defineProperty(exports, "getStampCostInPlur", { enumerable: true, get: function () { return stamps_1.getStampCostInPlur; } }); | ||
Object.defineProperty(exports, "getStampEffectiveBytes", { enumerable: true, get: function () { return stamps_1.getStampEffectiveBytes; } }); | ||
Object.defineProperty(exports, "getStampMaximumCapacityBytes", { enumerable: true, get: function () { return stamps_1.getStampMaximumCapacityBytes; } }); | ||
Object.defineProperty(exports, "getStampEffectiveBytes", { enumerable: true, get: function () { return stamps_1.getStampEffectiveBytes; } }); | ||
Object.defineProperty(exports, "getStampTtlSeconds", { enumerable: true, get: function () { return stamps_1.getStampTtlSeconds; } }); | ||
Object.defineProperty(exports, "getStampUsage", { enumerable: true, get: function () { return stamps_1.getStampUsage; } }); | ||
var redundancy_1 = require("./redundancy"); | ||
Object.defineProperty(exports, "approximateOverheadForRedundancyLevel", { enumerable: true, get: function () { return redundancy_1.approximateOverheadForRedundancyLevel; } }); | ||
Object.defineProperty(exports, "getRedundancyStat", { enumerable: true, get: function () { return redundancy_1.getRedundancyStat; } }); | ||
Object.defineProperty(exports, "getRedundancyStats", { enumerable: true, get: function () { return redundancy_1.getRedundancyStats; } }); |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.extractUploadHeaders = exports.readFileHeaders = void 0; | ||
exports.extractDownloadHeaders = exports.extractRedundantUploadHeaders = exports.extractUploadHeaders = exports.readFileHeaders = void 0; | ||
const error_1 = require("./error"); | ||
@@ -49,12 +49,38 @@ /** | ||
}; | ||
if (options?.pin) | ||
if (options?.pin) { | ||
headers['swarm-pin'] = String(options.pin); | ||
if (options?.encrypt) | ||
} | ||
if (options?.encrypt) { | ||
headers['swarm-encrypt'] = String(options.encrypt); | ||
if (options?.tag) | ||
} | ||
if (options?.tag) { | ||
headers['swarm-tag'] = String(options.tag); | ||
if (typeof options?.deferred === 'boolean') | ||
} | ||
if (typeof options?.deferred === 'boolean') { | ||
headers['swarm-deferred-upload'] = options.deferred.toString(); | ||
} | ||
return headers; | ||
} | ||
exports.extractUploadHeaders = extractUploadHeaders; | ||
function extractRedundantUploadHeaders(postageBatchId, options) { | ||
const headers = extractUploadHeaders(postageBatchId, options); | ||
if (options?.redundancyLevel) { | ||
headers['swarm-redundancy-level'] = String(options.redundancyLevel); | ||
} | ||
return headers; | ||
} | ||
exports.extractRedundantUploadHeaders = extractRedundantUploadHeaders; | ||
function extractDownloadHeaders(options) { | ||
const headers = {}; | ||
if (options?.redundancyStrategy) { | ||
headers['swarm-redundancy-strategy'] = String(options.redundancyStrategy); | ||
} | ||
if (options?.fallback === false) { | ||
headers['swarm-redundancy-fallback-mode'] = 'false'; | ||
} | ||
if (options?.timeoutMs !== undefined) { | ||
headers['swarm-chunk-retrieval-timeout'] = String(options.timeoutMs); | ||
} | ||
return headers; | ||
} | ||
exports.extractDownloadHeaders = extractDownloadHeaders; |
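The helpers above are pure option-to-header mappings, which makes their behavior easy to pin down. The sketch below restates the mapping with illustrative values; note that because both guards use truthiness, RedundancyLevel.OFF (0) and RedundancyStrategy.NONE (0) emit no header at all and leave the node's defaults in force:

// Upload side: only a non-zero redundancyLevel produces a header.
// extractRedundantUploadHeaders(batchId, { redundancyLevel: 2 })
//   => { ...base upload headers, 'swarm-redundancy-level': '2' }

// Download side, with every option set:
const options = { redundancyStrategy: 3, fallback: false, timeoutMs: 5000 }
// extractDownloadHeaders(options) yields:
const produced = {
  'swarm-redundancy-strategy': '3', // String(options.redundancyStrategy)
  'swarm-redundancy-fallback-mode': 'false', // emitted only when fallback === false
  'swarm-chunk-retrieval-timeout': '5000', // String(options.timeoutMs)
}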
import { wrapBytesWithHelpers } from "../utils/bytes.js";
import { extractUploadHeaders } from "../utils/headers.js";
import { extractDownloadHeaders, extractRedundantUploadHeaders } from "../utils/headers.js";
import { http } from "../utils/http.js";
@@ -22,3 +22,3 @@ import { makeTagUid } from "../utils/type.js";
'content-type': 'application/octet-stream',
...extractUploadHeaders(postageBatchId, options)
...extractRedundantUploadHeaders(postageBatchId, options)
}
@@ -37,6 +37,7 @@ });
*/
export async function download(requestOptions, hash) {
export async function download(requestOptions, hash, options) {
const response = await http(requestOptions, {
responseType: 'arraybuffer',
url: `${endpoint}/${hash}`
url: `${endpoint}/${hash}`,
headers: extractDownloadHeaders(options)
});
@@ -51,8 +52,9 @@ return wrapBytesWithHelpers(new Uint8Array(response.data));
*/
export async function downloadReadable(requestOptions, hash) {
export async function downloadReadable(requestOptions, hash, options) {
const response = await http(requestOptions, {
responseType: 'stream',
url: `${endpoint}/${hash}`
url: `${endpoint}/${hash}`,
headers: extractDownloadHeaders(options)
});
return response.data;
}
import { wrapBytesWithHelpers } from "../utils/bytes.js";
import { assertCollection } from "../utils/collection.js";
import { extractUploadHeaders, readFileHeaders } from "../utils/headers.js";
import { extractDownloadHeaders, extractRedundantUploadHeaders, readFileHeaders } from "../utils/headers.js";
import { http } from "../utils/http.js";
@@ -10,3 +10,3 @@ import { isReadable } from "../utils/stream.js";
function extractFileUploadHeaders(postageBatchId, options) {
const headers = extractUploadHeaders(postageBatchId, options);
const headers = extractRedundantUploadHeaders(postageBatchId, options);
if (options?.size) headers['content-length'] = String(options.size);
@@ -27,3 +27,5 @@ if (options?.contentType) headers['content-type'] = options.contentType;
if (isReadable(data) && !options?.contentType) {
if (!options) options = {};
if (!options) {
options = {};
}
options.contentType = 'application/octet-stream';
@@ -55,7 +57,8 @@ }
*/
export async function downloadFile(requestOptions, hash, path = '') {
export async function downloadFile(requestOptions, hash, path = '', options) {
const response = await http(requestOptions, {
method: 'GET',
responseType: 'arraybuffer',
url: `${bzzEndpoint}/${hash}/${path}`
url: `${bzzEndpoint}/${hash}/${path}`,
headers: extractDownloadHeaders(options)
});
@@ -75,7 +78,8 @@ const file = {
*/
export async function downloadFileReadable(requestOptions, hash, path = '') {
export async function downloadFileReadable(requestOptions, hash, path = '', options) {
const response = await http(requestOptions, {
method: 'GET',
responseType: 'stream',
url: `${bzzEndpoint}/${hash}/${path}`
url: `${bzzEndpoint}/${hash}/${path}`,
headers: extractDownloadHeaders(options)
});
@@ -89,5 +93,9 @@ const file = {
function extractCollectionUploadHeaders(postageBatchId, options) {
const headers = extractUploadHeaders(postageBatchId, options);
if (options?.indexDocument) headers['swarm-index-document'] = options.indexDocument;
if (options?.errorDocument) headers['swarm-error-document'] = options.errorDocument;
const headers = extractRedundantUploadHeaders(postageBatchId, options);
if (options?.indexDocument) {
headers['swarm-index-document'] = options.indexDocument;
}
if (options?.errorDocument) {
headers['swarm-error-document'] = options.errorDocument;
}
return headers;
@@ -94,0 +102,0 @@ }
@@ -25,2 +25,31 @@ export * from "./debug.js";
export const FEED_INDEX_HEX_LENGTH = 16;
/**
* Add redundancy to the data being uploaded so that downloaders can download it with better UX.
* 0 value is default and does not add any redundancy to the file.
*/
export var RedundancyLevel;
(function (RedundancyLevel) {
RedundancyLevel[RedundancyLevel["OFF"] = 0] = "OFF";
RedundancyLevel[RedundancyLevel["MEDIUM"] = 1] = "MEDIUM";
RedundancyLevel[RedundancyLevel["STRONG"] = 2] = "STRONG";
RedundancyLevel[RedundancyLevel["INSANE"] = 3] = "INSANE";
RedundancyLevel[RedundancyLevel["PARANOID"] = 4] = "PARANOID";
})(RedundancyLevel || (RedundancyLevel = {}));
/**
* Specify the retrieve strategy on redundant data.
* The possible values are NONE, DATA, PROX and RACE.
* Strategy NONE means no prefetching takes place.
* Strategy DATA means only data chunks are prefetched.
* Strategy PROX means only chunks that are close to the node are prefetched.
* Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
* Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
* The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
*/
export var RedundancyStrategy;
(function (RedundancyStrategy) {
RedundancyStrategy[RedundancyStrategy["NONE"] = 0] = "NONE";
RedundancyStrategy[RedundancyStrategy["DATA"] = 1] = "DATA";
RedundancyStrategy[RedundancyStrategy["PROX"] = 2] = "PROX";
RedundancyStrategy[RedundancyStrategy["RACE"] = 3] = "RACE";
})(RedundancyStrategy || (RedundancyStrategy = {}));
/*********************************************************
@@ -27,0 +56,0 @@ * Writers and Readers interfaces
@@ -9,2 +9,3 @@ export { getCollectionSize } from "./collection.js";
export { makeMaxTarget } from "./pss.js";
export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampMaximumCapacityBytes, getStampEffectiveBytes, getStampTtlSeconds, getStampUsage } from "./stamps.js";
export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampEffectiveBytes, getStampMaximumCapacityBytes, getStampTtlSeconds, getStampUsage } from "./stamps.js";
export { approximateOverheadForRedundancyLevel, getRedundancyStat, getRedundancyStats } from "./redundancy.js";
@@ -45,7 +45,35 @@ import { BeeError } from "./error.js";
};
if (options?.pin) headers['swarm-pin'] = String(options.pin);
if (options?.encrypt) headers['swarm-encrypt'] = String(options.encrypt);
if (options?.tag) headers['swarm-tag'] = String(options.tag);
if (typeof options?.deferred === 'boolean') headers['swarm-deferred-upload'] = options.deferred.toString();
if (options?.pin) {
headers['swarm-pin'] = String(options.pin);
}
if (options?.encrypt) {
headers['swarm-encrypt'] = String(options.encrypt);
}
if (options?.tag) {
headers['swarm-tag'] = String(options.tag);
}
if (typeof options?.deferred === 'boolean') {
headers['swarm-deferred-upload'] = options.deferred.toString();
}
return headers;
}
export function extractRedundantUploadHeaders(postageBatchId, options) {
const headers = extractUploadHeaders(postageBatchId, options);
if (options?.redundancyLevel) {
headers['swarm-redundancy-level'] = String(options.redundancyLevel);
}
return headers;
}
export function extractDownloadHeaders(options) {
const headers = {};
if (options?.redundancyStrategy) {
headers['swarm-redundancy-strategy'] = String(options.redundancyStrategy);
}
if (options?.fallback === false) {
headers['swarm-redundancy-fallback-mode'] = 'false';
}
if (options?.timeoutMs !== undefined) {
headers['swarm-chunk-retrieval-timeout'] = String(options.timeoutMs);
}
return headers;
}
import { Index, IndexBytes } from './feed';
import { FeedType } from './feed/type';
import type { AddressPrefix, AnyJson, BatchId, BeeOptions, BeeRequestOptions, CollectionUploadOptions, Data, FeedReader, FeedWriter, FileData, FileUploadOptions, JsonFeedOptions, Pin, PssMessageHandler, PssSubscription, PublicKey, Reference, SOCReader, SOCWriter, Signer, Tag, Topic, UploadOptions, UploadResultWithCid } from './types';
import type { AddressPrefix, AnyJson, BatchId, BeeOptions, BeeRequestOptions, CollectionUploadOptions, Data, FeedReader, FeedWriter, FileData, FileUploadOptions, JsonFeedOptions, Pin, PssMessageHandler, PssSubscription, PublicKey, Reference, SOCReader, SOCWriter, Signer, Tag, Topic, UploadOptions, UploadRedundancyOptions, UploadResultWithCid } from './types';
import { AllTagsOptions, Collection, FeedManifestResult, Readable, ReferenceCidOrEns, ReferenceOrEns, UploadResult } from './types';
@@ -42,3 +42,3 @@ import { EthAddress } from './utils/eth';
*/
uploadData(postageBatchId: string | BatchId, data: string | Uint8Array, options?: UploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResult>;
uploadData(postageBatchId: string | BatchId, data: string | Uint8Array, options?: UploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResult>;
/**
@@ -105,3 +105,3 @@ * Download data as a byte array
*/
uploadFile(postageBatchId: string | BatchId, data: string | Uint8Array | Readable | File, name?: string, options?: FileUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
uploadFile(postageBatchId: string | BatchId, data: string | Uint8Array | Readable | File, name?: string, options?: FileUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
/**
@@ -149,3 +149,3 @@ * Download single file.
*/
uploadFiles(postageBatchId: string | BatchId, fileList: FileList | File[], options?: CollectionUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
uploadFiles(postageBatchId: string | BatchId, fileList: FileList | File[], options?: CollectionUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
/**
@@ -161,3 +161,3 @@ * Upload Collection that you can assembly yourself.
*/
uploadCollection(postageBatchId: string | BatchId, collection: Collection<Uint8Array | Readable>, options?: CollectionUploadOptions): Promise<UploadResultWithCid>;
uploadCollection(postageBatchId: string | BatchId, collection: Collection<Uint8Array | Readable>, options?: CollectionUploadOptions & UploadRedundancyOptions): Promise<UploadResultWithCid>;
/**
@@ -179,3 +179,3 @@ * Upload collection of files.
*/
uploadFilesFromDirectory(postageBatchId: string | BatchId, dir: string, options?: CollectionUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
uploadFilesFromDirectory(postageBatchId: string | BatchId, dir: string, options?: CollectionUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
/**
@@ -182,0 +182,0 @@ * Create a new Tag which is meant for tracking progres of syncing data across network.
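Because the new fields arrive as an intersection with the existing options types, every pre-6.9.0 call site keeps compiling; opting in is a one-field change. A sketch against the widened uploadFile signature (node URL, batch ID, and file name are placeholders):

import { Bee, RedundancyLevel } from '@ethersphere/bee-js'

const bee = new Bee('http://localhost:1633')
const batchId = '0000000000000000000000000000000000000000000000000000000000000000' // placeholder

async function uploadReport(fileBytes: Uint8Array) {
  // FileUploadOptions & UploadRedundancyOptions: the existing fields and
  // the new redundancyLevel live in the same options object.
  return bee.uploadFile(batchId, fileBytes, 'report.pdf', {
    contentType: 'application/pdf',
    pin: true,
    redundancyLevel: RedundancyLevel.PARANOID,
  })
}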
@@ -1,2 +0,2 @@
import type { BatchId, BeeRequestOptions, Data, ReferenceOrEns, UploadOptions } from '../types';
import type { BatchId, BeeRequestOptions, Data, DownloadRedundancyOptions, ReferenceOrEns, UploadOptions, UploadRedundancyOptions } from '../types';
import { UploadResult } from '../types';
@@ -11,3 +11,3 @@ /**
*/
export declare function upload(requestOptions: BeeRequestOptions, data: string | Uint8Array, postageBatchId: BatchId, options?: UploadOptions): Promise<UploadResult>;
export declare function upload(requestOptions: BeeRequestOptions, data: string | Uint8Array, postageBatchId: BatchId, options?: UploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
/**
@@ -19,3 +19,3 @@ * Download data as a byte array
*/
export declare function download(requestOptions: BeeRequestOptions, hash: ReferenceOrEns): Promise<Data>;
export declare function download(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, options?: DownloadRedundancyOptions): Promise<Data>;
/**
@@ -27,2 +27,2 @@ * Download data as a readable stream
*/
export declare function downloadReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns): Promise<ReadableStream<Uint8Array>>;
export declare function downloadReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, options?: DownloadRedundancyOptions): Promise<ReadableStream<Uint8Array>>;
@@ -1,2 +0,2 @@
import { BatchId, BeeRequestOptions, Collection, CollectionUploadOptions, Data, FileData, FileUploadOptions, Readable, ReferenceOrEns, UploadResult } from '../types';
import { BatchId, BeeRequestOptions, Collection, CollectionUploadOptions, Data, DownloadRedundancyOptions, FileData, FileUploadOptions, Readable, ReferenceOrEns, UploadRedundancyOptions, UploadResult } from '../types';
/**
@@ -11,3 +11,3 @@ * Upload single file
*/
export declare function uploadFile(requestOptions: BeeRequestOptions, data: string | Uint8Array | Readable | ArrayBuffer, postageBatchId: BatchId, name?: string, options?: FileUploadOptions): Promise<UploadResult>;
export declare function uploadFile(requestOptions: BeeRequestOptions, data: string | Uint8Array | Readable | ArrayBuffer, postageBatchId: BatchId, name?: string, options?: FileUploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
/**
@@ -20,3 +20,3 @@ * Download single file as a buffer
*/
export declare function downloadFile(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string): Promise<FileData<Data>>;
export declare function downloadFile(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string, options?: DownloadRedundancyOptions): Promise<FileData<Data>>;
/**
@@ -29,3 +29,3 @@ * Download single file as a readable stream
*/
export declare function downloadFileReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string): Promise<FileData<ReadableStream<Uint8Array>>>;
export declare function downloadFileReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string, options?: DownloadRedundancyOptions): Promise<FileData<ReadableStream<Uint8Array>>>;
/**
@@ -38,2 +38,2 @@ * Upload collection
*/
export declare function uploadCollection(requestOptions: BeeRequestOptions, collection: Collection<Uint8Array>, postageBatchId: BatchId, options?: CollectionUploadOptions): Promise<UploadResult>;
export declare function uploadCollection(requestOptions: BeeRequestOptions, collection: Collection<Uint8Array>, postageBatchId: BatchId, options?: CollectionUploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
@@ -151,2 +151,46 @@ /// <reference types="node" />
}
/**
* Add redundancy to the data being uploaded so that downloaders can download it with better UX.
* 0 value is default and does not add any redundancy to the file.
*/
export declare enum RedundancyLevel {
OFF = 0,
MEDIUM = 1,
STRONG = 2,
INSANE = 3,
PARANOID = 4
}
export interface UploadRedundancyOptions {
redundancyLevel?: RedundancyLevel;
}
/**
* Specify the retrieve strategy on redundant data.
* The possible values are NONE, DATA, PROX and RACE.
* Strategy NONE means no prefetching takes place.
* Strategy DATA means only data chunks are prefetched.
* Strategy PROX means only chunks that are close to the node are prefetched.
* Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
* Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
* The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
*/
export declare enum RedundancyStrategy {
NONE = 0,
DATA = 1,
PROX = 2,
RACE = 3
}
export interface DownloadRedundancyOptions {
/**
* Specify the retrieve strategy on redundant data.
*/
redundancyStrategy?: RedundancyStrategy;
/**
* Specify if the retrieve strategies (chunk prefetching on redundant data) are used in a fallback cascade. The default is true.
*/
fallback?: boolean;
/**
* Specify the timeout for chunk retrieval. The default is 30 seconds.
*/
timeoutMs?: number;
}
export interface FileUploadOptions extends UploadOptions {
@@ -153,0 +197,0 @@ /**
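The doc comments above pin down the defaults (fallback cascade on, 30-second chunk-retrieval timeout), so a typed options object only needs to state deviations. A small sketch using the declared interface, assuming it is re-exported from the package root like the other types:

import { RedundancyStrategy } from '@ethersphere/bee-js'
import type { DownloadRedundancyOptions } from '@ethersphere/bee-js'

// Prefetch only data chunks, keep the fallback cascade, and tighten the
// per-chunk retrieval timeout from 30 s to 10 s.
const downloadOptions: DownloadRedundancyOptions = {
  redundancyStrategy: RedundancyStrategy.DATA,
  fallback: true, // the default; shown for clarity
  timeoutMs: 10_000,
}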
@@ -9,2 +9,3 @@ export { getCollectionSize } from './collection';
export { makeMaxTarget } from './pss';
export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampMaximumCapacityBytes, getStampEffectiveBytes, getStampTtlSeconds, getStampUsage, } from './stamps';
export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampEffectiveBytes, getStampMaximumCapacityBytes, getStampTtlSeconds, getStampUsage, } from './stamps';
export { approximateOverheadForRedundancyLevel, getRedundancyStat, getRedundancyStats } from './redundancy';
@@ -1,3 +0,5 @@
import { BatchId, FileHeaders, UploadOptions } from '../types';
import { BatchId, DownloadRedundancyOptions, FileHeaders, UploadOptions, UploadRedundancyOptions } from '../types';
export declare function readFileHeaders(headers: Record<string, string>): FileHeaders;
export declare function extractUploadHeaders(postageBatchId: BatchId, options?: UploadOptions): Record<string, string>;
export declare function extractRedundantUploadHeaders(postageBatchId: BatchId, options?: UploadOptions & UploadRedundancyOptions): Record<string, string>;
export declare function extractDownloadHeaders(options?: DownloadRedundancyOptions): Record<string, string>;
{
"name": "@ethersphere/bee-js",
"version": "6.8.1",
"version": "6.9.0",
"description": "Javascript client for Bee",
@@ -5,0 +5,0 @@ "keywords": [