s3-chunk-uploader
Comparing version 1.0.2 to 1.1.0
import AWS from 'aws-sdk';
type ProgressEvent = {
    progress: number;
};
type Strategy = "parallel" | "serial";
/**
 * Uploads a media file to S3 in chunks.
 *
 * @param {Object} params - The parameters for the upload.
 * @param {Blob} params.blob - The media file to upload.
 * @param {string} params.bucket - The S3 bucket to upload to.
 * @param {string} params.key - The key for the uploaded file.
 * @param {AWS.S3.BucketCannedACL} params.ACL - The ACL for the uploaded file.
 * @param {"parallel" | "serial"} [params.strategy="serial"] - The strategy to use for uploading chunks.
 * "serial" uploads chunks one after another, while "parallel" uploads chunks concurrently.
 *
 * @returns {Promise<string | undefined>} - The URL of the uploaded file, or undefined if the upload fails.
 *
 * @throws {Error} - Throws an error if the upload fails after the maximum number of retries.
 * Class to handle chunked uploads to S3.
 */
@@ -22,2 +14,6 @@ declare class ChunkUploader {
    private MIN_CHUNK_SIZE;
    private totalChunks;
    private totalProgress;
    private totalFileSize;
    private parallelProgressTracker;
    constructor({ accessKeyId, secretAccessKey, region, MAX_RETRIES, MIN_CHUNK_SIZE, }: {
@@ -30,5 +26,29 @@ accessKeyId: string;
    });
    private delay;
    /**
     * Delays execution for a specified time.
     * @param ms - Milliseconds to delay.
     * @returns Promise that resolves after the delay.
     */
    private static delay;
    /**
     * Uploads a chunk with retry logic.
     * @param partParams - Parameters for the chunk upload.
     * @param onProgress - Callback for progress updates.
     * @param attempt - Current attempt number.
     * @param strategy - Upload strategy (serial or parallel).
     * @param chunkId - ID of the chunk.
     * @returns Promise resolving to the uploaded part information.
     */
    private uploadChunkWithRetry;
    uploadMediaInChunks({ blob, bucket, key, ACL, strategy, }: {
    /**
     * Uploads a media file to S3 in chunks.
     * @param blob - The media file to upload.
     * @param bucket - The S3 bucket to upload to.
     * @param key - The key for the uploaded file.
     * @param ACL - The ACL for the uploaded file.
     * @param strategy - The strategy to use for uploading chunks.
     * @param onProgress - Callback for progress updates.
     * @returns Promise resolving to the URL of the uploaded file.
     */
    uploadMediaInChunks({ blob, bucket, key, ACL, strategy, onProgress, }: {
        blob: Blob;
@@ -38,3 +58,4 @@ bucket: string;
        ACL: AWS.S3.BucketCannedACL;
        strategy: "parallel" | "serial";
        strategy?: Strategy;
        onProgress?: (event: ProgressEvent) => void;
    }): Promise<string | undefined>;
@@ -41,0 +62,0 @@ }
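
Read together, the new declarations make `strategy` optional (typed via the new `Strategy` alias, defaulting to `"serial"`) and add an optional `onProgress` callback that receives a `ProgressEvent` of shape `{ progress: number }`. A minimal usage sketch against the 1.1.0 typings — the export form, region, bucket, key, and constructor values below are assumptions, as none of them appear in this diff:

```ts
// Sketch only: assumes ChunkUploader is a named export of "s3-chunk-uploader".
import { ChunkUploader } from "s3-chunk-uploader";

const uploader = new ChunkUploader({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID!,     // requires Node env types
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
  region: "us-east-1",             // hypothetical region
  MAX_RETRIES: 3,                  // hypothetical; the package's defaults are not shown here
  MIN_CHUNK_SIZE: 5 * 1024 * 1024, // S3 multipart parts must be at least 5 MiB (except the last)
});

const blob = new Blob([new Uint8Array(12 * 1024 * 1024)]); // placeholder payload

const url = await uploader.uploadMediaInChunks({
  blob,
  bucket: "my-bucket",             // hypothetical bucket and key
  key: "videos/demo.mp4",
  ACL: "public-read",
  strategy: "parallel",            // optional as of 1.1.0; defaults to "serial"
  onProgress: ({ progress }) => console.log(`Upload progress: ${progress}%`),
});
console.log("Uploaded to:", url);  // undefined if the upload failed
```

Because `strategy` and `onProgress` are both optional, 1.0.2 call sites compile unchanged, which matches the minor version bump.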
@@ -39,6 +39,10 @@ "use strict";
var import_aws_sdk = __toESM(require("aws-sdk"));
var ChunkUploader = class {
var ChunkUploader = class _ChunkUploader {
  s3;
  MAX_RETRIES;
  MIN_CHUNK_SIZE;
  totalChunks = 0;
  totalProgress = 0;
  totalFileSize = 0;
  parallelProgressTracker = {};
  constructor({
@@ -60,13 +64,85 @@ accessKeyId,
  }
  async delay(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  /**
   * Delays execution for a specified time.
   * @param ms - Milliseconds to delay.
   * @returns Promise that resolves after the delay.
   */
  static async delay(ms) {
    await new Promise((resolve) => {
      setTimeout(() => {
        resolve(true);
      }, ms);
    });
    return true;
  }
  async uploadChunkWithRetry(partParams, attempt = 1) {
  /**
   * Uploads a chunk with retry logic.
   * @param partParams - Parameters for the chunk upload.
   * @param onProgress - Callback for progress updates.
   * @param attempt - Current attempt number.
   * @param strategy - Upload strategy (serial or parallel).
   * @param chunkId - ID of the chunk.
   * @returns Promise resolving to the uploaded part information.
   */
  async uploadChunkWithRetry({
    partParams,
    onProgress,
    attempt = 1,
    strategy = "serial",
    chunkId
  }) {
    try {
      const part = await this.s3.uploadPart(partParams).promise();
      const handleParallelProgress = ({ loaded }) => {
        this.parallelProgressTracker[chunkId] = loaded;
        const totalChunksizeUploaded = Object.values(
          this.parallelProgressTracker
        ).reduce((acc, curr) => acc + curr, 0);
        const progress = Math.round(
          totalChunksizeUploaded / this.totalFileSize * 100
        );
        if (onProgress && typeof onProgress === "function") {
          let totalProgress = progress;
          if (totalProgress > 99) {
            totalProgress = 99;
          }
          onProgress({ progress: totalProgress });
        }
      };
      let localProgress = 0;
      const handleSerialProgress = ({
        loaded,
        total
      }) => {
        const progress = loaded / total * 100;
        const progressOutOfTotalChunks = Math.round(
          progress / this.totalChunks
        );
        localProgress = progressOutOfTotalChunks;
        if (onProgress && typeof onProgress === "function") {
          let totalProgress = this.totalProgress + progressOutOfTotalChunks;
          if (totalProgress > 99) {
            totalProgress = 99;
          }
          onProgress({ progress: totalProgress });
        }
      };
      const part = await this.s3.uploadPart(partParams).on("httpUploadProgress", ({ loaded, total }) => {
        if (strategy === "serial") {
          handleSerialProgress({ loaded, total });
          return;
        }
        handleParallelProgress({ loaded });
      }).promise();
      this.totalProgress += localProgress;
      return { ETag: part.ETag, PartNumber: partParams.PartNumber };
    } catch (error) {
      if (attempt < this.MAX_RETRIES) {
        await this.delay(1e3);
        return this.uploadChunkWithRetry(partParams, attempt + 1);
        await _ChunkUploader.delay(1e3);
        return this.uploadChunkWithRetry({
          partParams,
          onProgress,
          attempt: attempt + 1,
          strategy,
          chunkId
        });
      }
@@ -76,2 +152,12 @@ throw error;
  }
  /**
   * Uploads a media file to S3 in chunks.
   * @param blob - The media file to upload.
   * @param bucket - The S3 bucket to upload to.
   * @param key - The key for the uploaded file.
   * @param ACL - The ACL for the uploaded file.
   * @param strategy - The strategy to use for uploading chunks.
   * @param onProgress - Callback for progress updates.
   * @returns Promise resolving to the URL of the uploaded file.
   */
  async uploadMediaInChunks({
@@ -82,4 +168,7 @@ blob,
    ACL,
    strategy = "serial"
    strategy = "serial",
    onProgress
  }) {
    this.totalProgress = 0;
    this.totalFileSize = blob.size;
    const chunks = [];
@@ -96,2 +185,3 @@ let offset = 0;
    }
    this.totalChunks = chunks.length;
    const multipartParams = {
@@ -116,3 +206,9 @@ Bucket: bucket,
      };
      const part = await this.uploadChunkWithRetry(partParams);
      const part = await this.uploadChunkWithRetry({
        partParams,
        onProgress,
        strategy,
        chunkId: index,
        attempt: 1
      });
      temp.push(part);
@@ -129,3 +225,9 @@ }
      };
      return this.uploadChunkWithRetry(partParams);
      return this.uploadChunkWithRetry({
        partParams,
        onProgress,
        strategy,
        chunkId: index,
        attempt: 1
      });
    });
@@ -132,0 +234,0 @@ temp = await Promise.all(partsPromises);
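
Stripped of the S3 plumbing, the two `httpUploadProgress` handlers above reduce to two pieces of arithmetic: the parallel path sums per-chunk byte counts against the whole file, while the serial path gives each chunk an equal share of 100 and accumulates completed shares in `totalProgress`. A standalone sketch of that math (not the package's API, just the formulas the handlers implement):

```ts
// Parallel: bytes loaded so far across all in-flight chunks, over the file size.
function parallelProgress(
  tracker: Record<number, number>, // chunkId -> bytes loaded
  totalFileSize: number
): number {
  const loaded = Object.values(tracker).reduce((acc, n) => acc + n, 0);
  return Math.min(99, Math.round((loaded / totalFileSize) * 100));
}

// Serial: the current chunk's percentage scaled to its 1/totalChunks share,
// added to the accumulated progress of previously completed chunks.
function serialProgress(
  accumulated: number, // this.totalProgress in the class
  loaded: number,
  total: number,       // size of the current chunk
  totalChunks: number
): number {
  const chunkShare = Math.round(((loaded / total) * 100) / totalChunks);
  return Math.min(99, accumulated + chunkShare);
}
```

Both paths clamp the reported value at 99, presumably so a caller never sees 100% before the multipart upload has actually completed.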
{
  "name": "s3-chunk-uploader",
  "version": "1.0.2",
  "version": "1.1.0",
  "main": "dist/index.js",
@@ -5,0 +5,0 @@ "types": "dist/index.d.ts",
@@ -36,2 +36,5 @@ # S3 Chunk Uploader
  strategy: "parallel",
  onProgress: (event) => {
    console.log(`Upload progress: ${event.progress}%`);
  },
});
@@ -50,2 +53,3 @@ ```
- `strategy`: The upload strategy (e.g., `parallel`).
- `onProgress`: A callback function to track the upload progress.
@@ -52,0 +56,0 @@ ## License
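
Since the event payload is just `{ progress: number }`, wiring the callback to UI state is direct. A browser-side sketch, reusing the hypothetical `uploader` and `blob` from the earlier example and an assumed `<progress id="upload-progress">` element:

```ts
// Assumes an HTML element: <progress id="upload-progress" max="100"></progress>
const bar = document.querySelector<HTMLProgressElement>("#upload-progress")!;

const url = await uploader.uploadMediaInChunks({
  blob,
  bucket: "my-bucket",    // hypothetical
  key: "videos/demo.mp4", // hypothetical
  ACL: "private",
  onProgress: ({ progress }) => {
    bar.value = progress; // integer in 0..99 while the upload is in flight
  },
});
if (url) bar.value = 100; // the resolved promise marks actual completion
```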