Socket
Socket
Sign in · Demo · Install

@aws-sdk/lib-storage

Package Overview
Dependencies
Maintainers
5
Versions
401
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@aws-sdk/lib-storage - npm Package Compare versions

Comparing version 3.578.0 to 3.582.0

176

dist-cjs/index.js

@@ -178,3 +178,3 @@ "use strict";

/**
* S3 multipart upload does not allow more than 10000 parts.
* S3 multipart upload does not allow more than 10,000 parts.
*/

@@ -188,4 +188,7 @@ this.MAX_PARTS = 1e4;

this.concurrentUploaders = [];
this.abortMultipartUploadCommand = null;
this.uploadedParts = [];
this.uploadEnqueuedPartsCount = 0;
this.isMultiPart = true;
this.sent = false;
this.queueSize = options.queueSize || this.queueSize;

@@ -206,2 +209,8 @@ this.partSize = options.partSize || this.partSize;

async done() {
if (this.sent) {
throw new Error(
"@aws-sdk/lib-storage: this instance of Upload has already executed .done(). Create a new instance."
);
}
this.sent = true;
return await Promise.race([this.__doMultipartUpload(), this.__abortTimeout(this.abortController.signal)]);

@@ -281,3 +290,10 @@ }

const createCommandParams = { ...this.params, Body: void 0 };
this.createMultiPartPromise = this.client.send(new import_client_s3.CreateMultipartUploadCommand(createCommandParams));
this.createMultiPartPromise = this.client.send(new import_client_s3.CreateMultipartUploadCommand(createCommandParams)).then((createMpuResponse) => {
this.abortMultipartUploadCommand = new import_client_s3.AbortMultipartUploadCommand({
Bucket: this.params.Bucket,
Key: this.params.Key,
UploadId: createMpuResponse.UploadId
});
return createMpuResponse;
});
}

@@ -288,75 +304,33 @@ return this.createMultiPartPromise;

for await (const dataPart of dataFeeder) {
if (this.uploadedParts.length > this.MAX_PARTS) {
if (this.uploadEnqueuedPartsCount > this.MAX_PARTS) {
throw new Error(
`Exceeded ${this.MAX_PARTS} as part of the upload to ${this.params.Key} and ${this.params.Bucket}.`
`Exceeded ${this.MAX_PARTS} parts in multipart upload to Bucket: ${this.params.Bucket} Key: ${this.params.Key}.`
);
}
try {
if (this.abortController.signal.aborted) {
return;
}
if (dataPart.partNumber === 1 && dataPart.lastPart) {
return await this.__uploadUsingPut(dataPart);
}
if (!this.uploadId) {
const { UploadId } = await this.__createMultipartUpload();
this.uploadId = UploadId;
if (this.abortController.signal.aborted) {
return;
}
if (dataPart.partNumber === 1 && dataPart.lastPart) {
return await this.__uploadUsingPut(dataPart);
}
if (!this.uploadId) {
const { UploadId } = await this.__createMultipartUpload();
this.uploadId = UploadId;
if (this.abortController.signal.aborted) {
return;
}
}
const partSize = byteLength(dataPart.data) || 0;
const requestHandler = this.client.config.requestHandler;
const eventEmitter = requestHandler instanceof import_events.EventEmitter ? requestHandler : null;
let lastSeenBytes = 0;
const uploadEventListener = /* @__PURE__ */ __name((event, request) => {
const requestPartSize = Number(request.query["partNumber"]) || -1;
if (requestPartSize !== dataPart.partNumber) {
return;
}
if (event.total && partSize) {
this.bytesUploadedSoFar += event.loaded - lastSeenBytes;
lastSeenBytes = event.loaded;
}
this.__notifyProgress({
loaded: this.bytesUploadedSoFar,
total: this.totalBytes,
part: dataPart.partNumber,
Key: this.params.Key,
Bucket: this.params.Bucket
});
}, "uploadEventListener");
if (eventEmitter !== null) {
eventEmitter.on("xhr.upload.progress", uploadEventListener);
}
const partResult = await this.client.send(
new import_client_s3.UploadPartCommand({
...this.params,
UploadId: this.uploadId,
Body: dataPart.data,
PartNumber: dataPart.partNumber
})
);
if (eventEmitter !== null) {
eventEmitter.off("xhr.upload.progress", uploadEventListener);
}
if (this.abortController.signal.aborted) {
}
const partSize = byteLength(dataPart.data) || 0;
const requestHandler = this.client.config.requestHandler;
const eventEmitter = requestHandler instanceof import_events.EventEmitter ? requestHandler : null;
let lastSeenBytes = 0;
const uploadEventListener = /* @__PURE__ */ __name((event, request) => {
const requestPartSize = Number(request.query["partNumber"]) || -1;
if (requestPartSize !== dataPart.partNumber) {
return;
}
if (!partResult.ETag) {
throw new Error(
`Part ${dataPart.partNumber} is missing ETag in UploadPart response. Missing Bucket CORS configuration for ETag header?`
);
if (event.total && partSize) {
this.bytesUploadedSoFar += event.loaded - lastSeenBytes;
lastSeenBytes = event.loaded;
}
this.uploadedParts.push({
PartNumber: dataPart.partNumber,
ETag: partResult.ETag,
...partResult.ChecksumCRC32 && { ChecksumCRC32: partResult.ChecksumCRC32 },
...partResult.ChecksumCRC32C && { ChecksumCRC32C: partResult.ChecksumCRC32C },
...partResult.ChecksumSHA1 && { ChecksumSHA1: partResult.ChecksumSHA1 },
...partResult.ChecksumSHA256 && { ChecksumSHA256: partResult.ChecksumSHA256 }
});
if (eventEmitter === null) {
this.bytesUploadedSoFar += partSize;
}
this.__notifyProgress({

@@ -369,10 +343,44 @@ loaded: this.bytesUploadedSoFar,

});
} catch (e) {
if (!this.uploadId) {
throw e;
}
if (this.leavePartsOnError) {
throw e;
}
}, "uploadEventListener");
if (eventEmitter !== null) {
eventEmitter.on("xhr.upload.progress", uploadEventListener);
}
this.uploadEnqueuedPartsCount += 1;
const partResult = await this.client.send(
new import_client_s3.UploadPartCommand({
...this.params,
UploadId: this.uploadId,
Body: dataPart.data,
PartNumber: dataPart.partNumber
})
);
if (eventEmitter !== null) {
eventEmitter.off("xhr.upload.progress", uploadEventListener);
}
if (this.abortController.signal.aborted) {
return;
}
if (!partResult.ETag) {
throw new Error(
`Part ${dataPart.partNumber} is missing ETag in UploadPart response. Missing Bucket CORS configuration for ETag header?`
);
}
this.uploadedParts.push({
PartNumber: dataPart.partNumber,
ETag: partResult.ETag,
...partResult.ChecksumCRC32 && { ChecksumCRC32: partResult.ChecksumCRC32 },
...partResult.ChecksumCRC32C && { ChecksumCRC32C: partResult.ChecksumCRC32C },
...partResult.ChecksumSHA1 && { ChecksumSHA1: partResult.ChecksumSHA1 },
...partResult.ChecksumSHA256 && { ChecksumSHA256: partResult.ChecksumSHA256 }
});
if (eventEmitter === null) {
this.bytesUploadedSoFar += partSize;
}
this.__notifyProgress({
loaded: this.bytesUploadedSoFar,
total: this.totalBytes,
part: dataPart.partNumber,
Key: this.params.Key,
Bucket: this.params.Bucket
});
}

@@ -382,8 +390,16 @@ }

const dataFeeder = getChunk(this.params.Body, this.partSize);
const concurrentUploaderFailures = [];
for (let index = 0; index < this.queueSize; index++) {
const currentUpload = this.__doConcurrentUpload(dataFeeder);
const currentUpload = this.__doConcurrentUpload(dataFeeder).catch((err) => {
concurrentUploaderFailures.push(err);
});
this.concurrentUploaders.push(currentUpload);
}
await Promise.all(this.concurrentUploaders);
if (concurrentUploaderFailures.length >= 1) {
await this.markUploadAsAborted();
throw concurrentUploaderFailures[0];
}
if (this.abortController.signal.aborted) {
await this.markUploadAsAborted();
throw Object.assign(new Error("Upload aborted."), { name: "AbortError" });

@@ -409,2 +425,3 @@ }

}
this.abortMultipartUploadCommand = null;
if (this.tags.length) {

@@ -422,2 +439,13 @@ await this.client.send(

}
/**
* Abort the last multipart upload in progress
* if we know the upload id, the user did not specify to leave the parts, and
* we have a prepared AbortMultipartUpload command.
*/
async markUploadAsAborted() {
// Abort only when all three hold: a multipart upload actually started
// (uploadId is set), the caller did not opt out via leavePartsOnError,
// and the prepared AbortMultipartUploadCommand has not already been
// consumed (it is set to null after use, and on successful completion).
if (this.uploadId && !this.leavePartsOnError && null !== this.abortMultipartUploadCommand) {
await this.client.send(this.abortMultipartUploadCommand);
// Null the command so a repeated call (e.g. uploader failure followed
// by an abort signal) does not send AbortMultipartUpload twice.
this.abortMultipartUploadCommand = null;
}
}
__notifyProgress(progress) {

@@ -424,0 +452,0 @@ if (this.uploadEvent) {

@@ -1,2 +0,2 @@

import { CompleteMultipartUploadCommand, CreateMultipartUploadCommand, PutObjectCommand, PutObjectTaggingCommand, UploadPartCommand, } from "@aws-sdk/client-s3";
import { AbortMultipartUploadCommand, CompleteMultipartUploadCommand, CreateMultipartUploadCommand, PutObjectCommand, PutObjectTaggingCommand, UploadPartCommand, } from "@aws-sdk/client-s3";
import { AbortController } from "@smithy/abort-controller";

@@ -18,4 +18,7 @@ import { getEndpointFromInstructions, toEndpointV1, } from "@smithy/middleware-endpoint";

this.concurrentUploaders = [];
this.abortMultipartUploadCommand = null;
this.uploadedParts = [];
this.uploadEnqueuedPartsCount = 0;
this.isMultiPart = true;
this.sent = false;
this.queueSize = options.queueSize || this.queueSize;

@@ -36,2 +39,6 @@ this.partSize = options.partSize || this.partSize;

async done() {
if (this.sent) {
throw new Error("@aws-sdk/lib-storage: this instance of Upload has already executed .done(). Create a new instance.");
}
this.sent = true;
return await Promise.race([this.__doMultipartUpload(), this.__abortTimeout(this.abortController.signal)]);

@@ -111,3 +118,12 @@ }

const createCommandParams = { ...this.params, Body: undefined };
this.createMultiPartPromise = this.client.send(new CreateMultipartUploadCommand(createCommandParams));
this.createMultiPartPromise = this.client
.send(new CreateMultipartUploadCommand(createCommandParams))
.then((createMpuResponse) => {
this.abortMultipartUploadCommand = new AbortMultipartUploadCommand({
Bucket: this.params.Bucket,
Key: this.params.Key,
UploadId: createMpuResponse.UploadId,
});
return createMpuResponse;
});
}

@@ -118,69 +134,31 @@ return this.createMultiPartPromise;

for await (const dataPart of dataFeeder) {
if (this.uploadedParts.length > this.MAX_PARTS) {
throw new Error(`Exceeded ${this.MAX_PARTS} as part of the upload to ${this.params.Key} and ${this.params.Bucket}.`);
if (this.uploadEnqueuedPartsCount > this.MAX_PARTS) {
throw new Error(`Exceeded ${this.MAX_PARTS} parts in multipart upload to Bucket: ${this.params.Bucket} Key: ${this.params.Key}.`);
}
try {
if (this.abortController.signal.aborted) {
return;
}
if (dataPart.partNumber === 1 && dataPart.lastPart) {
return await this.__uploadUsingPut(dataPart);
}
if (!this.uploadId) {
const { UploadId } = await this.__createMultipartUpload();
this.uploadId = UploadId;
if (this.abortController.signal.aborted) {
return;
}
if (dataPart.partNumber === 1 && dataPart.lastPart) {
return await this.__uploadUsingPut(dataPart);
}
if (!this.uploadId) {
const { UploadId } = await this.__createMultipartUpload();
this.uploadId = UploadId;
if (this.abortController.signal.aborted) {
return;
}
}
const partSize = byteLength(dataPart.data) || 0;
const requestHandler = this.client.config.requestHandler;
const eventEmitter = requestHandler instanceof EventEmitter ? requestHandler : null;
let lastSeenBytes = 0;
const uploadEventListener = (event, request) => {
const requestPartSize = Number(request.query["partNumber"]) || -1;
if (requestPartSize !== dataPart.partNumber) {
return;
}
if (event.total && partSize) {
this.bytesUploadedSoFar += event.loaded - lastSeenBytes;
lastSeenBytes = event.loaded;
}
this.__notifyProgress({
loaded: this.bytesUploadedSoFar,
total: this.totalBytes,
part: dataPart.partNumber,
Key: this.params.Key,
Bucket: this.params.Bucket,
});
};
if (eventEmitter !== null) {
eventEmitter.on("xhr.upload.progress", uploadEventListener);
}
const partResult = await this.client.send(new UploadPartCommand({
...this.params,
UploadId: this.uploadId,
Body: dataPart.data,
PartNumber: dataPart.partNumber,
}));
if (eventEmitter !== null) {
eventEmitter.off("xhr.upload.progress", uploadEventListener);
}
if (this.abortController.signal.aborted) {
}
const partSize = byteLength(dataPart.data) || 0;
const requestHandler = this.client.config.requestHandler;
const eventEmitter = requestHandler instanceof EventEmitter ? requestHandler : null;
let lastSeenBytes = 0;
const uploadEventListener = (event, request) => {
const requestPartSize = Number(request.query["partNumber"]) || -1;
if (requestPartSize !== dataPart.partNumber) {
return;
}
if (!partResult.ETag) {
throw new Error(`Part ${dataPart.partNumber} is missing ETag in UploadPart response. Missing Bucket CORS configuration for ETag header?`);
if (event.total && partSize) {
this.bytesUploadedSoFar += event.loaded - lastSeenBytes;
lastSeenBytes = event.loaded;
}
this.uploadedParts.push({
PartNumber: dataPart.partNumber,
ETag: partResult.ETag,
...(partResult.ChecksumCRC32 && { ChecksumCRC32: partResult.ChecksumCRC32 }),
...(partResult.ChecksumCRC32C && { ChecksumCRC32C: partResult.ChecksumCRC32C }),
...(partResult.ChecksumSHA1 && { ChecksumSHA1: partResult.ChecksumSHA1 }),
...(partResult.ChecksumSHA256 && { ChecksumSHA256: partResult.ChecksumSHA256 }),
});
if (eventEmitter === null) {
this.bytesUploadedSoFar += partSize;
}
this.__notifyProgress({

@@ -193,11 +171,40 @@ loaded: this.bytesUploadedSoFar,

});
};
if (eventEmitter !== null) {
eventEmitter.on("xhr.upload.progress", uploadEventListener);
}
catch (e) {
if (!this.uploadId) {
throw e;
}
if (this.leavePartsOnError) {
throw e;
}
this.uploadEnqueuedPartsCount += 1;
const partResult = await this.client.send(new UploadPartCommand({
...this.params,
UploadId: this.uploadId,
Body: dataPart.data,
PartNumber: dataPart.partNumber,
}));
if (eventEmitter !== null) {
eventEmitter.off("xhr.upload.progress", uploadEventListener);
}
if (this.abortController.signal.aborted) {
return;
}
if (!partResult.ETag) {
throw new Error(`Part ${dataPart.partNumber} is missing ETag in UploadPart response. Missing Bucket CORS configuration for ETag header?`);
}
this.uploadedParts.push({
PartNumber: dataPart.partNumber,
ETag: partResult.ETag,
...(partResult.ChecksumCRC32 && { ChecksumCRC32: partResult.ChecksumCRC32 }),
...(partResult.ChecksumCRC32C && { ChecksumCRC32C: partResult.ChecksumCRC32C }),
...(partResult.ChecksumSHA1 && { ChecksumSHA1: partResult.ChecksumSHA1 }),
...(partResult.ChecksumSHA256 && { ChecksumSHA256: partResult.ChecksumSHA256 }),
});
if (eventEmitter === null) {
this.bytesUploadedSoFar += partSize;
}
this.__notifyProgress({
loaded: this.bytesUploadedSoFar,
total: this.totalBytes,
part: dataPart.partNumber,
Key: this.params.Key,
Bucket: this.params.Bucket,
});
}

@@ -207,8 +214,16 @@ }

const dataFeeder = getChunk(this.params.Body, this.partSize);
const concurrentUploaderFailures = [];
for (let index = 0; index < this.queueSize; index++) {
const currentUpload = this.__doConcurrentUpload(dataFeeder);
const currentUpload = this.__doConcurrentUpload(dataFeeder).catch((err) => {
concurrentUploaderFailures.push(err);
});
this.concurrentUploaders.push(currentUpload);
}
await Promise.all(this.concurrentUploaders);
if (concurrentUploaderFailures.length >= 1) {
await this.markUploadAsAborted();
throw concurrentUploaderFailures[0];
}
if (this.abortController.signal.aborted) {
await this.markUploadAsAborted();
throw Object.assign(new Error("Upload aborted."), { name: "AbortError" });

@@ -235,2 +250,3 @@ }

}
this.abortMultipartUploadCommand = null;
if (this.tags.length) {

@@ -246,2 +262,8 @@ await this.client.send(new PutObjectTaggingCommand({

}
/**
 * Abort the multipart upload in progress, if we know the upload id,
 * the user did not ask to leave the parts on error, and we still hold
 * a prepared AbortMultipartUpload command.
 */
async markUploadAsAborted() {
// The command is prepared when CreateMultipartUpload resolves and is
// set to null after being sent (or after successful completion), so
// this is a no-op on repeated invocation.
if (this.uploadId && !this.leavePartsOnError && null !== this.abortMultipartUploadCommand) {
await this.client.send(this.abortMultipartUploadCommand);
this.abortMultipartUploadCommand = null;
}
}
__notifyProgress(progress) {

@@ -248,0 +270,0 @@ if (this.uploadEvent) {

@@ -11,8 +11,8 @@ import { CompleteMultipartUploadCommandOutput } from "@aws-sdk/client-s3";

private MAX_PARTS;
private queueSize;
private partSize;
private leavePartsOnError;
private tags;
private client;
private params;
private readonly queueSize;
private readonly partSize;
private readonly leavePartsOnError;
private readonly tags;
private readonly client;
private readonly params;
private totalBytes?;

@@ -23,7 +23,10 @@ private bytesUploadedSoFar;

private createMultiPartPromise?;
private abortMultipartUploadCommand;
private uploadedParts;
private uploadId?;
private uploadEnqueuedPartsCount;
uploadId?: string;
uploadEvent?: string;
private isMultiPart;
private singleUploadResult?;
private sent;
constructor(options: Options);

@@ -37,2 +40,3 @@ abort(): Promise<void>;

private __doMultipartUpload;
private markUploadAsAborted;
private __notifyProgress;

@@ -39,0 +43,0 @@ private __abortTimeout;

@@ -12,11 +12,11 @@ /// <reference types="node" />

/**
* S3 multipart upload does not allow more than 10000 parts.
* S3 multipart upload does not allow more than 10,000 parts.
*/
private MAX_PARTS;
private queueSize;
private partSize;
private leavePartsOnError;
private tags;
private client;
private params;
private readonly queueSize;
private readonly partSize;
private readonly leavePartsOnError;
private readonly tags;
private readonly client;
private readonly params;
private totalBytes?;

@@ -27,7 +27,13 @@ private bytesUploadedSoFar;

private createMultiPartPromise?;
private abortMultipartUploadCommand;
private uploadedParts;
private uploadId?;
private uploadEnqueuedPartsCount;
/**
* Last UploadId if the upload was done with MultipartUpload and not PutObject.
*/
uploadId?: string;
uploadEvent?: string;
private isMultiPart;
private singleUploadResult?;
private sent;
constructor(options: Options);

@@ -41,2 +47,8 @@ abort(): Promise<void>;

private __doMultipartUpload;
/**
* Abort the last multipart upload in progress
* if we know the upload id, the user did not specify to leave the parts, and
* we have a prepared AbortMultipartUpload command.
*/
private markUploadAsAborted;
private __notifyProgress;

@@ -43,0 +55,0 @@ private __abortTimeout;

{
"name": "@aws-sdk/lib-storage",
"version": "3.578.0",
"version": "3.582.0",
"description": "Storage higher order operation",

@@ -31,3 +31,3 @@ "main": "./dist-cjs/index.js",

"@smithy/middleware-endpoint": "^3.0.0",
"@smithy/smithy-client": "^3.0.0",
"@smithy/smithy-client": "^3.0.1",
"buffer": "5.6.0",

@@ -39,6 +39,6 @@ "events": "3.3.0",

"peerDependencies": {
"@aws-sdk/client-s3": "^3.577.0"
"@aws-sdk/client-s3": "^3.582.0"
},
"devDependencies": {
"@aws-sdk/client-s3": "3.577.0",
"@aws-sdk/client-s3": "3.582.0",
"@smithy/types": "^3.0.0",

@@ -45,0 +45,0 @@ "@tsconfig/recommended": "1.0.1",

@@ -19,8 +19,19 @@ # @aws-sdk/lib-storage

// optional tags
tags: [
/*...*/
], // optional tags
queueSize: 4, // optional concurrency configuration
partSize: 1024 * 1024 * 5, // optional size of each part, in bytes, at least 5MB
leavePartsOnError: false, // optional manually handle dropped parts
],
// additional optional fields show default values below:
// (optional) concurrency configuration
queueSize: 4,
// (optional) size of each part, in bytes, at least 5MB
partSize: 1024 * 1024 * 5,
// (optional) when true, do not automatically call AbortMultipartUpload when
// a multipart upload fails to complete. You should then manually handle
// the leftover parts.
leavePartsOnError: false,
});

@@ -27,0 +38,0 @@

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc