Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@cumulus/aws-client

Package Overview
Dependencies
Maintainers
12
Versions
135
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@cumulus/aws-client - npm Package Compare versions

Comparing version 9.9.0 to 10.0.0-beta.0

19

lib/S3MultipartUploads.d.ts

@@ -8,3 +8,20 @@ export interface CompleteMultipartUploadOutput extends AWS.S3.CompleteMultipartUploadOutput {

};
export declare const createMultipartChunks: (objectSize: number, maxChunkSize?: number) => Chunk[];
/**
* Each part of a multi-part copy needs to specify a byte range to be copied.
* This byte range has a starting byte and an ending byte (inclusive) that makes
* up the part. The maximum allowed chunk size is 5368709120 bytes.
*
* This function takes a file size and an optional maxSize. It returns an array
* of objects, each containing a `start` and an `end` value. These will make up
* the ranges of the multi-part copy.
*
* From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
*
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
*
* @param {number} objectSize - size of the object
* @param {number} chunkSize - chunk size of the S3 multipart uploads
* @returns {Promise<Array<Chunk>>} - array of chunks
*/
export declare const createMultipartChunks: (objectSize: number, chunkSize?: number) => Chunk[];
export declare const createMultipartUpload: (params: AWS.S3.CreateMultipartUploadRequest) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").CreateMultipartUploadOutput, import("aws-sdk").AWSError>>;

@@ -11,0 +28,0 @@ export declare const completeMultipartUpload: (params: AWS.S3.CompleteMultipartUploadRequest) => Promise<CompleteMultipartUploadOutput>;

32

lib/S3MultipartUploads.js

@@ -11,17 +11,23 @@ "use strict";

const MB = 1024 * 1024;
// Each part of a multi-part copy needs to specify a byte range to be copied.
// This byte range has a starting byte and an ending byte (inclusive) that makes
// up the part. The maximum allowed chunk size is 5368709120 bytes.
//
// This function takes a file size and an optional maxSize. It returns an array
// of objects, each containing a `start` and an `end` value. These will make up
// the ranges of the multi-part copy.
//
// From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
const createMultipartChunks = (objectSize, maxChunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, maxChunkSize)
/**
* Each part of a multi-part copy needs to specify a byte range to be copied.
* This byte range has a starting byte and an ending byte (inclusive) that makes
* up the part. The maximum allowed chunk size is 5368709120 bytes.
*
* This function takes a file size and an optional maxSize. It returns an array
* of objects, each containing a `start` and an `end` value. These will make up
* the ranges of the multi-part copy.
*
* From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
*
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
*
* @param {number} objectSize - size of the object
* @param {number} chunkSize - chunk size of the S3 multipart uploads
* @returns {Promise<Array<Chunk>>} - array of chunks
*/
const createMultipartChunks = (objectSize, chunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, chunkSize)
.map((start) => ({
start,
end: Math.min(start + maxChunkSize, objectSize) - 1,
end: Math.min(start + chunkSize, objectSize) - 1,
}));

@@ -28,0 +34,0 @@ exports.createMultipartChunks = createMultipartChunks;

{
"name": "@cumulus/aws-client",
"version": "9.9.0",
"version": "10.0.0-beta.0",
"description": "Utilities for working with AWS",

@@ -46,5 +46,5 @@ "keywords": [

"dependencies": {
"@cumulus/checksum": "9.9.0",
"@cumulus/errors": "9.9.0",
"@cumulus/logger": "9.9.0",
"@cumulus/checksum": "10.0.0-beta.0",
"@cumulus/errors": "10.0.0-beta.0",
"@cumulus/logger": "10.0.0-beta.0",
"aws-sdk": "^2.814.0",

@@ -58,3 +58,3 @@ "jsonpath-plus": "^1.1.0",

},
"gitHead": "ac89218dfaa5ba8cc228db95321e1371d3e46e88"
"gitHead": "bc283986be627ba06a4084cabd4e01d1540d14c7"
}

@@ -285,3 +285,5 @@ /**

**/
export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<void>;
export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<{
$response: import("aws-sdk").Response<{}, import("aws-sdk").AWSError>;
}>;
/**

@@ -416,2 +418,3 @@ * Delete a list of buckets and all of their objects from S3

* @param {boolean} [params.copyTags=false]
* @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
* @returns {Promise.<{ etag: string }>} object containing the ETag of the

@@ -428,3 +431,3 @@ * destination object

copyTags?: boolean;
copyMetadata?: boolean;
chunkSize?: number;
}) => Promise<{

@@ -443,2 +446,3 @@ etag: string;

* @param {boolean} [params.copyTags=false]
* @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
* @returns {Promise<undefined>}

@@ -453,4 +457,5 @@ */

copyTags?: boolean;
}) => Promise<void>;
chunkSize?: number;
}) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").DeleteObjectOutput, import("aws-sdk").AWSError>>;
export {};
//# sourceMappingURL=S3.d.ts.map

@@ -169,3 +169,3 @@ "use strict";

const { bucket, key, interval = 1000, timeout = 30 * 1000, } = params;
await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
return await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
};

@@ -427,4 +427,3 @@ exports.waitForObjectToExist = waitForObjectToExist;

try {
const r = await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
return r;
return await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
}

@@ -494,3 +493,3 @@ catch (error) {

await (0, exports.deleteS3Files)(s3Objects);
await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
return await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
});

@@ -749,2 +748,3 @@ /**

* @param {boolean} [params.copyTags=false]
* @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
* @returns {Promise.<{ etag: string }>} object containing the ETag of the

@@ -755,3 +755,3 @@ * destination object

var _a;
const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, } = params;
const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, chunkSize, } = params;
const sourceObject = (_a = params.sourceObject) !== null && _a !== void 0 ? _a : await (0, exports.headObject)(sourceBucket, sourceKey);

@@ -774,3 +774,3 @@ // Create a multi-part upload (copy) and get its UploadId

}
const chunks = S3MultipartUploads.createMultipartChunks(objectSize);
const chunks = S3MultipartUploads.createMultipartChunks(objectSize, chunkSize);
// Submit all of the upload (copy) parts to S3

@@ -820,2 +820,3 @@ const uploadPartCopyResponses = await Promise.all(chunks.map(({ start, end }, index) => uploadPartCopy({

* @param {boolean} [params.copyTags=false]
* @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
* @returns {Promise<undefined>}

@@ -831,6 +832,7 @@ */

copyTags: (0, isBoolean_1.default)(params.copyTags) ? params.copyTags : true,
chunkSize: params.chunkSize,
});
await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
return await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
};
exports.moveObject = moveObject;
//# sourceMappingURL=S3.js.map

@@ -68,3 +68,5 @@ /**

export declare const getExecutionStatus: (executionArn: string) => Promise<{
execution: import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
execution: import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput & {
$response: import("aws-sdk").Response<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
};
executionHistory: {

@@ -71,0 +73,0 @@ events: import("aws-sdk/clients/stepfunctions").HistoryEventList;

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc