s3 - npm Package Compare versions

Comparing version 0.3.1 to 1.0.0


index.js

@@ -1,85 +1,770 @@

var AWS = require('aws-sdk');
var EventEmitter = require('events').EventEmitter;
var fs = require('fs');
var rimraf = require('rimraf');
var findit = require('findit');
var Pend = require('pend');
var path = require('path');
var crypto = require('crypto');
var StreamCounter = require('stream-counter');
var mkdirp = require('mkdirp');
// Files greater than 5 gigabytes require a multipart upload, and multipart
// uploads have a different ETag format. For multipart upload ETags it is
// impossible to tell how to generate the ETag.
// Unfortunately we're still assuming that files <= 5 GB were not uploaded
// via multipart upload.
var MAX_PUTOBJECT_SIZE = 5 * 1024 * 1024 * 1024;
var MAX_DELETE_COUNT = 1000;
exports.createClient = function(options) {
return new Client(options);
};
exports.Client = Client;
function Client(options) {
options = options || {};
this.s3 = options.s3Client || new AWS.S3(options.s3Options);
this.s3Pend = new Pend();
this.s3Pend.max = options.maxAsyncS3 || Infinity;
this.s3RetryCount = options.s3RetryCount || 3;
this.s3RetryDelay = options.s3RetryDelay || 1000;
}
Client.prototype.deleteObjects = function(s3Params) {
var self = this;
var ee = new EventEmitter();
var params = {
Bucket: s3Params.Bucket,
Delete: extend({}, s3Params.Delete),
MFA: s3Params.MFA,
};
var slices = chunkArray(params.Delete.Objects, MAX_DELETE_COUNT);
var errorOccurred = false;
var pend = new Pend();
ee.progressAmount = 0;
ee.progressTotal = params.Delete.Objects.length;
slices.forEach(uploadSlice);
pend.wait(function(err) {
if (err) {
ee.emit('error', err);
return;
}
ee.emit('end');
});
return ee;
function uploadSlice(slice) {
pend.go(function(cb) {
doWithRetry(tryDeletingObjects, self.s3RetryCount, self.s3RetryDelay, function(err, data) {
if (err) {
cb(err);
} else {
ee.progressAmount += slice.length;
ee.emit('progress');
ee.emit('data', data);
cb();
}
});
});
function tryDeletingObjects(cb) {
self.s3Pend.go(function(pendCb) {
params.Delete.Objects = slice;
self.s3.deleteObjects(params, function(err, data) {
pendCb();
cb(err, data);
});
});
}
}
};
Client.prototype.uploadFile = function(params) {
var self = this;
var uploader = new EventEmitter();
uploader.progressMd5Amount = 0;
uploader.progressAmount = 0;
uploader.progressTotal = 1;
var localFile = params.localFile;
var localFileStat = params.localFileStat;
var s3Params = extend({}, params.s3Params);
if (!localFileStat || !localFileStat.md5sum) {
doStatAndMd5Sum();
} else {
uploader.progressTotal = localFileStat.size;
startPuttingObject();
}
return uploader;
function doStatAndMd5Sum() {
var md5sum;
var pend = new Pend();
pend.go(doStat);
pend.go(doMd5Sum);
pend.wait(function(err) {
if (err) {
uploader.emit('error', err);
return;
}
localFileStat.md5sum = md5sum;
startPuttingObject();
});
function doStat(cb) {
fs.stat(localFile, function(err, stat) {
if (!err) {
localFileStat = stat;
uploader.progressTotal = stat.size;
}
cb(err);
});
}
function doMd5Sum(cb) {
var inStream = fs.createReadStream(localFile);
var counter = new StreamCounter();
inStream.on('error', function(err) {
cb(err);
});
var hash = crypto.createHash('md5');
hash.on('data', function(digest) {
md5sum = digest;
cb();
});
counter.on('progress', function() {
uploader.progressMd5Amount = counter.bytes;
uploader.emit('progress');
});
inStream.pipe(hash);
inStream.pipe(counter);
}
}
function startPuttingObject() {
if (localFileStat.size > MAX_PUTOBJECT_SIZE) {
uploader.emit('error', new Error("file exceeded max size for putObject"));
return;
}
doWithRetry(tryPuttingObject, self.s3RetryCount, self.s3RetryDelay, function(err, data) {
if (err) {
uploader.emit('error', err);
return;
}
uploader.emit('end', data);
});
}
function tryPuttingObject(cb) {
self.s3Pend.go(function(pendCb) {
var inStream = fs.createReadStream(localFile);
var errorOccurred = false;
inStream.on('error', function(err) {
if (errorOccurred) return;
errorOccurred = true;
uploader.emit('error', err);
});
s3Params.Body = inStream;
s3Params.ContentMD5 = localFileStat.md5sum.toString('base64');
s3Params.ContentLength = localFileStat.size;
uploader.progressAmount = 0;
var counter = new StreamCounter();
counter.on('progress', function() {
uploader.progressAmount = counter.bytes;
uploader.emit('progress');
});
inStream.pipe(counter);
self.s3.putObject(s3Params, function(err, data) {
pendCb();
if (errorOccurred) return;
if (err) {
errorOccurred = true;
cb(err);
return;
}
if (!compareETag(data.ETag, localFileStat.md5sum)) {
errorOccurred = true;
cb(new Error("ETag does not match MD5 checksum"));
return;
}
cb(null, data);
});
});
}
};
Client.prototype.downloadFile = function(params) {
var self = this;
var downloader = new EventEmitter();
var localFile = params.localFile;
var s3Params = extend({}, params.s3Params);
var dirPath = path.dirname(localFile);
downloader.progressAmount = 0;
mkdirp(dirPath, function(err) {
if (err) {
downloader.emit('error', err);
return;
}
doWithRetry(doTheDownload, self.s3RetryCount, self.s3RetryDelay, function(err) {
if (err) {
downloader.emit('error', err);
return;
}
downloader.emit('end');
});
});
return downloader;
function doTheDownload(cb) {
var request = self.s3.getObject(s3Params);
var response = request.createReadStream();
var outStream = fs.createWriteStream(localFile);
var counter = new StreamCounter();
var hash = crypto.createHash('md5');
var errorOccurred = false;
var eTag = "";
response.on('error', handleError);
outStream.on('error', handleError);
request.on('httpHeaders', function(statusCode, headers, resp) {
if (statusCode < 300) {
var contentLength = parseInt(headers['content-length'], 10);
downloader.progressTotal = contentLength;
downloader.progressAmount = 0;
downloader.emit('progress');
downloader.emit('httpHeaders', statusCode, headers, resp);
eTag = headers.etag || "";
} else {
handleError(new Error("http status code " + statusCode));
}
});
hash.on('data', function(digest) {
if (!compareETag(eTag, digest)) {
handleError(new Error("ETag does not match MD5 checksum"));
}
});
counter.on('progress', function() {
downloader.progressAmount = counter.bytes;
downloader.emit('progress');
});
outStream.on('close', function() {
if (errorOccurred) return;
cb();
});
response.pipe(counter);
response.pipe(outStream);
response.pipe(hash);
function handleError(err) {
if (errorOccurred) return;
errorOccurred = true;
cb(err);
}
}
};
/* params:
* - recursive: false
* - s3Params:
* - Bucket: params.s3Params.Bucket,
* - Delimiter: null,
* - Marker: null,
* - MaxKeys: null,
* - Prefix: prefix,
*/
Client.prototype.listObjects = function(params) {
var self = this;
var ee = new EventEmitter();
var s3Details = extend({}, params.s3Params);
var recursive = !!params.recursive;
var abort = false;
ee.progressAmount = 0;
ee.objectsFound = 0;
ee.dirsFound = 0;
findAllS3Objects(s3Details.Marker, s3Details.Prefix, function(err, data) {
if (err) {
ee.emit('error', err);
return;
}
ee.emit('end');
});
ee.abort = function() {
abort = true;
};
return ee;
function findAllS3Objects(marker, prefix, cb) {
if (abort) return;
doWithRetry(listObjects, self.s3RetryCount, self.s3RetryDelay, function(err, data) {
if (abort) return;
if (err) return cb(err);
ee.progressAmount += 1;
ee.objectsFound += data.Contents.length;
ee.dirsFound += data.CommonPrefixes.length;
ee.emit('progress');
ee.emit('data', data);
var pend = new Pend();
if (recursive) {
data.CommonPrefixes.forEach(recurse);
data.CommonPrefixes = [];
}
if (data.IsTruncated) {
pend.go(findNext1000);
}
pend.wait(function(err) {
cb(err);
});
function findNext1000(cb) {
findAllS3Objects(data.NextMarker, prefix, cb);
}
function recurse(dirObj) {
var prefix = dirObj.Prefix;
pend.go(function(cb) {
findAllS3Objects(null, prefix, cb);
});
}
});
function listObjects(cb) {
if (abort) return;
self.s3Pend.go(function(pendCb) {
if (abort) {
pendCb();
return;
}
s3Details.Marker = marker;
s3Details.Prefix = prefix;
self.s3.listObjects(s3Details, function(err, data) {
pendCb();
if (abort) return;
cb(err, data);
});
});
}
}
};
/* params:
* - deleteRemoved - delete s3 objects with no corresponding local file. default false
* - localDir - path on local file system to sync
* - s3Params:
* - Bucket (required)
* - Key (required)
*/
Client.prototype.uploadDir = function(params) {
return syncDir(this, params, true);
};
Client.prototype.downloadDir = function(params) {
return syncDir(this, params, false);
};
Client.prototype.deleteDir = function(s3Params) {
var self = this;
var ee = new EventEmitter();
var bucket = s3Params.Bucket;
var mfa = s3Params.MFA;
var listObjectsParams = {
recursive: true,
s3Params: {
Bucket: bucket,
Prefix: s3Params.Prefix,
},
};
var finder = self.listObjects(listObjectsParams);
var pend = new Pend();
ee.progressAmount = 0;
ee.progressTotal = 0;
finder.on('error', function(err) {
ee.emit('error', err);
});
finder.on('data', function(objects) {
ee.progressTotal += objects.Contents.length;
ee.emit('progress');
if (objects.Contents.length > 0) {
pend.go(deleteThem);
}
function deleteThem(cb) {
var params = {
Bucket: bucket,
Delete: {
Objects: objects.Contents.map(keyOnly),
Quiet: true,
},
MFA: mfa,
};
var deleter = self.deleteObjects(params);
deleter.on('error', function(err) {
finder.abort();
ee.emit('error', err);
});
deleter.on('end', function() {
ee.progressAmount += objects.Contents.length;
ee.emit('progress');
cb();
});
}
});
finder.on('end', function() {
pend.wait(function() {
ee.emit('end');
});
});
return ee;
};
function syncDir(self, params, directionIsToS3) {
var ee = new EventEmitter();
var localDir = params.localDir;
var localFiles = {};
var localFilesSize = 0;
var localDirs = {};
var s3Objects = {};
var s3ObjectsSize = 0;
var s3Dirs = {};
var deleteRemoved = params.deleteRemoved === true;
var prefix = ensureSep(params.s3Params.Prefix);
var bucket = params.s3Params.Bucket;
var listObjectsParams = {
recursive: true,
s3Params: {
Bucket: bucket,
Marker: null,
MaxKeys: null,
Prefix: prefix,
},
};
var upDownFileParams = {
localFile: null,
localFileStat: null,
s3Params: extend({}, params.s3Params),
};
delete upDownFileParams.s3Params.Prefix;
ee.progressTotal = 0;
ee.progressAmount = 0;
var pend = new Pend();
pend.go(findAllS3Objects);
pend.go(findAllFiles);
pend.wait(compareResults);
return ee;
function compareResults(err) {
if (err) {
ee.emit('error', err);
return;
}
var pend = new Pend();
if (directionIsToS3) {
ee.progressTotal = localFilesSize;
if (deleteRemoved) pend.go(deleteRemovedObjects);
pend.go(uploadDifferentObjects);
} else {
ee.progressTotal = s3ObjectsSize;
if (deleteRemoved) pend.go(deleteRemovedLocalFiles);
pend.go(downloadDifferentObjects);
}
ee.emit('progress');
pend.wait(function(err) {
if (err) {
ee.emit('error', err);
return;
}
ee.emit('end');
});
}
function downloadDifferentObjects(cb) {
var pend = new Pend();
for (var relPath in s3Objects) {
var s3Object = s3Objects[relPath];
var localFileStat = localFiles[relPath];
if (!localFileStat || !compareETag(s3Object.ETag, localFileStat.md5sum)) {
downloadOneFile(relPath);
} else {
ee.progressAmount += localFileStat.size;
}
}
pend.wait(cb);
function downloadOneFile(relPath) {
var fullPath = path.join(localDir, relPath);
pend.go(function(cb) {
upDownFileParams.s3Params.Key = prefix + relPath;
upDownFileParams.localFile = fullPath;
upDownFileParams.localFileStat = null;
var downloader = self.downloadFile(upDownFileParams);
var prevAmountDone = 0;
downloader.on('error', function(err) {
cb(err);
});
downloader.on('progress', function() {
var delta = downloader.progressAmount - prevAmountDone;
prevAmountDone = downloader.progressAmount;
ee.progressAmount += delta;
ee.emit('progress');
});
downloader.on('end', function() {
cb();
});
});
}
}
function deleteRemovedLocalFiles(cb) {
var pend = new Pend();
var relPath;
for (relPath in localFiles) {
var localFileStat = localFiles[relPath];
var s3Object = s3Objects[relPath];
if (!s3Object) {
deleteOneFile(relPath);
}
}
for (relPath in localDirs) {
var localDirStat = localDirs[relPath];
var s3Dir = s3Dirs[relPath];
if (!s3Dir) {
deleteOneDir(relPath);
}
}
pend.wait(cb);
function deleteOneDir(relPath) {
var fullPath = path.join(localDir, relPath);
pend.go(function(cb) {
rimraf(fullPath, function(err) {
// ignore ENOENT errors
if (err && err.code === 'ENOENT') err = null;
cb(err);
});
});
}
function deleteOneFile(relPath) {
var fullPath = path.join(localDir, relPath);
pend.go(function(cb) {
fs.unlink(fullPath, function(err) {
// ignore ENOENT errors
if (err && err.code === 'ENOENT') err = null;
cb(err);
});
});
}
}
function uploadDifferentObjects(cb) {
var pend = new Pend();
for (var relPath in localFiles) {
var localFileStat = localFiles[relPath];
var s3Object = s3Objects[relPath];
if (!s3Object || !compareETag(s3Object.ETag, localFileStat.md5sum)) {
uploadOneFile(relPath, localFileStat);
} else {
ee.progressAmount += s3Object.Size;
}
}
ee.emit('progress');
pend.wait(cb);
function uploadOneFile(relPath, localFileStat) {
var fullPath = path.join(localDir, relPath);
pend.go(function(cb) {
upDownFileParams.s3Params.Key = prefix + relPath;
upDownFileParams.localFile = fullPath;
upDownFileParams.localFileStat = localFileStat;
var uploader = self.uploadFile(upDownFileParams);
var prevAmountDone = 0;
uploader.on('error', function(err) {
cb(err);
});
uploader.on('progress', function() {
var delta = uploader.progressAmount - prevAmountDone;
prevAmountDone = uploader.progressAmount;
ee.progressAmount += delta;
ee.emit('progress');
});
uploader.on('end', function() {
cb();
});
});
}
}
function deleteRemovedObjects(cb) {
var objectsToDelete = [];
for (var relPath in s3Objects) {
var s3Object = s3Objects[relPath];
var localFileStat = localFiles[relPath];
if (!localFileStat) {
objectsToDelete.push({Key: prefix + relPath});
}
}
var params = {
Bucket: bucket,
Delete: {
Objects: objectsToDelete,
Quiet: true,
},
};
var deleter = self.deleteObjects(params);
deleter.on('error', function(err) {
cb(err);
});
deleter.on('end', function() {
cb();
});
}
function findAllS3Objects(cb) {
var finder = self.listObjects(listObjectsParams);
finder.on('error', function(err) {
cb(err);
});
finder.on('data', function(data) {
data.Contents.forEach(function(object) {
var key = object.Key.substring(prefix.length);
s3Objects[key] = object;
s3ObjectsSize += object.Size;
var dirname = path.dirname(key);
if (dirname === '.') return;
s3Dirs[path.dirname(key)] = true;
});
});
finder.on('end', function() {
cb();
});
}
function findAllFiles(cb) {
var dirWithSlash = ensureSep(localDir);
var walker = findit(dirWithSlash);
var errorOccurred = false;
var pend = new Pend();
walker.on('error', function(err) {
if (errorOccurred) return;
errorOccurred = true;
walker.stop();
cb(err);
});
walker.on('directory', function(dir, stat) {
var relPath = path.relative(localDir, dir);
if (relPath === '') return;
localDirs[relPath] = stat;
});
walker.on('file', function(file, stat) {
var relPath = path.relative(localDir, file);
if (stat.size > MAX_PUTOBJECT_SIZE) {
stat.md5sum = new Buffer(0); // ETag has different format for files this big
localFiles[relPath] = stat;
return;
}
pend.go(function(cb) {
var inStream = fs.createReadStream(file);
var hash = crypto.createHash('md5');
inStream.on('error', function(err) {
if (errorOccurred) return;
errorOccurred = true;
walker.stop();
cb(err);
});
hash.on('data', function(digest) {
stat.md5sum = digest;
localFiles[relPath] = stat;
localFilesSize += stat.size;
cb();
});
inStream.pipe(hash);
});
});
walker.on('end', function() {
if (errorOccurred) return;
pend.wait(cb);
});
}
}
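// Appends a trailing path separator to dir if one is missing.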
function ensureSep(dir) {
return (dir[dir.length - 1] === path.sep) ? dir : (dir + path.sep);
}
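// Calls fn (which takes a Node-style callback) until it succeeds, making up
// to tryCount attempts with delay milliseconds between failed attempts.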
function doWithRetry(fn, tryCount, delay, cb) {
var tryIndex = 0;
tryOnce();
function tryOnce() {
fn(function(err, result) {
if (err) {
tryIndex += 1;
if (tryIndex >= tryCount) {
cb(err);
} else {
setTimeout(tryOnce, delay);
}
} else {
cb(null, result);
}
});
}
}
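// Shallow-copies the enumerable properties of source onto target.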
function extend(target, source) {
for (var propName in source) {
target[propName] = source[propName];
}
return target;
}
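// Splits array into slices of at most maxLength items. Mutates the original
// array, which becomes the first slice.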
function chunkArray(array, maxLength) {
var slices = [array];
while (slices[slices.length - 1].length > maxLength) {
slices.push(slices[slices.length - 1].splice(maxLength));
}
return slices;
}
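// Strips surrounding quotes and whitespace from an S3 ETag and compares it
// to the hex digest of the given MD5 buffer.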
function compareETag(eTag, md5Buffer) {
eTag = eTag.replace(/^\s*'?\s*"?\s*(.*?)\s*"?\s*'?\s*$/, "$1");
var hex = md5Buffer.toString('hex');
return eTag === hex;
}
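// Picks only the Key and VersionId fields that deleteObjects needs.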
function keyOnly(item) {
return {
Key: item.Key,
VersionId: item.VersionId,
};
}


package.json
{
"name": "s3",
"version": "0.3.1",
"description": "high level amazon s3 client using knox as a backend",
"version": "1.0.0",
"description": "high level amazon s3 client. upload and download files and directories",
"main": "index.js",

@@ -16,5 +16,9 @@ "scripts": {

"s3",
"high",
"level",
"api"
"sync",
"folder",
"directory",
"retry",
"limit",
"stream",
"async"
],

@@ -24,8 +28,13 @@ "author": "Andrew Kelley",

"devDependencies": {
"mocha": "~1.9.0",
"mkdirp": "~0.3.5"
"mocha": "^1.18.2",
"ncp": "^0.5.1"
},
"dependencies": {
"knox": "~0.8.3"
"aws-sdk": "^2.0.0-rc.16",
"findit": "^1.2.0",
"pend": "^1.1.1",
"stream-counter": "^1.0.0",
"mkdirp": "^0.5.0",
"rimraf": "^2.2.8"
}
}

@@ -1,65 +1,348 @@

# High Level Amazon S3 Client
## Features and Limitations
* Automatically retry a configurable number of times when S3 returns an error.
* Includes logic to make multiple requests when there is a 1000 object limit.
* Ability to set a limit on the maximum parallelization of S3 requests.
Retries get pushed to the end of the parallelization queue.
* Ability to sync a dir to and from S3.
* Progress reporting.
* Limited to files less than 5GB.
* Limited to objects which were not uploaded using a multipart request.
See also the companion CLI tool, [s3-cli](https://github.com/andrewrk/node-s3-cli).
## Synopsis
### Create a client
```js
var s3 = require('s3');
var client = s3.createClient({
key: "your s3 key",
secret: "your s3 secret",
bucket: "your s3 bucket"
maxAsyncS3: Infinity,
s3RetryCount: 3,
s3RetryDelay: 1000,
s3Options: {
accessKeyId: "your s3 key",
secretAccessKey: "your s3 secret",
// any other options are passed to new AWS.S3()
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Config.html#constructor-property
},
});
```
### Create a client from existing AWS.S3 object
```js
var AWS = require('aws-sdk');
var s3 = require('s3');
var awsS3Client = new AWS.S3(s3Options);
var options = {
s3Client: awsS3Client,
};
var client = s3.createClient(options);
```
### Upload a file to S3
```js
var params = {
localFile: "some/local/file",
s3Params: {
Bucket: "s3 bucket name",
Key: "some/remote/file",
// other options supported by putObject, except Body and ContentLength.
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
},
};
var uploader = client.uploadFile(params);
uploader.on('error', function(err) {
console.error("unable to upload:", err.stack);
});
uploader.on('progress', function() {
console.log("progress", uploader.progressMd5Amount,
uploader.progressAmount, uploader.progressTotal);
});
uploader.on('end', function() {
console.log("done uploading");
});
```
### Download a file from S3
```js
var params = {
localFile: "some/local/file",
s3Params: {
Bucket: "s3 bucket name",
Key: "some/remote/file",
// other options supported by getObject
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#getObject-property
},
};
var downloader = client.downloadFile(params);
downloader.on('error', function(err) {
console.error("unable to download:", err.stack);
});
downloader.on('progress', function() {
console.log("progress", downloader.progressAmount, downloader.progressTotal);
});
downloader.on('end', function() {
console.log("done");
console.log("done downloading");
});
```
### Sync a directory to S3
```js
var params = {
localDir: "some/local/dir",
deleteRemoved: true, // default false, whether to remove s3 objects
// that have no corresponding local file.
s3Params: {
Bucket: "s3 bucket name",
Prefix: "some/remote/dir/",
// other options supported by putObject, except Body and ContentLength.
// See: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
},
};
var uploader = client.uploadDir(params);
uploader.on('error', function(err) {
console.error("unable to sync:", err.stack);
});
uploader.on('end', function() {
console.log("done uploading");
});
```
## API Documentation
### s3.createClient(options)
Creates an S3 client.
`options`:
* `s3Client` - optional, an instance of `AWS.S3`. Leave blank if you provide `s3Options`.
* `s3Options` - optional, provide this if you don't provide `s3Client`.
- See AWS SDK documentation for available options which are passed to `new AWS.S3()`:
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Config.html#constructor-property
* `maxAsyncS3` - maximum number of simultaneous requests this client will
ever have open to S3. Defaults to `Infinity`.
* `s3RetryCount` - how many times to try an S3 operation before giving up. Defaults to 3.
* `s3RetryDelay` - how many milliseconds to wait before retrying an S3 operation. Defaults to 1000.
### client.uploadFile(params)
See http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#putObject-property
`params`:
* `s3Params`: params to pass to AWS SDK `putObject`.
* `localFile`: path to the file on disk you want to upload to S3.
* `localFileStat`: optional - if you happen to have the stat object from
`fs.stat` and the md5sum of the file, you can provide it here. Otherwise it
will be computed for you.
The difference between using AWS SDK `putObject` and this one:
* This works with files, not streams or buffers.
* If the reported MD5 upon upload completion does not match, it retries.
* Retry based on the client's retry settings.
* Progress reporting.
Returns an `EventEmitter` with these properties:
* `progressMd5Amount`
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end' (data)` - emitted when the file is uploaded successfully
- `data` is the same object that you get from `putObject` in AWS SDK
* `'progress'` - emitted when `progressMd5Amount`, `progressAmount`, and
`progressTotal` properties change.
### client.downloadFile(params)
See http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#getObject-property
`params`:
* `localFile` - the destination path on disk to write the s3 object into
* `s3Params`: params to pass to AWS SDK `getObject`.
The difference between using AWS SDK `getObject` and this one:
* This works with a destination file, not a stream or a buffer.
* If the reported MD5 upon download completion does not match, it retries.
* Retry based on the client's retry settings.
* Progress reporting.
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end'` - emitted when the file is downloaded successfully
* `'progress'` - emitted when `progressAmount` and `progressTotal`
properties change.
### client.listObjects(params)
See http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#listObjects-property
`params`:
* `recursive` - `true` or `false` whether or not you want to recurse
into directories.
* `s3Params` - params to pass to AWS SDK `listObjects`.
Note that if you set `Delimiter` in `s3Params` then you will get a list of
objects and folders in the directory you specify. You probably do not want to
set `recursive` to `true` at the same time as specifying a `Delimiter` because
this will cause a request per directory. If you want all objects that share a
prefix, leave the `Delimiter` option `null` or `undefined`.
Be sure that `s3Params.Prefix` ends with a trailing slash (`/`) unless you
are requesting the top-level listing, in which case `s3Params.Prefix` should
be an empty string.
The difference between using AWS SDK `listObjects` and this one:
* Retry based on the client's retry settings.
* Supports recursive directory listing.
* Make multiple requests if the number of objects to list is greater than 1000.
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `objectsFound`
* `dirsFound`
And these events:
* `'error' (err)`
* `'end'` - emitted when done listing and no more 'data' events will be emitted.
* `'data' (data)` - emitted when a batch of objects are found. This is
the same as the `data` object in AWS SDK.
* `'progress'` - emitted when `progressAmount`, `objectsFound`, and
`dirsFound` properties change.
And these methods:
* `abort()` - call this to stop the find operation.
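For example, a recursive listing under a prefix might look like this (the
bucket name is a placeholder, and `client` is assumed to come from
`s3.createClient` as in the synopsis above):

```js
var finder = client.listObjects({
  recursive: true,
  s3Params: {
    Bucket: "s3 bucket name",
    Prefix: "some/remote/dir/", // note the trailing slash
  },
});
finder.on('data', function(data) {
  // data is the same object that AWS SDK listObjects returns
  data.Contents.forEach(function(object) {
    console.log(object.Key, object.Size);
  });
});
finder.on('error', function(err) {
  console.error("unable to list:", err.stack);
});
finder.on('end', function() {
  console.log("found", finder.objectsFound, "objects");
});
```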
### client.deleteObjects(s3Params)
See http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#deleteObjects-property
`s3Params` are the same.
The difference between using AWS SDK `deleteObjects` and this one is that this one will:
* Retry based on the client's retry settings.
* Make multiple requests if the number of objects you want to delete is
greater than 1000.
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end'` - emitted when all objects are deleted.
* `'progress'` - emitted when the `progressAmount` or `progressTotal` properties change.
* `'data' (data)` - emitted when a request completes. More `'data'` events may follow.
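A sketch of deleting a few objects (bucket and keys are placeholders; a list
longer than 1000 objects is split into multiple requests automatically):

```js
var deleter = client.deleteObjects({
  Bucket: "s3 bucket name",
  Delete: {
    Objects: [
      {Key: "some/remote/file1"},
      {Key: "some/remote/file2"},
    ],
    Quiet: true,
  },
});
deleter.on('error', function(err) {
  console.error("unable to delete:", err.stack);
});
deleter.on('end', function() {
  console.log("deleted", deleter.progressTotal, "objects");
});
```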
### client.uploadDir(params)
Syncs an entire directory to S3.
`params`:
* `deleteRemoved` - delete s3 objects with no corresponding local file. default false
* `localDir` - source path on local file system to sync to S3
* `s3Params`
- `Prefix` (required)
- `Bucket` (required)
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end'` - emitted when all files are uploaded
* `'progress'` - emitted when the `progressAmount` or `progressTotal` properties change.
### client.downloadDir(params)
Syncs an entire directory from S3.
`params`:
* `deleteRemoved` - delete local files with no corresponding s3 object. default `false`
* `localDir` - destination directory on local file system to sync to
* `s3Params`
- `Prefix` (required)
- `Bucket` (required)
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end'` - emitted when all files are downloaded
* `'progress'` - emitted when the `progressAmount` or `progressTotal` properties change.
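Usage mirrors `uploadDir`; a minimal sketch with placeholder paths:

```js
var downloader = client.downloadDir({
  localDir: "some/local/dir",
  deleteRemoved: true, // default false, whether to remove local files
                       // that have no corresponding s3 object.
  s3Params: {
    Bucket: "s3 bucket name",
    Prefix: "some/remote/dir/",
  },
});
downloader.on('error', function(err) {
  console.error("unable to sync:", err.stack);
});
downloader.on('end', function() {
  console.log("done downloading");
});
```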
### client.deleteDir(s3Params)
Deletes an entire directory on S3.
`s3Params`:
* `Bucket`
* `Prefix`
* `MFA` (optional)
Returns an `EventEmitter` with these properties:
* `progressAmount`
* `progressTotal`
And these events:
* `'error' (err)`
* `'end'` - emitted when all objects are deleted.
* `'progress'` - emitted when the `progressAmount` or `progressTotal` properties change.
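A minimal sketch (bucket and prefix are placeholders):

```js
var deleter = client.deleteDir({
  Bucket: "s3 bucket name",
  Prefix: "some/remote/dir/", // every object under this prefix is deleted
});
deleter.on('error', function(err) {
  console.error("unable to delete:", err.stack);
});
deleter.on('end', function() {
  console.log("done deleting");
});
```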
## Testing
`S3_KEY=<valid_s3_key> S3_SECRET=<valid_s3_secret> S3_BUCKET=<valid_s3_bucket> npm test`
## History
### 1.0.0
* complete module rewrite
* depend on official AWS SDK instead of knox
* support `uploadDir`, `downloadDir`, `listObjects`, `deleteObjects`, and `deleteDir`
### 0.3.1

