Comparing version 0.1.0 to 0.1.1
/*global require, process, console*/ | ||
// node modules | ||
var fs = require('fs'); | ||
var url = require('url'); | ||
var http = require('http'); | ||
var request = require('request'); | ||
var crypto = require('crypto'); | ||
// npm modules | ||
var _ = require('lodash'); | ||
var mime = require('mime'); | ||
var async = require('async'); | ||
var crypto = require('crypto'); | ||
var fs = require('fs'); | ||
var pkgcloud = require('pkgcloud'); | ||
var File = require('../node_modules/pkgcloud/lib/pkgcloud/rackspace/storage/file.js').File; | ||
var utils = require('./utils'); | ||
// An empty function for callbacks | ||
function nothing() {} | ||
var Rackit = function (options) { | ||
// Previous versions of rackit had the 'region' spcify the auth region.. normalize | ||
if (options && options.region && (options.region === 'US' || options.region === 'UK')) { | ||
options.authRegion = options.region; | ||
delete options.region; | ||
} | ||
var Rackit = function (options) { | ||
this.options = Object.create(Rackit.defaultOptions); | ||
@@ -26,7 +32,12 @@ | ||
this._client = pkgcloud.storage.createClient({ | ||
provider : 'rackspace', | ||
username : this.options.user, | ||
apiKey : this.options.key, | ||
region : this.options.region | ||
}); | ||
this.config = null; | ||
this.aContainers = null; | ||
this.hContainers = null; | ||
this.aCDNContainers = null; | ||
this.hCDNContainers = null; | ||
}; | ||
@@ -38,6 +49,7 @@ | ||
prefix : 'dev', | ||
region : 'US', | ||
baseURIs : { | ||
'UK': 'https://lon.auth.api.rackspacecloud.com/v1.0', | ||
'US': 'https://auth.api.rackspacecloud.com/v1.0' | ||
authRegion : 'US', | ||
region : '', | ||
authURIs : { | ||
'UK' : 'https://lon.identity.api.rackspacecloud.com/v2.0', | ||
'US' : 'https://identity.api.rackspacecloud.com/v2.0' | ||
}, | ||
@@ -67,3 +79,14 @@ tempURLKey : null, | ||
auth : function (cb) { | ||
o1._authenticate(cb); | ||
o1._client.auth(function (err) { | ||
if (err && !(err instanceof Error)) { | ||
err = new Error(err.message || err); | ||
} | ||
if (!err) { | ||
o1.config = { | ||
storage : o1._client.getServiceUrl('object-store') | ||
}; | ||
} | ||
cb(err); | ||
}); | ||
}, | ||
@@ -87,17 +110,6 @@ two : function (cb) { | ||
} | ||
}, function(err, results) { | ||
}, function (err, results) { | ||
// Avoid passing the results parameter on | ||
return cb(err); | ||
}); | ||
// Set up an interval to refresh the auth token that expires every 23 hours (it expires every 24). | ||
// Using this method, an API request should never return 401, facilitating easier streaming. | ||
var oneHour = 60 * 60 * 1000; | ||
setInterval(function() { | ||
o1._authenticate(function(err) { | ||
if (err) { | ||
o1._log('Reauthentication failed'); | ||
} | ||
}); | ||
}, 23 * oneHour); | ||
}; | ||
@@ -112,129 +124,3 @@ | ||
Rackit.prototype.hGoodStatuses = { | ||
200 : 'Ok', | ||
201 : 'Created', | ||
202 : 'Accepted', | ||
204 : 'No Content' | ||
}; | ||
Rackit.prototype.hBadStatuses = { | ||
401 : 'Unauthorized', | ||
404 : 'Not Found', | ||
412 : 'Length Required', | ||
422 : 'Unprocessable Entity' | ||
}; | ||
/** | ||
* Authenticates the user with Rackspace CloudFiles and stores the auth token. | ||
* Called once upon creation and periodically as token expires. | ||
* @param {function(Object)} cb - callback that returns an error | ||
*/ | ||
Rackit.prototype._authenticate = function (cb) { | ||
var o1 = this; | ||
o1._log('authenticating...'); | ||
// Build the request options | ||
var options = { | ||
headers : { | ||
'X-Auth-User' : o1.options.user, | ||
'X-Auth-Key' : o1.options.key | ||
} | ||
}; | ||
if(o1.options.baseURI) { | ||
options.uri = o1.options.baseURI; | ||
} else { | ||
options.uri = o1.options.baseURIs[o1.options.region]; | ||
} | ||
request(options, function (err, res, body) { | ||
o1.config = null; | ||
if (err) { | ||
return cb(err); | ||
} | ||
if (!o1.hGoodStatuses.hasOwnProperty(res.statusCode)) { | ||
return cb(new Error('Error code ' + res.statusCode)); | ||
} | ||
o1._log('authenticated'); | ||
// Store the config info | ||
o1.config = { | ||
storage : res.headers['x-storage-url'], | ||
CDN : res.headers['x-cdn-management-url'], | ||
authToken : res.headers['x-auth-token'] | ||
}; | ||
// Change URLs to s-net urls if it is enabled | ||
if (o1.options.useSNET) { | ||
o1._log('using s-net!'); | ||
o1.config.storage = o1.config.storage.replace('https://', 'https://snet-'); | ||
} else { | ||
o1._log('not using s-net!'); | ||
} | ||
cb(); | ||
}); | ||
}; | ||
/** | ||
* Sends a request message to the cloud server. Checks for errors, and bad | ||
* status codes indicating failure. | ||
* @param {Object} options - options for request() | ||
* @param {function(err, res, body)} cb - callback when the non-authentication request goes through | ||
* @return Request - a Request object to be used with streaming | ||
*/ | ||
Rackit.prototype._cloudRequest = function (options, cb) { | ||
var o1 = this; | ||
cb = cb || function() {}; | ||
options.headers = options.headers || {}; | ||
options.headers['X-Auth-Token'] = o1.config.authToken; | ||
// Create and return the request object | ||
return request(options, function (err, res, body) { | ||
if (err) { | ||
return cb(err); | ||
} | ||
// Problem | ||
if (!o1.hGoodStatuses.hasOwnProperty(res.statusCode)) { | ||
o1._log('request failed'); | ||
return cb(new Error('Error code ' + res.statusCode), res); | ||
} | ||
// Everything went fine | ||
cb(null, res, body); | ||
}); | ||
}; | ||
/** | ||
* Sends requests to the cloud server. The beginning of the URL is automatically prepended with the base storage url. | ||
* @param {string} method | ||
* @param {string} url | ||
* @param {Object} headers | ||
* @param {function(err)} cb | ||
*/ | ||
Rackit.prototype.storageRequest = function(method, url, headers, cb) { | ||
var o1 = this; | ||
// Normalize parameters | ||
if (typeof headers === 'function') { | ||
cb = headers; | ||
headers = {}; | ||
} | ||
var options = { | ||
method : method.toUpperCase(), | ||
uri : o1.config.storage + '/' + url, | ||
headers : headers | ||
}; | ||
o1._cloudRequest(options, cb); | ||
}; | ||
/** | ||
* Set's the account metadata key used for generating temporary URLs | ||
@@ -248,3 +134,2 @@ */ | ||
method : 'POST', | ||
uri : o1.config.storage, | ||
headers : { | ||
@@ -255,3 +140,3 @@ 'X-Account-Meta-Temp-Url-Key' : o1.options.tempURLKey | ||
o1._cloudRequest(options, function (err, res, body) { | ||
o1._client.request(options, function (err, body) { | ||
cb(err); | ||
@@ -270,27 +155,11 @@ }); | ||
async.parallel({ | ||
async.parallel([ | ||
// Get regular container info | ||
one : function (cb) { | ||
// Build request options | ||
var options = { | ||
uri : o1.config.storage + '?format=json' | ||
}; | ||
o1._cloudRequest(options, function (err, res, body) { | ||
if (err) { | ||
function (cb) { | ||
o1._client.getContainers(function (err, containers) { | ||
if (err) | ||
return cb(err); | ||
} | ||
o1.aContainers = JSON.parse(body); | ||
// Create the global hash of containers from the array | ||
o1.hContainers = {}; | ||
var container; | ||
var i = o1.aContainers.length; | ||
while (i--) { | ||
container = o1.aContainers[i]; | ||
o1.hContainers[container.name] = container; | ||
} | ||
o1._log('got containers', Object.keys(o1.hContainers)); | ||
o1.aContainers = containers; | ||
o1._log('got containers', _.pluck(o1.aContainers, 'name')); | ||
cb(); | ||
@@ -300,116 +169,79 @@ }); | ||
// Get CDN container info | ||
two : function (cb) { | ||
// Build request options | ||
var options = { | ||
uri : o1.config.CDN + '?format=json' | ||
}; | ||
o1._cloudRequest(options, function (err, res, body) { | ||
if (err) { | ||
function (cb) { | ||
o1._client.getCdnContainers(function (err, containers) { | ||
if (err) | ||
return cb(err); | ||
} | ||
o1.aCDNContainers = JSON.parse(body); | ||
o1.aCDNContainers = containers; | ||
// Build a hash from the CDN container array.. this is used for lookups | ||
o1.hCDNContainers = {}; | ||
var i = o1.aCDNContainers.length; | ||
var CDNContainer; | ||
while (i--) { | ||
CDNContainer = o1.aCDNContainers[i]; | ||
o1.hCDNContainers[CDNContainer['name']] = CDNContainer; | ||
} | ||
o1._log('got CDN containers', Object.keys(o1.hCDNContainers)); | ||
o1._log('got CDN containers', _.pluck(o1.aCDNContainers, 'name')); | ||
cb(); | ||
}); | ||
} | ||
}, cb); | ||
], cb); | ||
}; | ||
/** | ||
* Adds a container with a particular name. Can optionally CDN enable the container. | ||
* @param {string} sName | ||
* Adds a new prefixed container. Can optionally CDN enable the container. | ||
* @param {function(err)} cb | ||
*/ | ||
Rackit.prototype._createContainer = function (sName, cb) { | ||
Rackit.prototype._createContainer = function (cb) { | ||
var o1 = this; | ||
var aContainers = o1._getPrefixedContainers(o1.aContainers); | ||
var container; | ||
var numExisting = aContainers.length; | ||
var topContainer = numExisting && aContainers[numExisting - 1]; | ||
var sName; | ||
if (!topContainer) | ||
sName = o1.options.prefix + '0'; | ||
else { | ||
sName = o1.options.prefix + (parseInt(topContainer.name.match(/\d+$/)[0]) + 1); | ||
} | ||
o1._log('adding container \'' + sName + '\'...'); | ||
async.waterfall([ | ||
async.series([ | ||
// Create the container | ||
function (cb) { | ||
var options = { | ||
method : 'PUT', | ||
uri : o1.config.storage + '/' + sName | ||
}; | ||
o1._client.createContainer(sName, function (err, _container) { | ||
if (err) | ||
return cb(err); | ||
o1._cloudRequest(options, cb); | ||
}, | ||
// check that a parallel operation didn't just add the same container | ||
container = _.find(o1.aContainers, { name : sName }); | ||
// Add the container locally, but first check if it exists first (the Rackspace API is idempotent) | ||
function (res, body, cb) { | ||
var container; | ||
if (!container) { | ||
o1.aContainers.push(_container); | ||
container = _container; | ||
} | ||
if (!o1.hContainers[sName]) { | ||
container = { | ||
name : sName, | ||
count : 0, | ||
bytes : 0 | ||
}; | ||
// Add the container to the array | ||
o1.aContainers.push(container); | ||
// Add the container to the hash | ||
o1.hContainers[sName] = container; | ||
} | ||
cb(); | ||
cb(); | ||
}); | ||
}, | ||
// CDN enable the container, if necessary | ||
function (cb) { | ||
if (!o1.options.useCDN) { | ||
cb(null, {}, ''); | ||
return; | ||
} | ||
if (!o1.options.useCDN) | ||
return cb(); | ||
o1._log('CDN enabling the container'); | ||
var options = { | ||
method : 'PUT', | ||
uri : o1.config.CDN + '/' + sName | ||
}; | ||
container.enableCdn(function (err, _container) { | ||
if (err) | ||
return cb(err); | ||
o1._cloudRequest(options, function (err, res, body) { | ||
cb(err, res || {}, body || ''); | ||
}); | ||
}, | ||
// check that a parallel operation didn't just CDN enable the same container | ||
var CDNContainer = _.find(o1.aCDNContainers, { name : sName }); | ||
// Add the CDN container locally, but first check the container wasn't already CDN enabled | ||
function (res, body, cb) { | ||
var CDNContainer; | ||
// Add the container locally | ||
if (res.statusCode === 201 /* Created */) { | ||
// The new CDN container object for local storage | ||
CDNContainer = { | ||
'cdn_streaming_uri' : res.headers['x-cdn-streaming-uri'], | ||
'cdn_uri' : res.headers['x-cdn-uri'], | ||
'cdn_ssl_uri' : res.headers['x-cdn-ssl-uri'], | ||
'cdn_enabled' : true, | ||
'ttl' : 259200, | ||
'log_retention' : false, | ||
'name' : sName | ||
}; | ||
if (!CDNContainer) { | ||
o1.aCDNContainers.push(_container); | ||
} | ||
// Add CDN container to the array | ||
o1.aCDNContainers.push(CDNContainer); | ||
// Add CDN container to the hash | ||
o1.hCDNContainers[sName] = CDNContainer; | ||
} | ||
cb(); | ||
cb(); | ||
}); | ||
}], | ||
cb); | ||
function (err) { | ||
cb(err, container); | ||
}); | ||
}; | ||
@@ -419,29 +251,19 @@ | ||
* This method searches through the container cache for those that match the prefix | ||
* @return an array containing container names that match the prefix, in sorted order | ||
* @param {Array} an array of container objects from Rackspace | ||
* @return an array of container objects that match the prefix, in sorted order | ||
* @private | ||
*/ | ||
Rackit.prototype._getPrefixedContainers = function() { | ||
Rackit.prototype._getPrefixedContainers = function (containers) { | ||
var o1 = this; | ||
var reg1 = new RegExp('^' + o1.options.prefix + '\\d+$'); | ||
var reg2 = /\d+$/; | ||
var reg = new RegExp('^' + o1.options.prefix + '\\d+$'); | ||
var asContainers = []; | ||
var idx = o1.aContainers.length; | ||
while (idx--) { | ||
var sContainer = o1.aContainers[idx].name; | ||
// Check that the container name matches | ||
if (sContainer.match(reg)) | ||
asContainers.push(sContainer); | ||
} | ||
// Sort the container array by numerical index | ||
var reg = /\d+$/; | ||
asContainers.sort(function(a, b) { | ||
a = parseInt(a.match(reg)[0]); | ||
b = parseInt(b.match(reg)[0]); | ||
return a-b; | ||
}); | ||
return asContainers; | ||
return _(containers) | ||
.where(function (container) { | ||
return container.name.match(reg1); | ||
}) | ||
.sortBy(function (container) { | ||
return +container.name.match(reg2)[0]; | ||
}) | ||
.value(); | ||
}; | ||
@@ -457,29 +279,23 @@ | ||
var o1 = this; | ||
var aContainers = o1._getPrefixedContainers(o1.aContainers); | ||
var asContainers = o1._getPrefixedContainers(); | ||
// Check that we found a prefixed container | ||
var name; | ||
if (!asContainers.length) { | ||
if (!aContainers.length) { | ||
// If no existing containers, create one! | ||
name = o1.options.prefix + '0'; | ||
return o1._createContainer(name, function (err) { | ||
cb(err, name); | ||
}); | ||
o1._createContainer(cb); | ||
return; | ||
} | ||
// We have containers. Get the most recent one. | ||
name = asContainers.pop(); | ||
var container = _.last(aContainers); | ||
// Check if the container is full | ||
if (o1.hContainers[name].count >= 50000) { | ||
if (container.count >= 50000) { | ||
// The container is full, create the next one | ||
name = o1.options.prefix + (parseInt(name.match(/\d+$/)[0]) + 1); | ||
return o1._createContainer(name, function (err) { | ||
cb(err, name); | ||
}); | ||
o1._createContainer(cb); | ||
return; | ||
} | ||
// The container we found is fine.. return it. | ||
cb(null, name); | ||
cb(null, container); | ||
}; | ||
@@ -496,2 +312,6 @@ | ||
// Ensure things have been initialized | ||
if (!o1.aContainers) | ||
throw new Error('Attempting to use container information without initializing Rackit. Please call rackit.init() first.'); | ||
// Normalize options | ||
@@ -532,3 +352,18 @@ if (typeof options === 'function') { | ||
// Determine the type | ||
var type = options.type; | ||
if (!type) { | ||
if (fromFile) | ||
type = mime.lookup(source); | ||
else { | ||
// The source is a stream, so it might already be a reqeust with a content-type header. | ||
// In this case, the content-type will be forwarded automatically | ||
type = source.headers && source.headers['content-type']; | ||
if (!type) { | ||
return cb(new Error('Unable to determine content-type. You must specify the type for file streams.')); | ||
} | ||
} | ||
} | ||
async.parallel({ | ||
@@ -542,20 +377,2 @@ // If the source is a file, make sure it exists | ||
}, | ||
// Get the file type (passed in or find) | ||
type : function (cb) { | ||
if (options.type) { | ||
// The type was explicity defined | ||
cb(null, options.type); | ||
} else if (fromFile) { | ||
// The source is a file so we can find its type | ||
cb(null, mime.lookup(source)); | ||
} else { | ||
// The source is a stream, so it might already be a reqeust with a content-type header. | ||
// In this case, the content-type will be forwarded automatically | ||
if (source.headers && source.headers['content-type']) { | ||
cb(null, source.headers['content-type']); | ||
} else { | ||
cb(new Error('Unable to determine content-type. You must specify the type for file streams.')); | ||
} | ||
} | ||
}, | ||
// Get the file container | ||
@@ -580,13 +397,13 @@ container : function (cb) { | ||
if (fromFile) { | ||
headers['content-length'] = '' + results.stats.size; | ||
} else if (source.headers && source.headers['content-length']) { | ||
headers['content-length'] = source.headers['content-length']; | ||
} else { | ||
headers['transfer-encoding'] = 'chunked'; | ||
if (type) | ||
headers['content-type'] = type; | ||
// set the content-length of transfer-encoding as appropriate | ||
if (!fromFile) { | ||
if (source.headers && source.headers['content-length']) | ||
headers['content-length'] = source.headers['content-length']; | ||
else | ||
headers['transfer-encoding'] = 'chunked'; | ||
} | ||
if (results.type) | ||
headers['content-type'] = results.type; | ||
// Add any additonal headers | ||
@@ -600,42 +417,30 @@ var sKey; | ||
// Add any metadata headers | ||
for (sKey in options.meta) { | ||
if (options.meta.hasOwnProperty(sKey)) { | ||
headers['x-object-meta-' + sKey] = options.meta[sKey]; | ||
} | ||
} | ||
// | ||
// Generate the cloud request options | ||
// | ||
var cloudPath = results.container + '/' + id; | ||
var reqOptions = { | ||
method : 'PUT', | ||
uri : o1.config.storage + '/' + cloudPath, | ||
headers : headers | ||
var _options = { | ||
container : results.container.name, | ||
remote : id, | ||
headers : headers, | ||
metadata : options.meta | ||
}; | ||
if (fromFile) | ||
_options.local = source; | ||
else { | ||
_options.stream = source; | ||
source.resume(); | ||
if (source.headers) { | ||
// We want to remove any headers from the source stream so they don't clobber our own headers. | ||
delete source.headers; | ||
} | ||
} | ||
// Make the actual request | ||
var req = o1._cloudRequest(reqOptions, function (err, res, body) { | ||
// Done with request.. | ||
o1._log('done adding file to cloud'); | ||
// Increment the container count | ||
if (!err) { | ||
o1.hContainers[results.container].count++; | ||
} | ||
o1._client.upload(_options, function (err, result) { | ||
if (err) | ||
return cb(err); | ||
cb(err, cloudPath); | ||
results.container.count++; | ||
cb(err, results.container.name + '/' + id); | ||
}); | ||
// Open a file stream, and pipe it to the request. | ||
if (fromFile) | ||
source = fs.createReadStream(source); | ||
else if (source.headers) { | ||
// We want to remove any headers from the source stream so they don't clobber our own headers. | ||
delete source.headers; | ||
} | ||
source.resume(); | ||
source.pipe(req); | ||
}); | ||
@@ -655,3 +460,5 @@ }; | ||
var asContainers = o1._getPrefixedContainers(); | ||
// Ensure things have been initialized | ||
if (!o1.aContainers) | ||
throw new Error('Attempting to use container information without initializing Rackit. Please call rackit.init() first.'); | ||
@@ -665,9 +472,9 @@ // Normalize the parameters | ||
// List the objects for each container in parallel | ||
var aContainers = o1._getPrefixedContainers(o1.aContainers); | ||
async.forEach( | ||
asContainers, | ||
function (sContainer, cb) { | ||
o1._listContainer(sContainer, function (err, aSomeObjects) { | ||
if (err) { | ||
aContainers, | ||
function (container, cb) { | ||
o1._listContainer(container, function (err, aSomeObjects) { | ||
if (err) | ||
return cb(err); | ||
} | ||
@@ -696,3 +503,3 @@ aObjects = aObjects.concat(aSomeObjects); | ||
// Returns an array of all the objects in a container | ||
Rackit.prototype._listContainer = function(sContainer, cb) { | ||
Rackit.prototype._listContainer = function (container, cb) { | ||
var o1 = this; | ||
@@ -712,3 +519,3 @@ var objects = []; | ||
if (someObjects.length >= o1.options.listLimit) { | ||
o1._listContainerPart(sContainer, someObjects.pop().name, receiveSomeResults); | ||
o1._listContainerPart(container, someObjects.pop().name, receiveSomeResults); | ||
return; | ||
@@ -722,45 +529,22 @@ } | ||
// Set of the listing for this container | ||
o1._listContainerPart(sContainer, null, receiveSomeResults); | ||
o1._listContainerPart(container, null, receiveSomeResults); | ||
}; | ||
// Returns some of the objects in a container | ||
Rackit.prototype._listContainerPart = function (sContainer, marker, cb) { | ||
// Get from the 0 container | ||
Rackit.prototype._listContainerPart = function (container, marker, cb) { | ||
var o1 = this; | ||
var options = { limit : o1.options.listLimit }; | ||
var uri = o1.config.storage + '/' + sContainer + '?format=json&limit=' + o1.options.listLimit; | ||
// Add a maker if specified | ||
if (marker) | ||
uri += '&marker=' + marker; | ||
options.marker = marker; | ||
var options = { | ||
uri : uri | ||
}; | ||
o1._cloudRequest(options, function (err, res, body) { | ||
if (err) { | ||
o1._client.getFiles(container, options, function (err, files) { | ||
if (err) | ||
return cb(err); | ||
} | ||
// Check the response for no-content | ||
if (res.statusCode === 204 || !body) { | ||
return cb(null, []); | ||
} | ||
var i = files.length; | ||
while (i--) | ||
files[i].cloudpath = container.name + '/' + files[i].name; | ||
// Just get the cloudpaths of the returned objects | ||
var aObjects; | ||
try { | ||
aObjects = JSON.parse(body); | ||
} catch(e) { | ||
return new Error('Error parsing JSON response'); | ||
} | ||
// Add the cloudpath parameter | ||
var i; | ||
for (i = 0; i < aObjects.length; i++) { | ||
aObjects[i].cloudpath = sContainer + '/' + aObjects[i].name; | ||
} | ||
cb(null, aObjects); | ||
cb(null, files); | ||
}); | ||
@@ -786,15 +570,32 @@ }; | ||
cb = cb || function () { | ||
}; | ||
var aPieces = sCloudPath.match(/^\/{0,1}([^/]+)\/(.+)$/); | ||
var sContainer = aPieces[1]; | ||
var sName = aPieces[2]; | ||
var options = { | ||
method : 'GET', | ||
uri : o1.config.storage + '/' + sCloudPath | ||
container : sContainer, | ||
remote : sName | ||
}; | ||
var req = o1._cloudRequest(options, cb); | ||
var stream = o1._client.download(options, function (err, data) { | ||
if (err) | ||
return cb(err); | ||
// Pipe the request response to the output file, if specified | ||
if (localPath && typeof localPath === 'string') { | ||
req.pipe(fs.createWriteStream(localPath)); | ||
if (!localPath) { | ||
cb(); | ||
} | ||
}); | ||
if (localPath) { | ||
var w = fs.createWriteStream(localPath); | ||
stream.pipe(w); | ||
w.on('finish', function () { | ||
cb(); | ||
}); | ||
} | ||
return req; | ||
return stream; | ||
}; | ||
@@ -809,8 +610,17 @@ | ||
var o1 = this; | ||
var options = { | ||
method : 'DELETE', | ||
uri : o1.config.storage + '/' + sCloudPath | ||
}; | ||
o1._cloudRequest(options, cb); | ||
var aPieces = sCloudPath.match(/^\/{0,1}([^/]+)\/(.+)$/); | ||
var sContainer = aPieces[1]; | ||
var sName = aPieces[2]; | ||
o1._client.removeFile(sContainer, sName, function (err) { | ||
if (err) | ||
return cb(err); | ||
// decrement the internal container size | ||
var container = _.find(o1.aContainers, { name : sContainer }); | ||
container.count--; | ||
cb(); | ||
}); | ||
}; | ||
@@ -826,19 +636,17 @@ | ||
var o1 = this; | ||
var headers = {}; | ||
// Add any metadata headers | ||
var sKey; | ||
for (sKey in meta) { | ||
if (meta.hasOwnProperty(sKey)) { | ||
headers['x-object-meta-' + sKey] = meta[sKey]; | ||
} | ||
} | ||
var aPieces = sCloudPath.match(/^\/{0,1}([^/]+)\/(.+)$/); | ||
var sContainer = aPieces[1]; | ||
var sName = aPieces[2]; | ||
var options = { | ||
method : 'POST', | ||
uri : o1.config.storage + '/' + sCloudPath, | ||
headers : headers | ||
}; | ||
// updateFileMetadata() requires a File instance, so make a dummy one | ||
var file = new File(o1._client, { name : sName }); | ||
file.metadata = meta; | ||
o1._cloudRequest(options, cb); | ||
o1._client.updateFileMetadata(sContainer, file, function (err) { | ||
if (err) | ||
return cb(err); | ||
cb(); | ||
}); | ||
}; | ||
@@ -854,35 +662,14 @@ | ||
*/ | ||
Rackit.prototype.getMeta = function(cloudpath, cb) { | ||
Rackit.prototype.getMeta = function (sCloudPath, cb) { | ||
var o1 = this; | ||
var options = { | ||
method : 'HEAD', | ||
uri : o1.config.storage + '/' + cloudpath | ||
}; | ||
var aPieces = sCloudPath.match(/^\/{0,1}([^/]+)\/(.+)$/); | ||
var sContainer = aPieces[1]; | ||
var sName = aPieces[2]; | ||
o1._cloudRequest(options, function(err, response, body) { | ||
var | ||
sKey, | ||
meta = {}, | ||
details = {}, | ||
headers = response && response.headers, | ||
prefix = 'x-object-meta-', | ||
prefixes = {'etag':'etag', | ||
'x-timestamp':'timestamp', | ||
'content-type':'content-type'}; | ||
o1._client.getFile(sContainer, sName, function (err, file) { | ||
if (err) | ||
return cb(err); | ||
if (err) return cb(err); | ||
for (sKey in headers) { | ||
if (headers.hasOwnProperty(sKey)) { | ||
if(sKey.indexOf(prefix) === 0) { | ||
meta[sKey.substr(prefix.length)] = headers[sKey]; | ||
} | ||
else if(prefixes[sKey]) { | ||
details[prefixes[sKey]] = headers[sKey]; | ||
} | ||
} | ||
} | ||
cb(null, meta, details); | ||
cb(null, file.metadata); | ||
}); | ||
@@ -914,3 +701,3 @@ }; | ||
var CDNContainer = o1.hCDNContainers[sContainer]; | ||
var CDNContainer = _.find(o1.aCDNContainers, { name : sContainer }); | ||
if (!CDNContainer) { | ||
@@ -921,3 +708,3 @@ o1._log('The container ' + sContainer + ' is not CDN enabled. Unable to get CDN URI'); | ||
var uri = CDNContainer[o1.options.useSSL ? 'cdn_ssl_uri' : 'cdn_uri'] + '/' + localPath; | ||
var uri = CDNContainer[o1.options.useSSL ? 'cdnSslUri' : 'cdnUri'] + '/' + localPath; | ||
return uri; | ||
@@ -985,3 +772,3 @@ }; | ||
// Determine which container property to search for.. ssl uri or regular uri | ||
var uriProperty = parts.protocol === 'https:' ? 'cdn_ssl_uri' : 'cdn_uri'; | ||
var uriProperty = parts.protocol === 'https:' ? 'cdnSslUri' : 'cdnUri'; | ||
@@ -991,16 +778,9 @@ // Get the base part of the given uri. This should equal the containers cdn_ssl_uri or cdn_uri | ||
// Iterate through all of the CDN containers, looking for one that has a matching base URI | ||
var found = false; | ||
var container; | ||
for (container in o1.hCDNContainers) { | ||
if (o1.hCDNContainers.hasOwnProperty(container)) { | ||
if (base === o1.hCDNContainers[container][uriProperty]) { | ||
found = true; | ||
break; | ||
} | ||
} | ||
} | ||
// Find the CDN container that has a matching base URI | ||
var search = {}; | ||
search[uriProperty] = base; | ||
var container = _.find(o1.aCDNContainers, search); | ||
// If we couldn't find a container, output a message | ||
if (!found) { | ||
if (!container) { | ||
o1._log('The container with URI ' + base + ' could not be found. Unable to get Cloudpath'); | ||
@@ -1011,3 +791,3 @@ return null; | ||
// Get the cloudpath | ||
var cloudpath = container + parts.pathname; | ||
var cloudpath = container.name + parts.pathname; | ||
return cloudpath; | ||
@@ -1014,0 +794,0 @@ }; |
@@ -5,3 +5,3 @@ { | ||
"keywords": ["api", "storage", "rackspace", "cloud", "cloudfiles"], | ||
"version": "0.1.0", | ||
"version": "0.1.1", | ||
"author": "Ross Johnson <ross@mazira.com>", | ||
@@ -18,14 +18,16 @@ "contributors": [], | ||
"engines": { | ||
"node": ">= 0.5.x" | ||
"node": ">= 0.8.x" | ||
}, | ||
"dependencies": { | ||
"request": ">= 2.9.100", | ||
"async": ">= 0.1.18", | ||
"mime": ">= 0.2.0" | ||
"lodash" : "2.4.1", | ||
"async": ">= 0.2.9", | ||
"mime": ">= 1.2.11", | ||
"pkgcloud": "0.8.17" | ||
}, | ||
"devDependencies": { | ||
"mocha": "1.5.0", | ||
"should": "1.2.0", | ||
"nock": "0.13.4" | ||
"mocha": "1.15.1", | ||
"should": "2.1.1", | ||
"nock": "0.25.0", | ||
"request": ">= 2.29.0" | ||
} | ||
} |
@@ -58,3 +58,4 @@ # Rackit | ||
prefix : 'dev', // The prefix for your Cloud Files containers (may contain forward slash) | ||
region : 'US', // Determines the API entry point - other option of 'UK' | ||
authRegion : 'US', // Specifies the authentication API entry point - other option of 'UK' | ||
region : '', // Specifies the geographic datacenter to prefer - defaults to the user's default region. Explicit options of 'ORD', 'DFW', 'HKG', 'LON', 'IAD', 'SYD' are accepted. | ||
tempURLKey : null, // A secret for generating temporary URLs | ||
@@ -138,5 +139,5 @@ useSNET : false, | ||
Copyright (C) 2012 Ross Johnson (ross@mazira.com) | ||
Copyright (C) 2014 Ross Johnson (ross@mazira.com) | ||
Copyright (C) 2012 Mazira, LLC | ||
Copyright (C) 2014 Mazira, LLC | ||
@@ -143,0 +144,0 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: |
@@ -1,2 +0,4 @@ | ||
/*global require, __dirname, describe, it, before, beforeEach, after*/ | ||
/*global require, describe, it, before, beforeEach, after*/ // global functions | ||
/*global Error, Buffer*/ // global classes | ||
/*global __dirname*/ // global vars | ||
var | ||
@@ -10,12 +12,11 @@ // Node modules | ||
// Npm modules | ||
_ = require('lodash'), | ||
async = require('async'), | ||
should = require('should'), | ||
nock = require('nock'), | ||
request = require('request'); | ||
request = require('request'), | ||
var Rackit = require('../lib/main.js').Rackit; | ||
// Project modules | ||
Rackit = require('../lib/main.js').Rackit, | ||
CloudFilesMock = require('./cloudfiles.mock.js'); | ||
// Fake vars for our mock | ||
var clientOptions = Rackit.defaultOptions; | ||
var rackitOptions = { | ||
@@ -27,7 +28,2 @@ user : 'boopity', | ||
var mockOptions = { | ||
storage : 'https://storage.blablah.com/v1/blah', | ||
cdn : 'https://cdn.blablah.com/v1/blah', | ||
token : 'boopitybopitydadabop' | ||
}; | ||
@@ -40,2 +36,3 @@ // Info for the file we will upload | ||
testFile.data = fs.readFileSync(testFile.path, 'utf8'); | ||
testFile.size = Buffer.byteLength(testFile.data); | ||
@@ -46,33 +43,3 @@ /** | ||
*/ | ||
var superNock = { | ||
scopes : [], | ||
typicalResponse : function () { | ||
return this.auth().storage().CDN(); | ||
}, | ||
auth : function () { | ||
// Setup nock to respond to a good auth request, twice | ||
var path = url.parse(clientOptions.baseURIs[clientOptions.region]).pathname; | ||
var scope = nock(clientOptions.baseURIs[clientOptions.region]) | ||
.get(path) | ||
.matchHeader('X-Auth-User', rackitOptions.user) | ||
.matchHeader('X-Auth-Key', rackitOptions.key) | ||
.reply(204, 'No Content', { | ||
'x-storage-url' : mockOptions.storage, | ||
'x-cdn-management-url' : mockOptions.cdn, | ||
'x-auth-token' : mockOptions.token | ||
}); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
tempURL : function () { | ||
var path = url.parse(mockOptions.storage).pathname; | ||
var scope = nock(mockOptions.storage) | ||
.post(path) | ||
.matchHeader('X-Account-Meta-Temp-Url-Key', rackitOptions.tempURLKey) | ||
.reply(204, 'No Content'); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
var containers = { | ||
aContainers : [ | ||
@@ -98,15 +65,18 @@ { | ||
bytes : 2000, | ||
objects : [{ | ||
name : 'obj1', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}, { | ||
name : 'obj2', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}] | ||
objects : [ | ||
{ | ||
name : 'obj1', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
}, | ||
{ | ||
name : 'obj2', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
} | ||
] | ||
}, | ||
@@ -117,21 +87,25 @@ { | ||
bytes : 3000, | ||
objects : [{ | ||
name : 'obj1', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}, { | ||
name : 'obj2', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}, { | ||
name : 'obj3', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}] | ||
objects : [ | ||
{ | ||
name : 'obj1', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
}, | ||
{ | ||
name : 'obj2', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
}, | ||
{ | ||
name : 'obj3', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
} | ||
] | ||
}, | ||
@@ -142,15 +116,18 @@ { | ||
bytes : 2000, | ||
objects : [{ | ||
name : 'obj4', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}, { | ||
name : 'obj5', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2012-1-01T00:00:0.0' | ||
}] | ||
objects : [ | ||
{ | ||
name : 'obj4', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
}, | ||
{ | ||
name : 'obj5', | ||
hash : 'randomhash', | ||
bytes : 1000, | ||
content_type : 'application\/octet-stream', | ||
last_modified : '2013-12-14T00:05:20.908090' | ||
} | ||
] | ||
}, | ||
@@ -163,14 +140,4 @@ { | ||
], | ||
storage : function () { | ||
var path = url.parse(mockOptions.storage).pathname + '?format=json'; | ||
var scope = nock(mockOptions.storage) | ||
.get(path) | ||
.matchHeader('X-Auth-Token', mockOptions.token) | ||
.reply(200, JSON.stringify(this.aContainers)); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
aCDNContainers : [{ | ||
aCDNContainers : [ | ||
{ | ||
name : 'one', | ||
@@ -193,135 +160,11 @@ cdn_enabled : true, | ||
} | ||
], | ||
// Gets an array of containers matching a prefix | ||
getPrefixedContainers : function (prefix) { | ||
var container, containers = []; | ||
var i = this.aContainers.length; | ||
var reg = new RegExp('^' + prefix + '\\d+$'); | ||
] | ||
}; | ||
while (i--) { | ||
container = this.aContainers[i]; | ||
var superNock = new CloudFilesMock(rackitOptions, containers.aContainers, containers.aCDNContainers); | ||
// If the container doesn't have the prefix, skip it | ||
if (!container.name.match(reg)) | ||
continue; | ||
containers.push(container); | ||
} | ||
return containers; | ||
}, | ||
CDN : function () { | ||
var path = url.parse(mockOptions.cdn).pathname + '?format=json'; | ||
var scope = nock(mockOptions.cdn) | ||
.get(path) | ||
.matchHeader('X-Auth-Token', mockOptions.token) | ||
.reply(200, JSON.stringify(this.aCDNContainers)); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
// Queues a nock mock for a single object upload (HTTP PUT) to the given
// container. `data` is the exact request body expected, `type` the expected
// Content-Type header. When `chunked` is truthy the request must use
// Transfer-Encoding: chunked; otherwise an exact Content-Length is required.
// Returns `this` so mock setup calls can be chained.
add : function (container, data, type, chunked) {
	// The client generates a random object name; collapse everything after
	// the container segment to a fixed 'filename' so the mock still matches.
	var path = url.parse(mockOptions.storage).pathname + '/' + container + '/filename';
	var scope = nock(mockOptions.storage)
	.filteringPath(new RegExp(container + '/.*', 'g'), container + '/filename')
	.put(path, data)
	.matchHeader('X-Auth-Token', mockOptions.token)
	.matchHeader('Content-Type', type)
	// ETag must be absent on this upload path.
	.matchHeader('ETag', undefined);
	if (chunked) {
		scope.matchHeader('Transfer-Encoding', 'chunked');
	} else {
		scope.matchHeader('Content-Length', '' + Buffer.byteLength(data));
	}
	scope = scope.reply(201);
	this.scopes.push(scope);
	return this;
},
get : function (cloudpath, data) { | ||
var path = url.parse(mockOptions.storage).pathname + '/' + cloudpath; | ||
var scope = nock(mockOptions.storage) | ||
.get(path) | ||
.reply(200, data); | ||
}, | ||
createContainer : function (container) { | ||
var path = url.parse(mockOptions.storage).pathname + '/' + container; | ||
var scope = nock(mockOptions.storage) | ||
.put(path) | ||
.matchHeader('X-Auth-Token', mockOptions.token) | ||
.reply(201); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
enableCDN : function (container) { | ||
var path = url.parse(mockOptions.cdn).pathname + '/' + container; | ||
var scope = nock(mockOptions.cdn) | ||
.put(path) | ||
.matchHeader('X-Auth-Token', mockOptions.token) | ||
.reply(201); | ||
this.scopes.push(scope); | ||
return this; | ||
}, | ||
// Mocks the paginated object-listing requests the client issues when listing
// everything under `prefix`. For each matching container the client pages
// through results `limit` at a time, sending the name of the last object it
// received as the `marker` query parameter on follow-up requests.
// Returns `this` for chaining.
list : function (prefix, limit) {
	var containers = this.getPrefixedContainers(prefix);
	var i = containers.length;
	var container, basepath, path, count, j, objects;
	var scope = nock(mockOptions.storage);
	// There may be more than one container with this prefix, and the client will be requesting from all
	while (i--) {
		container = containers[i];
		// Skip containers that don't have the given prefix
		if (container.name.indexOf(prefix) !== 0)
			continue;
		basepath = url.parse(mockOptions.storage).pathname + '/' + container.name;
		basepath += '?format=json&limit=' + limit;
		// If the container has no objects, respond with 204.
		if (!container.objects || container.objects.length === 0) {
			scope.get(basepath).reply(204);
			continue;
		}
		// The client may have to make multiple requests to this container depending on the limit
		// NOTE(review): `<=` presumably mocks the trailing empty page the client
		// requests when the previous page was exactly full — confirm against client.
		for (count = 0; count <= container.objects.length; count += limit) {
			path = basepath;
			// If count > 0, the client will be requesting with a marker item from last response
			if (count > 0) {
				path = basepath + '&marker=' + container.objects[count-1].name
			}
			// Generate an array of object data to reply with
			objects = [];
			for (j = count; j < count+limit && j < container.objects.length; j++) {
				objects.push(container.objects[j]);
			}
			scope.get(path).reply(200, JSON.stringify(objects));
		}
	}
	this.scopes.push(scope);
	return this;
},
allDone : function () { | ||
// Assert that all the scopes are done | ||
for ( var i = 0; i < this.scopes.length; i++ ) { | ||
this.scopes[i].done(); | ||
} | ||
// Clear all scopes | ||
this.scopes = []; | ||
} | ||
}; | ||
describe('Rackit', function () { | ||
describe('Constructor', function () { | ||
it('should have default options', function () { | ||
@@ -332,15 +175,19 @@ var rackit = new Rackit(); | ||
rackit.options.useCDN.should.equal(true); | ||
rackit.options.region.should.equal('US'); | ||
rackit.options.baseURIs[clientOptions.region].should.equal('https://auth.api.rackspacecloud.com/v1.0'); | ||
rackit.options.baseURIs['UK'].should.equal('https://lon.auth.api.rackspacecloud.com/v1.0'); | ||
rackit.options.region.should.equal(''); | ||
rackit.options.authRegion.should.equal('US'); | ||
rackit.options.authURIs['US'].should.equal('https://identity.api.rackspacecloud.com/v2.0'); | ||
rackit.options.authURIs['UK'].should.equal('https://lon.identity.api.rackspacecloud.com/v2.0'); | ||
}); | ||
it('should allow overriding of default options', function () { | ||
var rackit = new Rackit({ | ||
pre : 'dep', | ||
useCDN : false | ||
useCDN : false, | ||
region : 'LON' | ||
}); | ||
rackit.options.pre.should.equal('dep'); | ||
rackit.options.useCDN.should.equal(false); | ||
rackit.options.region.should.equal('LON'); | ||
// Check non-overridden options are still there | ||
rackit.options.region.should.equal('US'); | ||
rackit.options.authRegion.should.equal('US'); | ||
}); | ||
@@ -363,8 +210,11 @@ }); | ||
// Setup nock to respond to bad auth request | ||
var path = url.parse(clientOptions.baseURIs[clientOptions.region]).pathname; | ||
var scope = nock(clientOptions.baseURIs[clientOptions.region]).get(path).reply(401, 'Unauthorized'); | ||
var username = rackitOptions.user + 'blahblah'; | ||
var apiKey = rackitOptions.key + 'bloopidy'; | ||
superNock.auth(username, apiKey); | ||
var rackit = new Rackit({ | ||
user : rackitOptions.user + 'blahblah', | ||
key : rackitOptions.key + 'bloopidy' | ||
user : username, | ||
key : apiKey, | ||
tempURLKey : rackitOptions.tempURLKey | ||
}); | ||
@@ -374,3 +224,3 @@ rackit.init(function (err) { | ||
err.should.be.an['instanceof'](Error); | ||
scope.done(); | ||
superNock.allDone(); | ||
cb(); | ||
@@ -383,6 +233,3 @@ }); | ||
var rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key | ||
}); | ||
var rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -396,9 +243,5 @@ should.not.exist(err); | ||
it('should set temp url key if provided', function (cb) { | ||
superNock.typicalResponse().tempURL(); | ||
superNock.typicalResponse(); | ||
var rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key, | ||
tempURLKey : rackitOptions.tempURLKey | ||
}); | ||
var rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -414,6 +257,3 @@ should.not.exist(err); | ||
var rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key | ||
}); | ||
var rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -427,3 +267,5 @@ var i; | ||
for (i = 0; i < superNock.aContainers.length; i++) { | ||
rackit.hContainers.should.have.ownProperty(superNock.aContainers[i].name); | ||
rackit.aContainers[i].should.have.property('name', superNock.aContainers[i].name); | ||
rackit.aContainers[i].should.have.property('count', superNock.aContainers[i].count); | ||
rackit.aContainers[i].should.have.property('bytes', superNock.aContainers[i].bytes); | ||
} | ||
@@ -434,3 +276,5 @@ | ||
for (i = 0; i < superNock.aCDNContainers.length; i++) { | ||
rackit.hCDNContainers.should.have.ownProperty(superNock.aCDNContainers[i].name); | ||
rackit.aCDNContainers[i].should.have.property('name', superNock.aCDNContainers[i].name); | ||
rackit.aCDNContainers[i].should.have.property('cdnUri', superNock.aCDNContainers[i].cdn_uri); | ||
rackit.aCDNContainers[i].should.have.property('cdnSslUri', superNock.aCDNContainers[i].cdn_ssl_uri); | ||
} | ||
@@ -445,3 +289,12 @@ | ||
describe('#_getPrefixedContainers', function() { | ||
describe('improper initialization', function () { | ||
it('should throw an error if attempting to call certain methods before init()', function () { | ||
var rackit = new Rackit(rackitOptions); | ||
(function () { | ||
rackit.add(testFile.path); | ||
}).should.throw(/^Attempting to use/); | ||
}); | ||
}); | ||
describe('#_getPrefixedContainers', function () { | ||
var rackit; | ||
@@ -452,6 +305,3 @@ | ||
superNock.typicalResponse(); | ||
rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key | ||
}); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -463,8 +313,10 @@ superNock.allDone(); | ||
it('should return an empty array if no prefixed containers have been made', function() { | ||
it('should return an empty array if no prefixed containers have been made', function () { | ||
// Hack some data into Rackit | ||
rackit.options.prefix = 'nonexistent'; | ||
rackit.aContainers = [{ | ||
name: 'existent' | ||
}]; | ||
rackit.aContainers = [ | ||
{ | ||
name : 'existent' | ||
} | ||
]; | ||
@@ -474,34 +326,53 @@ rackit._getPrefixedContainers().should.have.length(0); | ||
it('should return a sorted array of prefixed containers', function() { | ||
it('should return a sorted array of prefixed containers', function () { | ||
// Hack some data into Rackit | ||
rackit.options.prefix = 'existent'; | ||
rackit.aContainers = [{ | ||
name: 'blah0' | ||
}, { | ||
name: 'existent2' | ||
}, { | ||
name: 'existent3' | ||
}, { | ||
name: 'existent0' | ||
}]; | ||
var containers = [ | ||
{ | ||
name : 'blah0' | ||
}, | ||
{ | ||
name : 'existent2' | ||
}, | ||
{ | ||
name : 'existent3' | ||
}, | ||
{ | ||
name : 'existent0' | ||
} | ||
]; | ||
rackit._getPrefixedContainers().should.eql(['existent0', 'existent2', 'existent3']); | ||
var aContainers = rackit._getPrefixedContainers(containers); | ||
aContainers.should.have.length(3); | ||
aContainers[0].should.eql(containers[3]); | ||
aContainers[1].should.eql(containers[1]); | ||
aContainers[2].should.eql(containers[2]); | ||
}); | ||
it('should not include containers with a matching sub-prefix', function() { | ||
it('should not include containers with a matching sub-prefix', function () { | ||
// Hack some data into Rackit | ||
rackit.options.prefix = 'existent'; | ||
rackit.aContainers = [{ | ||
name: 'blah0' | ||
}, { | ||
name: 'existent2' | ||
}, { | ||
name: 'existent3' | ||
}, { | ||
name: 'existent0' | ||
}, { | ||
name: 'existenter0' | ||
}]; | ||
var containers = [ | ||
{ | ||
name : 'blah0' | ||
}, | ||
{ | ||
name : 'existent2' | ||
}, | ||
{ | ||
name : 'existent3' | ||
}, | ||
{ | ||
name : 'existent0' | ||
}, | ||
{ | ||
name : 'existenter0' | ||
} | ||
]; | ||
rackit._getPrefixedContainers().should.eql(['existent0', 'existent2', 'existent3']); | ||
var aContainers = rackit._getPrefixedContainers(containers); | ||
aContainers.should.have.length(3); | ||
aContainers[0].should.eql(containers[3]); | ||
aContainers[1].should.eql(containers[1]); | ||
aContainers[2].should.eql(containers[2]); | ||
}); | ||
@@ -518,16 +389,12 @@ }); | ||
// Get the prefix | ||
var prefix = container.replace(/\d+$/, ''); | ||
rackit.options.prefix = container.replace(/\d+$/, ''); | ||
rackit.options.prefix = prefix; | ||
// Assert that the container exists, and is not to capacity | ||
rackit.hContainers.should.have.property(container); | ||
var count = rackit.hContainers[container].count; | ||
count.should.be.below(50000); | ||
return count; | ||
var _container = _.find(rackit.aContainers, { name : container }); | ||
_container.count.should.be.below(50000); | ||
return _container.count; | ||
} | ||
// Asserts that a successful file upload occured. | ||
function assertAdd(container, count, cb) { | ||
function assertAdd(sContainer, count, cb) { | ||
return function (err, cloudpath) { | ||
@@ -543,9 +410,10 @@ if (err) { | ||
// Assert the container exists | ||
rackit.hContainers.should.have.property(container); | ||
var container = _.find(rackit.aContainers, { name : sContainer }); | ||
should.exist(container); | ||
// Assert the file was added to the expected container | ||
cloudpath.split('/')[0].should.equal(container); | ||
cloudpath.split('/')[0].should.equal(sContainer); | ||
// Assert the containers file count is as expected | ||
rackit.hContainers[container].count.should.equal(count); | ||
container.count.should.equal(count); | ||
@@ -559,8 +427,5 @@ // Execute the callback for additonal asserts | ||
beforeEach(function (cb) { | ||
superNock.typicalResponse().tempURL(); | ||
rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key, | ||
tempURLKey : rackitOptions.tempURLKey | ||
}); | ||
superNock.typicalResponse(); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.options.prefix = 'empty'; | ||
rackit.init(function (err) { | ||
@@ -572,3 +437,3 @@ superNock.allDone(); | ||
describe('local file upload (string param)', function() { | ||
describe('local file upload (string param)', function () { | ||
@@ -596,3 +461,3 @@ it('should return an error if the file does not exist', function (cb) { | ||
// Perform the actual test | ||
superNock.add(container, testFile.data, testFile.type); | ||
superNock.add(container, testFile.data, testFile.type, testFile.size); | ||
rackit.add(testFile.path, assertAdd(container, count + 1, cb)); | ||
@@ -606,4 +471,4 @@ }); | ||
var type = 'text/mytype'; | ||
superNock.add(container, testFile.data, type); | ||
rackit.add(testFile.path, { type: type }, assertAdd(container, count + 1, cb)); | ||
superNock.add(container, testFile.data, type, testFile.size); | ||
rackit.add(testFile.path, { type : type }, assertAdd(container, count + 1, cb)); | ||
}); | ||
@@ -613,3 +478,3 @@ | ||
describe('streaming upload (ReadableStream param)', function() { | ||
describe('streaming upload (ReadableStream param)', function () { | ||
@@ -628,3 +493,3 @@ it('should return an error if the stream is not readable', function (cb) { | ||
it('should return an error if no type is specified (and no content-type header)', function(cb) { | ||
it('should return an error if no type is specified (and no content-type header)', function (cb) { | ||
var stream = fs.createReadStream(testFile.path); | ||
@@ -644,4 +509,4 @@ rackit.add(stream, function (err, cloudpath) { | ||
var stream = fs.createReadStream(testFile.path); | ||
superNock.add(container, testFile.data, testFile.type, true); | ||
rackit.add(stream, {type: testFile.type}, assertAdd(container, count + 1, cb)); | ||
superNock.add(container, testFile.data, testFile.type); | ||
rackit.add(stream, {type : testFile.type}, assertAdd(container, count + 1, cb)); | ||
}); | ||
@@ -653,6 +518,6 @@ | ||
superNock.add(container, testFile.data, testFile.type, true); | ||
superNock.add(container, testFile.data, testFile.type); | ||
// Set up the small server that will forward the request to Rackit | ||
var server = http.createServer(function(req, res) { | ||
var server = http.createServer(function (req, res) { | ||
rackit.add(req, assertAdd(container, count + 1, cb)); | ||
@@ -665,4 +530,4 @@ server.close(); | ||
uri : 'http://localhost:7357', | ||
headers: { | ||
'content-type': 'text/plain' | ||
headers : { | ||
'content-type' : 'text/plain' | ||
} | ||
@@ -678,6 +543,6 @@ }); | ||
superNock.add(container, testFile.data, testFile.type, false); | ||
superNock.add(container, testFile.data, testFile.type, ''+testFile.size); | ||
// Set up the small server that will forward the request to Rackit | ||
var server = http.createServer(function(req, res) { | ||
var server = http.createServer(function (req, res) { | ||
rackit.add(req, assertAdd(container, count + 1, cb)); | ||
@@ -690,5 +555,5 @@ server.close(); | ||
uri : 'http://localhost:7357', | ||
headers: { | ||
'content-type': 'text/plain', | ||
'content-length': '' + Buffer.byteLength(testFile.data) | ||
headers : { | ||
'content-type' : 'text/plain', | ||
'content-length' : '' + Buffer.byteLength(testFile.data) | ||
} | ||
@@ -704,6 +569,6 @@ }); | ||
superNock.add(container, testFile.data, testFile.type, true); | ||
superNock.add(container, testFile.data, testFile.type); | ||
// Set up the small server that will forward the request to Rackit | ||
var server = http.createServer(function(req, res) { | ||
var server = http.createServer(function (req, res) { | ||
rackit.add(req, assertAdd(container, count + 1, cb)); | ||
@@ -716,4 +581,4 @@ server.close(); | ||
uri : 'http://localhost:7357', | ||
headers: { | ||
'content-type': 'text/plain', | ||
headers : { | ||
'content-type' : 'text/plain', | ||
'etag' : 'somehashvalue234' | ||
@@ -731,6 +596,6 @@ } | ||
superNock.add(container, testFile.data, type, true); | ||
superNock.add(container, testFile.data, type); | ||
// Set up the small server that will forward the request to Rackit | ||
var server = http.createServer(function(req, res) { | ||
var server = http.createServer(function (req, res) { | ||
rackit.add(req, {type : type}, assertAdd(container, count + 1, cb)); | ||
@@ -743,4 +608,4 @@ server.close(); | ||
uri : 'http://localhost:7357', | ||
headers: { | ||
'content-type': 'text/plain' | ||
headers : { | ||
'content-type' : 'text/plain' | ||
} | ||
@@ -757,2 +622,3 @@ }); | ||
it('should create a prefixed, non-CDN container when none exist', function (cb) { | ||
var prefix = 'new'; | ||
@@ -765,3 +631,3 @@ var container = prefix + '0'; | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -771,7 +637,8 @@ // Add on the mock for the add request | ||
.createContainer(container) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
rackit.add(testFile.path, assertAdd(container, 1, function () { | ||
// Assert the container is not CDN enabled | ||
rackit.hCDNContainers.should.not.have.property(container); | ||
var _container = _.find(rackit.aCDNContainers, { name : container }); | ||
should.not.exist(_container); | ||
cb(); | ||
@@ -789,3 +656,3 @@ })); | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -795,7 +662,8 @@ // Add on the mock for the add request | ||
.createContainer(container) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
rackit.add(testFile.path, assertAdd(container, 1, function () { | ||
// Assert the container is not CDN enabled | ||
rackit.hCDNContainers.should.not.have.property(container); | ||
var _container = _.find(rackit.aCDNContainers, { name : container }); | ||
should.not.exist(_container); | ||
cb(); | ||
@@ -817,3 +685,3 @@ })); | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -824,7 +692,8 @@ // Add on the mock for the add request | ||
.enableCDN(container) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
rackit.add(testFile.path, assertAdd(container, 1, function () { | ||
// Assert the container is CDN enabled | ||
rackit.hCDNContainers.should.have.property(container); | ||
var _container = _.find(rackit.aCDNContainers, { name : container }); | ||
should.exist(_container); | ||
cb(); | ||
@@ -842,3 +711,3 @@ })); | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -849,7 +718,8 @@ // Add on the mock for the add request | ||
.enableCDN(container) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
rackit.add(testFile.path, assertAdd(container, 1, function () { | ||
// Assert the container is CDN enabled | ||
rackit.hCDNContainers.should.have.property(container); | ||
var _container = _.find(rackit.aCDNContainers, { name : container }); | ||
should.exist(_container); | ||
cb(); | ||
@@ -860,3 +730,3 @@ })); | ||
describe('automatic container creation - concurrent operations', function (cb) { | ||
describe('automatic container creation - concurrent operations', function () { | ||
@@ -871,3 +741,3 @@ it('parallel operations should produce one new container when none exist', function (cb) { | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -878,4 +748,4 @@ // Setup the nock with two add operations | ||
.createContainer(container) | ||
.add(container, testFile.data, testFile.type) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size) | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
@@ -897,6 +767,7 @@ // Upload two files in parallel | ||
// Assert the container was created | ||
rackit.hContainers.should.have.property(container); | ||
var _container = _.find(rackit.aContainers, { name : container }); | ||
should.exist(_container); | ||
// Assert the container count | ||
rackit.hContainers[container].count.should.equal(2); | ||
_container.count.should.equal(2); | ||
@@ -919,3 +790,3 @@ // Assert the file was added to the expected container | ||
// Assert that the container does not exist | ||
rackit.hContainers.should.not.have.property(container); | ||
should.not.exist(_.find(rackit.aContainers, { name : container })); | ||
@@ -926,4 +797,4 @@ // Setup the nock with two add operations | ||
.createContainer(container) | ||
.add(container, testFile.data, testFile.type) | ||
.add(container, testFile.data, testFile.type); | ||
.add(container, testFile.data, testFile.type, testFile.size) | ||
.add(container, testFile.data, testFile.type, testFile.size); | ||
@@ -945,6 +816,7 @@ // Upload two files in parallel | ||
// Assert the container was created | ||
rackit.hContainers.should.have.property(container); | ||
var _container = _.find(rackit.aContainers, { name : container }); | ||
should.exist(_container); | ||
// Assert the container count | ||
rackit.hContainers[container].count.should.equal(2); | ||
_container.count.should.equal(2); | ||
@@ -966,6 +838,3 @@ // Assert the file was added to the expected container | ||
superNock.typicalResponse(); | ||
rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key | ||
}); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -979,3 +848,2 @@ superNock.allDone(); | ||
var cloudpath = 'container/file'; | ||
var filepath = __dirname + '/tempfile.txt'; | ||
superNock.get(cloudpath, testFile.data); | ||
@@ -987,7 +855,7 @@ | ||
var data = ''; | ||
stream.on('data', function(chunk) { | ||
stream.on('data', function (chunk) { | ||
data += chunk; | ||
}); | ||
stream.on('end', function() { | ||
stream.on('end', function () { | ||
superNock.allDone(); | ||
@@ -1005,5 +873,7 @@ data.should.equal(testFile.data); | ||
// Get the file | ||
rackit.get(cloudpath, filepath, function(err) { | ||
rackit.get(cloudpath, filepath, function (err) { | ||
should.not.exist(err); | ||
// Test the data | ||
fs.readFile(filepath, 'utf8', function(err, data) { | ||
fs.readFile(filepath, 'utf8', function (err, data) { | ||
should.not.exist(err); | ||
data.should.equal(testFile.data); | ||
@@ -1016,12 +886,128 @@ fs.unlink(filepath, cb); | ||
describe('#getCloudpath', function () { | ||
// Exercises Rackit#remove: issuing DELETE requests against Cloud Files and
// keeping the locally cached container object counts in sync.
describe('#remove', function () {
	var rackit;

	beforeEach(function (cb) {
		// Mock a standard auth + listing exchange and build a fresh client.
		superNock.typicalResponse();
		rackit = new Rackit(rackitOptions);
		rackit.init(function (err) {
			superNock.allDone();
			cb(err);
		});
	});

	it('should send a "delete" request to Cloud Files', function (cb) {
		var target = 'multiple0/obj2';
		superNock.remove(target, 204);
		rackit.remove(target, function (err) {
			superNock.allDone();
			should.not.exist(err);
			cb();
		});
	});

	it('should decrement the internal container count by 1', function (cb) {
		var target = 'multiple0/obj2';
		superNock.remove(target, 204);
		rackit.remove(target, function (err) {
			superNock.allDone();
			should.not.exist(err);
			// The mock container starts with 3 objects; one was just removed.
			_.find(rackit.aContainers, { name : 'multiple0' }).count.should.equal(2);
			cb();
		});
	});

	it('should return an error if file does not exist', function (cb) {
		var target = 'multiple0/objFake';
		superNock.remove(target, 404);
		rackit.remove(target, function (err) {
			superNock.allDone();
			should.exist(err);
			err.should.be.an.instanceOf(Error);
			cb();
		});
	});

	it('should not decrement the internal container count if file does not exist', function (cb) {
		var target = 'multiple0/objFake';
		superNock.remove(target, 404);
		rackit.remove(target, function (err) {
			superNock.allDone();
			// A failed removal must leave the cached count untouched.
			_.find(rackit.aContainers, { name : 'multiple0' }).count.should.equal(3);
			cb();
		});
	});
});
describe('#setMeta', function () { | ||
var rackit; | ||
before(function (cb) { | ||
superNock.typicalResponse().tempURL(); | ||
rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key, | ||
tempURLKey : rackitOptions.tempURLKey | ||
superNock.typicalResponse(); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
superNock.allDone(); | ||
cb(err); | ||
}); | ||
}); | ||
// setMeta against a missing object must propagate the 404 as an Error.
it('should return an error if file does not exist', function (cb) {
	var target = 'multiple0/objFake';
	superNock.post(target, 404);
	rackit.setMeta(target, {}, function (err) {
		superNock.allDone();
		should.exist(err);
		err.should.be.an.instanceOf(Error);
		cb();
	});
});
// Each metadata field must be translated to an X-Object-Meta-* header,
// preserving the caller's key casing.
it('should post fields as prefixed headers', function (cb) {
	var target = 'multiple0/obj1';
	var fields = {
		meatOne : 'Bacon1',
		MeatTwo : 'Bacon2 yum yum!'
	};
	superNock.post(target, 202, {
		'X-Object-Meta-meatOne' : 'Bacon1',
		'X-Object-Meta-MeatTwo' : 'Bacon2 yum yum!'
	});
	rackit.setMeta(target, fields, function (err) {
		superNock.allDone();
		should.not.exist(err);
		cb();
	});
});
// An empty metadata object results in a POST with no meta headers at all.
it('should post with no headers if empty object given', function (cb) {
	var target = 'multiple0/obj1';
	superNock.post(target, 202);
	rackit.setMeta(target, {}, function (err) {
		superNock.allDone();
		should.not.exist(err);
		cb();
	});
});
}); | ||
describe('#getMeta', function () { | ||
var rackit; | ||
before(function (cb) { | ||
superNock.typicalResponse(); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -1033,2 +1019,59 @@ superNock.allDone(); | ||
// getMeta against a missing object must surface the 404 as an Error.
it('should return an error if file does not exist', function (cb) {
	var target = 'multiple0/objFake';
	superNock.head(target, 404);
	rackit.getMeta(target, function (err, metadata) {
		superNock.allDone();
		should.exist(err);
		err.should.be.an.instanceOf(Error);
		cb();
	});
});
it('should return hash of metadata fields', function (cb) {
	var target = 'multiple0/obj1';
	superNock.head(target, 200, {
		'X-Object-Meta-meatOne' : 'Bacon1',
		'X-Object-Meta-MeatTwo' : 'Bacon2 yum yum!'
	});
	rackit.getMeta(target, function (err, metadata) {
		superNock.allDone();
		should.not.exist(err);
		should.exist(metadata);
		// HTTP header names are case-insensitive, so keys come back lowercased.
		metadata.should.eql({
			meatone : 'Bacon1',
			meattwo : 'Bacon2 yum yum!'
		});
		cb();
	});
});
it('should return empty object in case of no metadata', function (cb) {
	var target = 'multiple0/obj1';
	superNock.head(target, 200);
	rackit.getMeta(target, function (err, metadata) {
		superNock.allDone();
		should.not.exist(err);
		should.exist(metadata);
		// Absence of X-Object-Meta-* headers yields an empty hash, not null.
		metadata.should.eql({});
		cb();
	});
});
}); | ||
describe('#getCloudpath', function () { | ||
var rackit; | ||
// One-time setup: mock auth + container listing and initialize a shared
// Rackit instance reused by every test in this describe block.
before(function (cb) {
	superNock.typicalResponse();
	rackit = new Rackit(rackitOptions);
	rackit.init(function (err) {
		superNock.allDone();
		cb(err);
	});
});
it('should return null when given URI from container that does not exist', function () { | ||
@@ -1094,6 +1137,3 @@ should.not.exist(rackit.getCloudpath('http://not.a.real.cdn.container.uri.rackcdn.com/nofile')); | ||
superNock.typicalResponse(); | ||
rackit = new Rackit({ | ||
user : rackitOptions.user, | ||
key : rackitOptions.key | ||
}); | ||
rackit = new Rackit(rackitOptions); | ||
rackit.init(function (err) { | ||
@@ -1106,6 +1146,5 @@ superNock.allDone(); | ||
// Gets all of the object cloudpaths belonging to the given containers. This function gets the objects | ||
// from the mock (the "actual" data store) for validation of what Rackit gives | ||
function getObjects (containers) { | ||
function getObjects(containers) { | ||
var i, j, container, object, objects = []; | ||
@@ -1127,6 +1166,6 @@ | ||
name : object.name, | ||
hash : object.hash, | ||
etag : object.hash, | ||
bytes : object.bytes, | ||
content_type : object.content_type, | ||
last_modified : object.last_modified | ||
contentType : object.content_type, | ||
lastModified : new Date(object.last_modified) | ||
}); | ||
@@ -1138,3 +1177,3 @@ } | ||
function getObjectCloudpaths (objects) { | ||
function getObjectCloudpaths(objects) { | ||
var i = objects.length; | ||
@@ -1157,3 +1196,3 @@ while (i--) | ||
// Call Rackits list method | ||
rackit.list(function(err, list) { | ||
rackit.list(function (err, list) { | ||
superNock.allDone(); | ||
@@ -1255,3 +1294,3 @@ should.not.exist(err); | ||
// Call Rackits list method | ||
rackit.list({ extended : true }, function(err, list) { | ||
rackit.list({ extended : true }, function (err, list) { | ||
superNock.allDone(); | ||
@@ -1266,3 +1305,6 @@ should.not.exist(err); | ||
for (var i = 0; i < objects.length; i++) { | ||
list.should.includeEql(objects[i]); | ||
for (var p in objects[i]) { | ||
list[i].should.have.property(p); | ||
list[i][p].should.eql(objects[i][p]); | ||
} | ||
} | ||
@@ -1269,0 +1311,0 @@ |
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
78756
12
2132
146
4
4
+ Addedlodash@2.4.1
+ Addedpkgcloud@0.8.17
+ Addedasn1@0.1.11(transitive)
+ Addedassert-plus@0.1.5(transitive)
+ Addedasync@0.1.220.2.100.9.2(transitive)
+ Addedaws-sign@0.3.0(transitive)
+ Addedbalanced-match@1.0.2(transitive)
+ Addedboom@0.4.2(transitive)
+ Addedbrace-expansion@1.1.11(transitive)
+ Addedcombined-stream@0.0.7(transitive)
+ Addedconcat-map@0.0.1(transitive)
+ Addedcookie-jar@0.3.0(transitive)
+ Addedcryptiles@0.2.2(transitive)
+ Addedctype@0.5.3(transitive)
+ Addeddeep-equal@0.2.2(transitive)
+ Addeddelayed-stream@0.0.5(transitive)
+ Addederrs@0.2.4(transitive)
+ Addedeventemitter2@0.4.14(transitive)
+ Addedfiled@0.0.7(transitive)
+ Addedforever-agent@0.5.2(transitive)
+ Addedform-data@0.0.8(transitive)
+ Addedfs.realpath@1.0.0(transitive)
+ Addedglob@7.2.3(transitive)
+ Addedhawk@0.13.1(transitive)
+ Addedhoek@0.8.50.9.1(transitive)
+ Addedhttp-signature@0.10.1(transitive)
+ Addedi@0.3.7(transitive)
+ Addedinflight@1.0.6(transitive)
+ Addedinherits@2.0.4(transitive)
+ Addedip@0.0.5(transitive)
+ Addedjson-stringify-safe@4.0.0(transitive)
+ Addedlodash@2.4.1(transitive)
+ Addedmime@1.2.11(transitive)
+ Addedminimatch@3.1.2(transitive)
+ Addedminimist@1.2.8(transitive)
+ Addedmkdirp@0.5.6(transitive)
+ Addedncp@1.0.1(transitive)
+ Addednode-uuid@1.4.8(transitive)
+ Addedoauth-sign@0.3.0(transitive)
+ Addedonce@1.4.0(transitive)
+ Addedpath-is-absolute@1.0.1(transitive)
+ Addedpkgcloud@0.8.17(transitive)
+ Addedpkginfo@0.2.3(transitive)
+ Addedqs@0.6.6(transitive)
+ Addedrequest@2.22.0(transitive)
+ Addedrimraf@2.7.1(transitive)
+ Addedsax@1.4.1(transitive)
+ Addedsntp@0.2.4(transitive)
+ Addedthrough@2.3.8(transitive)
+ Addedtunnel-agent@0.3.0(transitive)
+ Addedunderscore@1.4.4(transitive)
+ Addedurl-join@0.0.1(transitive)
+ Addedutile@0.3.0(transitive)
+ Addedwrappy@1.0.2(transitive)
+ Addedxml2js@0.1.14(transitive)
- Removedrequest@>= 2.9.100
- Removedajv@6.12.6(transitive)
- Removedasn1@0.2.6(transitive)
- Removedassert-plus@1.0.0(transitive)
- Removedasynckit@0.4.0(transitive)
- Removedaws-sign2@0.7.0(transitive)
- Removedaws4@1.13.2(transitive)
- Removedbcrypt-pbkdf@1.0.2(transitive)
- Removedcaseless@0.12.0(transitive)
- Removedcombined-stream@1.0.8(transitive)
- Removedcore-util-is@1.0.2(transitive)
- Removeddashdash@1.14.1(transitive)
- Removeddelayed-stream@1.0.0(transitive)
- Removedecc-jsbn@0.1.2(transitive)
- Removedextend@3.0.2(transitive)
- Removedextsprintf@1.3.0(transitive)
- Removedfast-deep-equal@3.1.3(transitive)
- Removedfast-json-stable-stringify@2.1.0(transitive)
- Removedforever-agent@0.6.1(transitive)
- Removedform-data@2.3.3(transitive)
- Removedgetpass@0.1.7(transitive)
- Removedhar-schema@2.0.0(transitive)
- Removedhar-validator@5.1.5(transitive)
- Removedhttp-signature@1.2.0(transitive)
- Removedis-typedarray@1.0.0(transitive)
- Removedisstream@0.1.2(transitive)
- Removedjsbn@0.1.1(transitive)
- Removedjson-schema@0.4.0(transitive)
- Removedjson-schema-traverse@0.4.1(transitive)
- Removedjson-stringify-safe@5.0.1(transitive)
- Removedjsprim@1.4.2(transitive)
- Removedmime-db@1.52.0(transitive)
- Removedmime-types@2.1.35(transitive)
- Removedoauth-sign@0.9.0(transitive)
- Removedperformance-now@2.1.0(transitive)
- Removedpsl@1.13.0(transitive)
- Removedpunycode@2.3.1(transitive)
- Removedqs@6.5.3(transitive)
- Removedrequest@2.88.2(transitive)
- Removedsafe-buffer@5.2.1(transitive)
- Removedsafer-buffer@2.1.2(transitive)
- Removedsshpk@1.18.0(transitive)
- Removedtough-cookie@2.5.0(transitive)
- Removedtunnel-agent@0.6.0(transitive)
- Removedtweetnacl@0.14.5(transitive)
- Removeduri-js@4.4.1(transitive)
- Removeduuid@3.4.0(transitive)
- Removedverror@1.10.0(transitive)
Updatedasync@>= 0.2.9
Updatedmime@>= 1.2.11