Comparing version 0.9.9 to 0.9.10
@@ -38,3 +38,3 @@ 'use strict';
-const fullPath = env.diskStorageUri(request);
+const fullPath = env.diskStorageUri(request.id);
 const readStream = fs.createReadStream(fullPath, {
@@ -61,3 +61,3 @@ flags: 'r',
 } else {
-req(this._createRequestHeader(env.webStorageUri(request), range))
+req(this._createRequestHeader(env.webStorageUri(request.id), range))
 .on('response', (staticResponse) => {
@@ -64,0 +64,0 @@ response.addHttpProperty(N.CONTENT_LENGTH, staticResponse.headers[N.CONTENT_LENGTH]);
@@ -29,2 +29,3 @@ 'use strict';
 response.addHttpProperty(N.CONTENT_LENGTH, response.proxy.original.size);
+response.addHttpProperty(N.COPY_STATUS, response.proxy.original.copyStatus);
 res.set(response.httpProps);
@@ -31,0 +32,0 @@ res.status(200).send();
@@ -65,4 +65,2 @@ 'use strict';
 app.use(`/blobs`, express.static(env.localStoragePath));
-app.use(`/blobs/${env.virtualDirUri}`, express.static(env.virtualDirPath));
-app.use(`/blobs/${env.snapshotUri}`, express.static(env.snapshotPath));
 require('./routes/AccountRoute')(app);
@@ -69,0 +67,0 @@ require('./routes/ContainerRoute')(app);
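With the virtual-directory and snapshot mounts gone, a single static route now serves every blob file straight out of `__blobstorage__`, and `webStorageUri(id)` (see the Environment changes below) resolves against it. A minimal sketch of the resulting mapping, assuming the default blob port 10000 and the illustrative id computed in the Environment section:

```js
// Hedged sketch: one flat static mount replaces the three per-type mounts.
const express = require('express');
const app = express();
const localStoragePath = '/tmp/azurite/__blobstorage__'; // illustrative location
app.use('/blobs', express.static(localStoragePath));
// A blob whose id is 'QWNvbnRhaW5lcjFibG9iLnR4dA==' is then served at
// http://localhost:10000/blobs/QWNvbnRhaW5lcjFibG9iLnR4dA==
app.listen(10000);
```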
@@ -22,8 +22,3 @@ 'use strict';
 this.dbName = '__azurite_db__.json';
-this.virtualDirUri = 'virtualdirs';
-this.snapshotUri = 'snapshots';
 this.localStoragePath = path.join(this.azuriteRootPath, '__blobstorage__');
-this.commitsPath = path.join(this.azuriteRootPath, '__commits__');
-this.virtualDirPath = path.join(this.azuriteRootPath, '__virtualdirs__');
-this.snapshotPath = path.join(this.azuriteRootPath, '__snapshots__');
 this.azuriteDBPath = path.join(this.azuriteRootPath, this.dbName);
@@ -33,5 +28,2 @@ this.emulatedStorageAccountName = 'devstoreaccount1';
 return fs.mkdirsAsync(this.localStoragePath)
-.then(() => {
-return fs.mkdirsAsync(this.commitsPath);
-})
 }
@@ -43,57 +35,40 @@
 *
-* @param {any} request
+* @param {string} id of the blob
 *
 * @memberof Environment
 * */
-webStorageUri(request) {
-return (request.isVirtualDirectory() && !request.isSnapshot())
-? `http://localhost:${this.port}/blobs/${this.virtualDirUri}/${request.containerName}/${request.publicName()}`
-: (request.isSnapshot())
-? `http://localhost:${this.port}/blobs/${this.snapshotUri}/${request.containerName}/${request.publicName()}`
-: `http://localhost:${this.port}/blobs/${request.containerName}/${request.publicName()}`;
+webStorageUri(id) {
+return `http://localhost:${this.port}/blobs/${id}`;
 }
 /**
-* Based on the request it creates the full path to the location of a blob on disk.
+* Creates the full path to the location of a blob on disk based on its ID.
 *
-* Virtual directories are stored in a special folder that is not accessible through the Standard REST API.
-* This is to make sure that no special characters or words need to be reserved in the regular blob workspace.
-* Since virtual directories contain trailing slashes (which are invalid filename characters) we store the
-* Base64 representation on disk.
-*
-* Snapshots are also stored in a special folder since we encode the snapshot date into the name as <blobname>-<snapshotId>.
-*
-* @param {AzuriteBlobRequest} request
-* @param {any} parent if defined the path to the parent (block blobs) blob or origin (snapshot) of the block is returned in any case
-* @returns Full path on disk
 *
+* @param {any} id
+* @returns full path to blob on disk
 * @memberof Environment
 */
-diskStorageUri(request, parent) {
-let containerPath;
-if (request.isVirtualDirectory()) {
-containerPath = path.join(this.virtualDirPath, request.containerName);
-} else if (request.isSnapshot() && parent === true) {
-containerPath = path.join(this.localStoragePath, request.containerName);
-} else if (request.isSnapshot()) {
-containerPath = path.join(this.snapshotPath, request.containerName);
-} else if (request.blockId && parent === undefined) {
-const blockPath = path.join(this.commitsPath, request.blockName);
-return utils.escapeBlobDelimiter(blockPath);
-} else {
-containerPath = path.join(this.localStoragePath, request.containerName);
-}
+diskStorageUri(id) {
+return path.join(this.localStoragePath, id);
+}
-let blobPath;
-if (request.isSnapshot() && parent === true) {
-blobPath = path.join(containerPath, request.blobName);
-} else {
-blobPath = path.join(containerPath, request.publicName());
-}
-return (request.isVirtualDirectory())
-? blobPath
-: utils.escapeBlobDelimiter(blobPath);
+// We prepend a specific character to guarantee unique ids.
+// This is necessary since otherwise snapshot IDs could overlap with blob IDs could overlap with page IDs, ....
+blobId(containerName, blobName) {
+return Buffer.from(`A${containerName}${blobName}`, 'utf8').toString('base64');
+}
+blockId(containerName, blobName, blockId) {
+return Buffer.from(`B${containerName}${blobName}${blockId}`, 'utf8').toString('base64');
+}
+snapshotId(containerName, blobName, date) {
+return Buffer.from(`C${containerName}${blobName}${date}`, 'utf8').toString('base64');
+}
+pageId(containerName, blobName) {
+return Buffer.from(`D${containerName}${blobName}`, 'utf8').toString('base64');
+}
 }
 module.exports = new Environment();
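The type prefix (`A` for blobs, `B` for blocks, `C` for snapshots, `D` for pages) keeps the four ID namespaces disjoint, and base64 keeps the IDs compact and decodable. A quick sketch of how the helpers above behave (container and blob names are illustrative):

```js
// Illustrative use of the new ID helpers.
const blobId = (containerName, blobName) =>
    Buffer.from(`A${containerName}${blobName}`, 'utf8').toString('base64');
const snapshotId = (containerName, blobName, date) =>
    Buffer.from(`C${containerName}${blobName}${date}`, 'utf8').toString('base64');

const id = blobId('container1', 'blob.txt');
console.log(id); // 'QWNvbnRhaW5lcjFibG9iLnR4dA=='
// A snapshot of the same blob can never collide with the blob itself thanks to the 'C' prefix:
console.log(snapshotId('container1', 'blob.txt', 'Thu, 02 Nov 2017 10:00:00 GMT') === id); // false
// IDs stay reversible, which helps when inspecting __blobstorage__ on disk:
console.log(Buffer.from(id, 'base64').toString('utf8')); // 'Acontainer1blob.txt'
```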
@@ -49,3 +49,4 @@ 'use strict';
 InvalidRange: new ErrorCode('InvalidRange', 416, 'The range specified is invalid for the current size of the resource.'),
-InternalError: new ErrorCode('InternalError', 500, 'The server encountered an internal error. Please retry the request.')
+InternalError: new ErrorCode('InternalError', 500, 'The server encountered an internal error. Please retry the request.'),
+PendingCopyOperation: new ErrorCode('PendingCopyOperation', 409, 'There is currently a pending copy operation.')
 }
@@ -36,7 +36,8 @@ 'use strict';
 ConflictingContainerVal = require('./../validation/ConflictingContainer'),
-BlobLeaseUsageValidation = require('./../validation/BlobLeaseUsage');
+BlobLeaseUsageValidation = require('./../validation/BlobLeaseUsage'),
+CopyStatusValidation = require('./../validation/CopyStatus');
 module.exports = (req, res, next) => {
 BbPromise.try(() => {
-if (req.azuriteOperation === undefined) {
+if (req.azuriteOperation === undefined || req.azuriteOperation === Operations.Blob.COPY_BLOB) {
 res.status(501).send('Not Implemented yet.');
@@ -49,3 +50,3 @@ return;
 const containerProxy = o.containerProxy;
-const { blobProxy } = sm._getCollectionAndBlob(request.containerName, request.isSnapshot() ? request.snapshotName() : request.blobName);
+const { blobProxy } = sm._getCollectionAndBlob(request.containerName, request.id);
 const validationContext = new ValidationContext({
@@ -142,2 +143,3 @@ request: request,
 const blockName = `${request.containerName}-${request.blobName}-${block.id}`;
+// TODO: FIXME: This should be replaced with a BlockExists Validation
 valContext.run(BlobExistsVal, {
@@ -285,3 +287,3 @@ blobProxy: { publicName: () => { return blockName } }
 .run(ConditionalRequestHeadersVal, { usage: Usage.Write })
-// TODO: Add check whether intended copy operation is pending
+.run(CopyStatusValidation);
 }
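Together with the new `PendingCopyOperation` error code above, `CopyStatusValidation` closes the old TODO: writes against a destination blob must be rejected while a copy is still running. The module itself is not part of this excerpt; a plausible sketch, assuming the `validate`-style interface and `AzuriteError` wrapper implied by the surrounding validation pipeline:

```js
// Hedged sketch of validation/CopyStatus.js — the interface, AzuriteError, and paths are assumptions.
const AzuriteError = require('./../core/AzuriteError'), // hypothetical path
    ErrorCodes = require('./../core/ErrorCodes');       // hypothetical path

class CopyStatus {
    validate({ blobProxy }) {
        // 'pending' is the x-ms-copy-status value Azure reports while a copy is in flight.
        if (blobProxy && blobProxy.original.copyStatus === 'pending') {
            throw new AzuriteError(ErrorCodes.PendingCopyOperation); // surfaces as 409 (Conflict)
        }
    }
}

module.exports = new CopyStatus();
```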
@@ -32,9 +32,15 @@ 'use strict';
 if (this.query.snapshot) {
-this.snapshotDate = this.query.snapshot
+this.snapshotDate = new Date(this.query.snapshot).toUTCString();
 this.snapshot = true;
+this.id = env.snapshotId(this.containerName, this.blobName, this.snapshotDate);
+this.originId = env.blobId(this.containerName, this.blobName);
+this.originUri = env.diskStorageUri(this.originId);
+} else if (this.blockId) {
+this.id = env.blockId(this.containerName, this.blobName, this.blockId);
+this.parentId = env.blobId(this.containerName, this.blobName);
+this.parentUri = env.diskStorageUri(this.parentId);
+} else {
+this.id = env.blobId(this.containerName, this.blobName);
 }
-if (this.blockId) {
-this.parent = `${this.containerName}-${this.blobName}`;
-this.blockName = `${this.containerName}-${this.blobName}-${this.blockId}`;
-}
+this.uri = env.diskStorageUri(this.id);
 }
@@ -57,18 +63,2 @@
-enableSnapshot(snapshotDate) {
-this.snapshotDate = snapshotDate;
-this.snapshot = true;
-}
-/**
-* Checks whether blob name corresponds to a virtual directory. This is true if the name ends with at least one trailing slash.
-*
-* @returns true if name is followed by at least one '/' character, false otherwise.
-*
-* @memberof AzuriteBlobRequest
-*/
-isVirtualDirectory() {
-return this.blobName.match('.*\/+$') !== null;
-}
 isSnapshot() {
@@ -78,47 +68,18 @@ return this.snapshot;
-snapshotName() {
-return `${this.blobName}-${Date.parse(this.snapshotDate)}`;
-}
 /**
-* The name of the blob that is used in the web and disk interface.
+* Return the blob's URI of Azurite's internal file system location.
 *
 * @returns
 *
 * @memberof AzuriteBlobRequest
 */
-publicName() {
-if (this.isSnapshot()) {
-return this.snapshotName();
-}
-if (this.isVirtualDirectory()) {
-return Buffer.from(this.blobName, 'utf8').toString('base64');
-}
-return this.blobName;
-}
-/**
-* Return the blob's URI of its external location or Azurite's internal file system location.
-*
-* @memberof AzuriteBlobRequest
-*/
 copySourceUrl() {
 // External storage account (supported since version 2015-02-21)
 if (this.httpProps[N.COPY_SOURCE] === undefined) {
 throw new InternalAzuriteError('Request: copySourceUrl was called without copy-source header set.')
 }
 const result = {};
+let uri;
-const source = this.httpProps[N.COPY_SOURCE];
+let source = this.httpProps[N.COPY_SOURCE];
 if (source.match('https?:\/\/')) {
 result.type = 'external';
 result.uri = source;
 } else {
-// Same (emulator) storage account
-const regex = /\/(.*)\/(.*)\/(.*)/g
-const match = regex.exec(source);
-// Due to previous validation it is guaranteed that match !== null
-if (match !== null) {
-result.type = 'internal';
-result.uri = env.diskStorageUri(request);
-}
+// Same Storage account
+if (source.includes('http://127.0.0.1/devstoreaccount1/')) {
+source = source.replace('http://127.0.0.1/devstoreaccount1/', '');
+uri = env.diskStorageUri(this);
+}
@@ -125,0 +86,0 @@ return result;
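The request constructor is now the single place where IDs are wired up: a snapshot request derives its own `id` plus the `originId` of its base blob, a block upload derives its `id` plus the `parentId` of the blob it belongs to, and a plain blob request just gets `blobId`. Condensed into a standalone sketch (`env` as in the Environment section above):

```js
// Condensed sketch of the ID wiring in the constructor above.
function deriveIds(req, env) {
    if (req.query.snapshot) { // snapshot request
        const snapshotDate = new Date(req.query.snapshot).toUTCString();
        return {
            id: env.snapshotId(req.containerName, req.blobName, snapshotDate),
            originId: env.blobId(req.containerName, req.blobName), // the blob being snapshotted
        };
    }
    if (req.blockId) { // block upload
        return {
            id: env.blockId(req.containerName, req.blobName, req.blockId),
            parentId: env.blobId(req.containerName, req.blobName), // the blob the block belongs to
        };
    }
    return { id: env.blobId(req.containerName, req.blobName) }; // plain blob
}
```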
@@ -37,2 +37,8 @@ 'use strict';
 entity.name = request.blobName;
+entity.id = request.id;
+// Parent ID refers to the blob a block belongs to
+entity.parentId = request.parentId; entity.parentId === undefined ? delete entity.parentId : (() => {/*NOOP*/ });
+// Origin ID refers to the blob a snapshot belongs to
+entity.originId = request.originId; entity.originId === undefined ? delete entity.originId : (() => {/*NOOP*/ });
+entity.uri = request.uri;
 entity.snapshot = false;
@@ -49,3 +55,3 @@ entity.committed = request.commit; // this is true per default
 entity.contentDisposition = request.httpProps[N.CONTENT_DISPOSITION]; entity.contentDisposition === undefined ? delete entity.contentDisposition : (() => {/*NOOP*/ });
-entity.md5 = request.httpProps[N.CONTENT_MD5]; entity.md5 === undefined ? delete entity.md5 : (() => {/*NOOP*/ });
+entity.md5 = request.httpProps[N.CONTENT_MD5]; entity.md5 === undefined ? delete entity.md5 : (() => {/*NOOP*/ });
 }
@@ -57,3 +63,3 @@ // Specific to Append Blobs
 // optionally set in Content-MD5 header is not stored with the blob, thus we delete it.
-delete entity.md5;
+delete entity.md5;
 }
@@ -63,4 +69,4 @@ // Specific to Block Blobs that are potentially part of a commit
 entity.blockId = request.blockId;
-entity.parent = `${request.containerName}-${request.blobName}`;
-entity.name = `${entity.parent}-${entity.blockId}`;
+// entity.parent = `${request.containerName}-${request.blobName}`;
+// entity.name = `${entity.parent}-${entity.blockId}`;
 entity.committed = false;
@@ -73,3 +79,3 @@ }
 // MD5 calculation of a page blob seems to be wrong, thus deleting it for now...
-delete entity.md5;
+delete entity.md5;
 }
@@ -76,0 +82,0 @@ return entity;
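The `x === undefined ? delete entity.x : (() => {/*NOOP*/ })` one-liners keep optional fields (parent ID, origin ID, optional headers) out of the persisted entity entirely instead of storing `undefined`. The same idea as a small helper, just to make the pattern explicit (a sketch; the generator above deliberately inlines it):

```js
// Equivalent helper for the optional-field pattern used above (sketch only).
function setOptional(entity, key, value) {
    if (value !== undefined) {
        entity[key] = value; // only persist fields the request actually carries
    }
}
// Usage mirroring the generator:
// setOptional(entity, 'parentId', request.parentId);
// setOptional(entity, 'originId', request.originId);
```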
@@ -7,3 +7,4 @@ 'use strict';
 * during unit-tests chances are high that subsequent snapshots of a blob collide time-wise since
-* they only differ at the milliseconds level.
+* they only differ at the milliseconds level. This is unlikely in a production setting where requests
+* to Azure Blob Storage travel over the network.
 *
@@ -23,10 +24,9 @@ * SnapshotTimeManager provides means to avoid such conflicts by returning a timestamp that is at least
 *
-* @param {String} containerName
-* @param {String} blobName
-* @param {Date} date
+* @param {String} id of the blob
+* @param {Date} date
 *
 * @memberof SnapshotTimeManager
 */
-_update(containerName, blobName, date) {
-this.times[`${containerName}-${blobName}`] = date;
+_update(id, date) {
+this.times[id] = date;
 }
@@ -38,12 +38,11 @@
 *
-* @param {String} containerName
-* @param {String} blobName
+* @param {String} id of the blob
+* @param {Date} now reference time for the snapshot to be taken
 *
 * @memberof SnapshotTimeManager
 */
-getDate(containerName, blobName) {
-let date = this.times[`${containerName}-${blobName}`];
-const now = new Date();
+getDate(id, now) {
+const date = this.times[id];
 if (date === undefined || (now.getTime() - date.getTime()) > 1000) {
-this._update(containerName, blobName, now);
+this._update(id, now);
 return now;
@@ -53,3 +52,3 @@ }
 updatedDate.setSeconds(date.getSeconds() + 1);
-this._update(containerName, blobName, updatedDate);
+this._update(id, updatedDate);
 return updatedDate;
@@ -56,0 +55,0 @@ }
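`getDate` now receives the reference time from the request and keys its bookkeeping by blob ID, but the guarantee is unchanged: two snapshots of the same blob always get timestamps at least one second apart. A self-contained sketch of that logic:

```js
// Self-contained sketch of the timestamp-spacing logic above.
class SnapshotTimes {
    constructor() { this.times = {}; }
    getDate(id, now) {
        const date = this.times[id];
        if (date === undefined || (now.getTime() - date.getTime()) > 1000) {
            this.times[id] = now;
            return now;
        }
        const updatedDate = new Date(date.getTime());
        updatedDate.setSeconds(date.getSeconds() + 1); // bump past the previously issued stamp
        this.times[id] = updatedDate;
        return updatedDate;
    }
}

const mgr = new SnapshotTimes();
const t0 = new Date();
const first = mgr.getDate('someBlobId', t0);
const second = mgr.getDate('someBlobId', new Date(t0.getTime() + 5)); // 5 ms later
console.log(second - first >= 1000); // true: at least one second apart
```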
@@ -44,8 +44,2 @@ 'use strict';
 }
-if (!this.db.getCollection(StorageTables.Commits)) {
-this.db.addCollection(StorageTables.Commits);
-}
-if (!this.db.getCollection(StorageTables.Pages)) {
-this.db.addCollection(StorageTables.Pages);
-}
 return this.db.saveDatabaseAsync();
@@ -57,4 +51,2 @@ })
 this.db.addCollection(StorageTables.Containers);
-this.db.addCollection(StorageTables.Commits);
-this.db.addCollection(StorageTables.Pages);
 return this.db.saveDatabaseAsync();
@@ -77,15 +69,18 @@ }
 this.db.addCollection(entity.name);
-return fs.ensureDir(request.fullPath())
-.then(() => {
-return new AzuriteResponse({ proxy: containerProxy });
-});
+return BbPromise.resolve({ proxy: containerProxy });
 }
 deleteContainer(request) {
-const coll = this.db.getCollection(StorageTables.Containers);
-coll.chain().find({ 'name': { '$eq': request.containerName } }).remove();
-this.db.removeCollection(request.containerName);
-// TODO: Remove Blocks in Committed Directory and Committed Blocks in DB
-return fs.remove(request.fullPath())
+const conColl = this.db.getCollection(StorageTables.Containers);
+conColl.chain().find({ 'name': { '$eq': request.containerName } }).remove();
+const entities = this.db.getCollection(request.containerName).chain()
+.find({ 'name': { '$contains': '' } }).data(); // get every entity in this collection
+const promises = [];
+for (const entity of entities) {
+promises.push(fs.remove(entity.uri));
+}
+return BbPromise.all(promises)
 .then(() => {
+this.db.removeCollection(request.containerName);
 return new AzuriteResponse({});
@@ -108,4 +103,4 @@ });
 const coll = this.db.getCollection(request.containerName),
-blobProxy = this._createOrUpdateBlob(coll, request, request.blobName);
-return fs.outputFile(env.diskStorageUri(request), request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] })
+blobProxy = this._createOrUpdateBlob(coll, request);
+return fs.outputFile(request.uri, request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] })
 .then(() => {
@@ -117,7 +112,7 @@ return new AzuriteResponse({ proxy: blobProxy });
 putAppendBlock(request) {
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 blobProxy.original[N.BLOB_COMMITTED_BLOCK_COUNT] += 1;
 blobProxy.original.size += request.body.length;
 coll.update(blobProxy.release());
-return fsn.appendFileAsync(env.diskStorageUri(request), request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] })
+return fsn.appendFileAsync(request.uri, request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] })
 .then(() => {
@@ -131,17 +126,23 @@ return new AzuriteResponse({ proxy: blobProxy });
 snapshoteDeleteQueryParam = request.httpProps[N.DELETE_SNAPSHOTS];
-// Fixme: We are currently not deleting snapshot files from disk (need to refactor env.diskStorageUri to support wildcard expr).
-// Since source of truth is in-memory DB this does not matter from a client's perspective, though.
-if (snapshoteDeleteQueryParam === 'include') {
-coll.chain().find({ 'name': { '$eq': request.blobName } }).remove();
-coll.chain().find({ 'origin': { '$eq': request.blobName } }).remove();
-return fs.remove(env.diskStorageUri(request))
+let promises = [];
+if (snapshoteDeleteQueryParam === 'include' || snapshoteDeleteQueryParam === 'only') {
+const result = coll.chain().find({ 'originId': { '$eq': request.id } });
+for (const entity of result.data()) {
+promises.push(fs.remove(entity.uri));
+}
+result.remove();
+if (snapshoteDeleteQueryParam === 'include') {
+coll.chain().find({ 'id': { '$eq': request.id } }).remove();
+promises.push(fs.remove(request.uri));
+}
+return BbPromise.all(promises)
 .then(() => {
 return new AzuriteResponse({});
 });
-} else if (snapshoteDeleteQueryParam === 'only') {
-coll.chain().find({ 'origin': { '$eq': request.blobName } }).remove();
-return BbPromise.resolve(new AzuriteResponse({}));
 } else {
-coll.chain().find({ 'name': { '$eq': request.blobName } }).remove();
-return fs.remove(env.diskStorageUri(request))
+coll.chain().find({ 'id': { '$eq': request.id } }).remove();
+coll.chain().find({ 'parentId': { '$eq': request.id } }).remove(); // Removing (un-)committed blocks
+return fs.remove(request.uri)
 .then(() => {
@@ -156,12 +157,3 @@ return new AzuriteResponse({});
 const blob = coll.chain()
-.find({
-'$and': [
-{
-'name': { '$eq': request.publicName() }
-},
-{
-'snapshot': { '$eq': request.isSnapshot() }
-}
-]
-})
+.find({ 'id': { '$eq': request.id } })
 .data()[0];
@@ -206,3 +198,3 @@
 // We only create the parent blob in DB if it does not already exist.
-const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.parentId);
 if (blobProxy === undefined) {
@@ -212,15 +204,14 @@ // If blockId is set we would generate a commit storage entity, thus we
 const parentBlobRequest = AzuriteBlobRequest.clone(request);
+parentBlobRequest.id = parentBlobRequest.parentId;
+delete parentBlobRequest.parentId;
 delete parentBlobRequest.blockId;
 parentBlobRequest.commit = false;
-const containerColl = this.db.getCollection(parentBlobRequest.containerName);
-this._createOrUpdateBlob(containerColl, parentBlobRequest, parentBlobRequest.blobName);
+this._createOrUpdateBlob(coll, parentBlobRequest);
 }
 // Storing block information in DB.
-const commitColl = this.db.getCollection(StorageTables.Commits);
-const blockProxy = this._createOrUpdateBlob(commitColl, request, request.blockName);
+const blockProxy = this._createOrUpdateBlob(coll, request);
 // Make sure that the parent blob exists on storage.
-return fs.ensureFile(env.diskStorageUri(request, true))
+return fs.ensureFile(request.parentUri)
 .then(() => {
 // Writing block to disk.
-return fs.outputFile(env.diskStorageUri(request), request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] });
+return fs.outputFile(request.uri, request.body, { encoding: request.httpProps[N.CONTENT_ENCODING] });
 })
@@ -235,16 +226,15 @@ .then(() => {
 for (const block of request.payload) {
-// FIXME: This should be refactored since it is defined 4 times (here, validation.js, StorageEntityGenerator, AzureBlobRequest)
-const blockName = `${request.containerName}-${request.blobName}-${block.id}`;
-blockPaths.push(path.join(env.commitsPath, blockName));
+const blockId = env.blockId(request.containerName, request.blobName, block.id);
+blockPaths.push(env.diskStorageUri(blockId));
 }
 // Updating properties of blob
 const coll = this.db.getCollection(request.containerName);
-const blobProxy = this._createOrUpdateBlob(coll, request, request.blobName);
+const blobProxy = this._createOrUpdateBlob(coll, request);
 // Writing multiple blocks to one blob
 const combinedStream = CombinedStream.create();
-for (const blockName of blockPaths) {
-combinedStream.append(fs.createReadStream(blockName));
+for (const path of blockPaths) {
+combinedStream.append(fs.createReadStream(path));
 }
 return new BbPromise((resolve, reject) => {
-const destinationStream = fs.createWriteStream(env.diskStorageUri(request));
+const destinationStream = fs.createWriteStream(request.uri);
 destinationStream
@@ -258,5 +248,4 @@ .on('error', (e) => {
 const promises = [];
-const collCommits = this.db.getCollection(StorageTables.Commits);
-const blocks = collCommits.chain()
-.find({ parent: `${request.containerName}-${request.blobName}` })
+const blocks = coll.chain()
+.find({ parentId: request.id })
 .data();
@@ -267,6 +256,6 @@ for (const block of blocks) {
 totalSize += block.size;
-collCommits.update(block);
+coll.update(block);
 } else {
-collCommits.remove(block);
-promises.push(fs.remove(path.join(env.commitsPath, block.name)));
+coll.remove(block);
+promises.push(fs.remove(block.uri));
 }
@@ -287,9 +276,8 @@ }
 const coll = this.db.getCollection(request.containerName)
-const query = this._buildBlockListQuery(request.containerName, request.blobName, request.blockListType);
-const blocks = this.db.getCollection(StorageTables.Commits)
-.chain()
+const query = this._buildBlockListQuery(request.id, request.blockListType);
+const blocks = coll.chain()
 .find(query)
 .data();
-const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.publicName());
+const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 const response = new AzuriteResponse({ proxy: blobProxy, payload: blocks });
@@ -300,3 +288,3 @@ return BbPromise.resolve(response);
 setBlobMetadata(request) {
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.publicName());
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 blobProxy.original.metaProps = request.metaProps;
@@ -309,3 +297,3 @@ coll.update(blobProxy.release());
 getBlobMetadata(request) {
-const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.publicName());
+const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 const response = new AzuriteResponse({ proxy: blobProxy });
@@ -316,3 +304,3 @@ return BbPromise.resolve(response);
 setBlobProperties(request) {
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 request.httpProps[N.CACHE_CONTROL] ? blobProxy.original.cacheControl = request.httpProps[N.CACHE_CONTROL] : delete blobProxy.original.cacheControl;
@@ -359,4 +347,4 @@ request.httpProps[N.CONTENT_TYPE] ? blobProxy.original.contentType = request.httpProps[N.CONTENT_TYPE] : delete blobProxy.original.contentType;
 // Getting overlapping pages (sorted by startByte in ascending order)
-const collPages = this.db.getCollection(StorageTables.Pages);
-const pageRanges = collPages.chain()
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
+const pageRanges = coll.chain()
 .find({
@@ -366,3 +354,3 @@ '$and': [
 { 'start': { '$lte': (endByte + 1) / 512 } },
-{ 'name': { '$eq': `${request.containerName}-${request.blobName}` } }]
+{ 'parentId': { '$eq': request.id } }]
 })
@@ -374,7 +362,6 @@ .sort((a, b) => {
-this._updatePageRanges(collPages, pageRanges, startByte, endByte, `${request.containerName}-${request.blobName}`);
+this._updatePageRanges(coll, pageRanges, startByte, endByte, request.id);
 const pageWriteMode = request.httpProps[N.PAGE_WRITE],
-blobPath = path.join(env.localStoragePath, request.containerName, request.blobName),
-writeStream = fs.createWriteStream(utils.escapeBlobDelimiter(blobPath), {
+writeStream = fs.createWriteStream(request.uri, {
 flags: 'r+',
@@ -398,5 +385,4 @@ start: startByte,
 .on('finish', () => {
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
 // Fixme: Use async / non-blocking method instead
-blobProxy.original.size = fsn.statSync(blobPath).size;
+blobProxy.original.size = fsn.statSync(request.uri).size;
 blobProxy.original.sequenceNumber++;
@@ -414,3 +400,3 @@ coll.update(blobProxy.release());
 let pageRanges;
-const collPages = this.db.getCollection(StorageTables.Pages);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 if (request.httpProps[N.RANGE]) {
@@ -421,3 +407,3 @@ // If range exists it is guaranteed to be well-formed due to PageAlignment validation
 endByte = parseInt(parts[1]);
-pageRanges = collPages.chain()
+pageRanges = coll.chain()
 .find({
@@ -427,3 +413,3 @@ '$and': [
 { 'start': { '$lte': endByte } },
-{ 'name': { '$eq': `${request.containerName}-${request.blobName}` } }]
+{ 'parentId': { '$eq': request.id } }]
 })
@@ -435,4 +421,4 @@ .sort((a, b) => {
 } else {
-pageRanges = collPages.chain()
-.find({ 'name': { '$eq': `${request.containerName}-${request.blobName}` } })
+pageRanges = coll.chain()
+.find({ 'parentId': { '$eq': request.id } })
 .sort((a, b) => {
@@ -444,3 +430,2 @@ return a.start - b.start;
-const { blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
 const response = new AzuriteResponse({ proxy: blobProxy, payload: pageRanges });
@@ -466,22 +451,22 @@ return BbPromise.resolve(response);
 snapshotBlob(request) {
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 const snapshotEntity = StorageEntityGenerator.clone(blobProxy.original);
+const snapshotDate = SnapshotTimeManager.getDate(request.id, new Date(request.now));
 snapshotEntity.snapshot = true;
-const snapshotDate = SnapshotTimeManager.getDate(request.containerName, request.blobName);
-request.enableSnapshot(snapshotDate.toUTCString());
+snapshotEntity.snapshotDate = snapshotDate.toUTCString();
-snapshotEntity.name = request.publicName();
-snapshotEntity.origin = request.blobName;
+snapshotEntity.originId = request.id;
+snapshotEntity.originUri = request.uri;
+snapshotEntity.id = env.snapshotId(request.containerName, request.blobName, snapshotEntity.snapshotDate); // Updating ID due to possibly changed snapshot date
+snapshotEntity.uri = env.diskStorageUri(snapshotEntity.id);
 const snapshotProxy = new BlobProxy(coll.insert(snapshotEntity), request.containerName);
 if (Object.keys(request.metaProps).length > 0) {
 snapshotProxy.original.metaProps = request.metaProps;
-// The etag of the snapshot only changes from the original if metadata was added
+// The etag and last-modified of the snapshot only change from the original if metadata was added
 snapshotProxy.updateETag();
 } else {
 snapshotProxy.original.meta.updated = blobProxy.original.meta.updated;
 snapshotProxy.original.meta.created = blobProxy.original.meta.created;
 }
-const destPath = path.join(env.snapshotPath, snapshotProxy.containerName, snapshotProxy.original.name);
-return fs.ensureDir(path.join(env.snapshotPath, snapshotProxy.containerName))
+return fs.copy(request.uri, snapshotProxy.original.uri)
 .then(() => {
-return fs.copy(env.diskStorageUri(request, true), destPath);
-})
-.then(() => {
 const response = new AzuriteResponse({ proxy: snapshotProxy });
@@ -545,3 +530,3 @@ return response;
 leaseDuration = (request.httpProps[N.LEASE_DURATION]) ? parseInt(request.httpProps[N.LEASE_DURATION]) : undefined;
-const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.blobName);
+const { coll, blobProxy } = this._getCollectionAndBlob(request.containerName, request.id);
 const now = request.now;
@@ -592,2 +577,4 @@
 to = null;
+// TODO: from local storage format is http://127.0.0.1:10000/devstoreaccount1/<container>/<blob>
+// which is identical to external format => fix
 if (source.type === 'external') {
@@ -632,6 +619,6 @@ from = req({ url: source.uri });
-_createOrUpdateBlob(coll, request, blobName) {
-const blob = coll.chain().find({ 'name': { '$eq': blobName } }).data();
+_createOrUpdateBlob(coll, request) {
+const blob = coll.chain().find({ 'id': { '$eq': request.id } }).data();
 if (blob.length > 0) {
-coll.chain().find({ 'name': { '$eq': blobName } }).remove();
+coll.chain().find({ 'id': { '$eq': request.id } }).remove();
 }
@@ -648,3 +635,3 @@ const entity = StorageEntityGenerator.generateStorageEntity(request);
 * @param {String} containerName
-* @param {String} blobName
+* @param {String} id
 * @returns
@@ -654,3 +641,3 @@ *
 */
-_getCollectionAndBlob(containerName, blobName) {
+_getCollectionAndBlob(containerName, id) {
 const coll = this.db.getCollection(containerName);
@@ -664,3 +651,3 @@ if (!coll) {
 const result = coll.chain()
-.find({ name: blobName })
+.find({ id: id })
 .data();
@@ -692,8 +679,8 @@ return {
-_updatePageRanges(collPages, pageRanges, startByte, endByte, name) {
+_updatePageRanges(coll, pageRanges, startByte, endByte, id) {
 const startAlignment = startByte / 512,
 endAlignment = (endByte + 1) / 512;
-collPages.remove(pageRanges);
-collPages.insert({
-name: name,
+coll.remove(pageRanges);
+coll.insert({
+parentId: id,
 start: startAlignment,
@@ -705,4 +692,4 @@ end: endAlignment
 if (firstPage && startAlignment > firstPage.start) {
-collPages.insert({
-name: name,
+coll.insert({
+parentId: id,
 start: firstPage.start,
@@ -713,4 +700,4 @@ end: endAlignment - 1
 if (lastPage && endAlignment < lastPage.end) {
-collPages.insert({
-name: name,
+coll.insert({
+parentId: id,
 start: endAlignment + 1,
@@ -722,6 +709,6 @@ end: lastPage.end
-_buildBlockListQuery(containerName, blobName, blockListType) {
+_buildBlockListQuery(id, blockListType) {
 let query = {
 '$and': [{
-parent: `${containerName}-${blobName}`
+parentId: id
 },
@@ -728,0 +715,0 @@ {
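Most of the StorageManager changes are mechanical (name-based lookups become ID-based ones), but the Put Block List path is worth spelling out: the staged block files are now located purely via their IDs and streamed into the destination blob with combined-stream. A trimmed-down sketch of that step (function name and paths are illustrative; `combined-stream` is the package already used above):

```js
// Trimmed-down sketch of the block-commit streaming above.
const fs = require('fs');
const CombinedStream = require('combined-stream');

function concatBlocks(blockPaths, destinationPath) {
    return new Promise((resolve, reject) => {
        const combinedStream = CombinedStream.create();
        for (const blockPath of blockPaths) {
            combinedStream.append(fs.createReadStream(blockPath)); // staged blocks, in list order
        }
        const destinationStream = fs.createWriteStream(destinationPath);
        destinationStream.on('finish', resolve).on('error', reject);
        combinedStream.pipe(destinationStream);
    });
}

// e.g. concatBlocks([env.diskStorageUri(blockId1), env.diskStorageUri(blockId2)],
//                   env.diskStorageUri(blobId));
```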
@@ -5,20 +5,2 @@ 'use strict';
-/**
-* Not all allowed delimiters for blob names are valid file names. We thus replace those that are invalid with the valid
-* delimiter @ on disk. Note that in our in-memory database and thus for the external interface we still
-* use the originally chosen delimiter.
-*/
-exports.escapeBlobDelimiter = (blobPath) => {
-if (process.platform === 'win32') {
-const pathWithoutLetter = blobPath.substr(2);
-if (pathWithoutLetter === '') {
-return blobPath;
-}
-return (blobPath.substr(0, 2) + pathWithoutLetter.replace(/(::|:|\/|\||\/\/)/g, '@'));
-// LINUX / OS X
-} else {
-return blobPath.replace(/(::|:|\||\$)/g, '@');
-}
-}
 exports.computeEtag = (templateString) => {
@@ -25,0 +7,0 @@ return crypto
@@ -30,3 +30,3 @@ 'use strict';
 // If this header (x-ms-delete-snapshots) is not specified on the request and the blob has associated snapshots, the Blob service returns status code 409 (Conflict).
-const snapshots = collection.chain().find({ 'origin': { '$eq': request.blobName } }).data();
+const snapshots = collection.chain().find({ 'originId': { '$eq': request.id } }).data();
 // If the blob has associated snapshots...
@@ -33,0 +33,0 @@ if (snapshots.length > 0) {
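The delete-snapshots guard now finds a blob's snapshots via `originId`. In context, the check described by the comment above amounts to something like this (a sketch; the real module's error type is not shown in this excerpt):

```js
// Hedged sketch of the guard described above (collection is the container's LokiJS collection).
function checkDeleteSnapshots(collection, request) {
    const snapshots = collection.chain().find({ 'originId': { '$eq': request.id } }).data();
    if (snapshots.length > 0 && request.httpProps['x-ms-delete-snapshots'] === undefined) {
        const err = new Error('The blob has snapshots and x-ms-delete-snapshots was not specified.');
        err.statusCode = 409; // Conflict, per the comment above
        throw err;
    }
}
```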
 {
 "name": "azurite",
-"version": "0.9.9",
+"version": "0.9.10",
 "description": "A lightweight server clone of Azure Blob Storage that simulates most of the commands supported by it with minimal dependencies.",
@@ -11,3 +11,3 @@ "scripts": {
 "nuget": "cross-var \"npm run clean && pkg -t node6-win --output azurite ./package.json && nuget pack -Version $npm_package_version && nuget push *.nupkg -Source https://www.nuget.org/api/v2/package\"",
-"docker": "cross-var \"docker build -t arafato/azurite:$npm_package_version . && docker build -t arafato/azurite:latest . && docker push arafato/azurite$npm_package_version && docker push arafato/azurite_latest\""
+"docker": "cross-var \"docker build -t arafato/azurite:$npm_package_version . && docker build -t arafato/azurite:latest . && docker push arafato/azurite:$npm_package_version && docker push arafato/azurite:latest\""
 },
@@ -14,0 +14,0 @@ "engines": {
@@ -186,3 +186,3 @@ # Azurite
-- Copy Blob [TODO]
+- Copy Blob [IN-PROGRESS]
 Copies a source blob to a destination blob in this storage account or in another storage account.
@@ -189,0 +189,0 @@