Comparing version 0.5.4 to 0.6.0
@@ -159,2 +159,3 @@ /**
+        d.update({ docNumber: order[i] }, { docNumber: order[i] }, options, function (err, nr) {
           if (err) { return cb(err); }
           if (nr !== 1) { return cb('One update didnt work'); }
@@ -189,5 +190,6 @@ executeAsap(function () {
         d.remove({ docNumber: order[i] }, options, function (err, nr) {
           if (err) { return cb(err); }
           if (nr !== 1) { return cb('One remove didnt work'); }
-          d.insert({ docNumber: order[i] }, function (err) {   // Reinserting just removed document so that the collection size doesn't change
-            // Time is about 70x smaller for an insert so the impact on the results is minimal
+          d.insert({ docNumber: order[i] }, function (err) {   // We need to reinsert the doc so that we keep the collection's size at n
+            // So actually we're calculating the average time taken by one insert + one remove
             executeAsap(function () {
@@ -194,0 +196,0 @@ runFrom(i + 1);
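A note on the measurement change above: since each timed removal now includes a reinsert, the benchmark really reports the average of one remove plus one insert, exactly as the reworded comment says. Without an index that barely matters (per the README figures below, roughly 0.14 ms of insert against 10 ms of remove, an overhead of 1-2%), but with an index an insert costs about as much as a remove, so the old "70x smaller" justification no longer holds and the measured figure is closer to the sum of the two operations.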
@@ -10,7 +10,18 @@ var Datastore = require('../lib/datastore')
   , d = new Datastore(benchDb)
-  , n = 10000
+  , program = require('commander')
+  , n
   ;
-if (process.argv[2]) { n = parseInt(process.argv[2], 10); }
+program
+  .option('-n --number [number]', 'Size of the collection to test on', parseInt)
+  .option('-i --with-index', 'Test with an index')
+  .parse(process.argv);
+
+n = program.number || 10000;
+
+console.log("----------------------------");
+console.log("Test with " + n + " documents");
+console.log(program.withIndex ? "Use an index" : "Don't use an index");
+console.log("----------------------------");

 async.waterfall([
@@ -17,0 +28,0 @@ async.apply(commonUtilities.prepareDb, benchDb)
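With commander wired in as above, the benchmark scripts now take `-n, --number` and `-i, --with-index` flags instead of a bare positional argument; for example `node benchmarks/insert.js -n 50000 -i` (script name illustrative) runs on 50,000 documents with an index, and `--help` prints the available options.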
@@ -45,7 +45,7 @@ var Datastore = require('../lib/datastore')
   // Test with multiple documents
-//, async.apply(commonUtilities.prepareDb, benchDb)
-//, function (cb) { d.loadDatabase(cb); }
-//, async.apply(commonUtilities.insertDocs, d, n, profiler)
-//, function (cb) { profiler.step('MULTI: TRUE'); return cb(); }
-//, async.apply(commonUtilities.updateDocs, { multi: true }, d, n, profiler)
+, async.apply(commonUtilities.prepareDb, benchDb)
+, function (cb) { d.loadDatabase(cb); }
+, async.apply(commonUtilities.insertDocs, d, n, profiler)
+, function (cb) { profiler.step('MULTI: TRUE'); return cb(); }
+, async.apply(commonUtilities.removeDocs, { multi: true }, d, n, profiler)
 ], function (err) {
@@ -52,0 +52,0 @@ profiler.step("Benchmark finished");
@@ -40,2 +40,4 @@ var Datastore = require('../lib/datastore')
+// CHECK THAT MULTIPLE LOAD DATABASE DONT SCREW INDEXES
+
 // Test with update only one document
@@ -42,0 +44,0 @@ , function (cb) { profiler.step('MULTI: FALSE'); return cb(); }
@@ -15,6 +15,12 @@ var fs = require('fs')
  * Create a new collection
+ * If a filename is provided, persist it to disk
+ * Otherwise keep it in memory (data will be lost when the application stops)
  */
 function Datastore (filename) {
-  this.filename = filename;
-  this.data = [];
+  if (!filename || typeof filename !== 'string' || filename.length === 0) {
+    this.filename = null;
+  } else {
+    this.filename = filename;
+  }
+
   this.executor = new Executor();
@@ -27,3 +33,6 @@
   // Indexed by field name, dot notation can be used
+  // _id is always indexed and since _ids are generated randomly the underlying
+  // binary search tree is always well-balanced
   this.indexes = {};
+  this.indexes._id = new Index({ fieldName: '_id', unique: true });
 }
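A quick usage sketch of the constructor semantics above (paths illustrative; `require('nedb')` per the package name in package.json below):

```javascript
var Datastore = require('nedb');

var inMemory = new Datastore();                // no filename: data lives in memory only
var persistent = new Datastore('db/app.db');   // any non-empty string: persisted to disk

persistent.loadDatabase(function (err) {
  // Read the datafile (or create it if missing) before issuing commands
});
```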
@@ -33,2 +42,10 @@
 /**
+ * Get an array of all the data in the database
+ */
+Datastore.prototype.getAllData = function () {
+  return this.indexes._id.getAll();
+};
+
+/**
  * Reset all currently defined indexes
@@ -48,18 +65,27 @@ */
+ * For now this function is synchronous, we need to test how much time it takes
+ * We use an async API for consistency with the rest of the code
  * @param {String} options.fieldName
  * @param {Boolean} options.unique
  * @param {Boolean} options.sparse
- * @return {Boolean} true if index was created or already exists, false otherwise
+ * @param {Function} cb Optional callback, signature: err
  */
-Datastore.prototype.ensureIndex = function (options) {
+Datastore.prototype.ensureIndex = function (options, cb) {
+  var callback = cb || function () {};
+
   options = options || {};

-  if (!options.fieldName) { return false; }
-  if (this.indexes[options.fieldName]) { return true; }
+  if (!options.fieldName) { return callback({ missingFieldName: true }); }
+  if (this.indexes[options.fieldName]) { return callback(); }

   options.datastore = this;
   this.indexes[options.fieldName] = new Index(options);
-  this.indexes[options.fieldName].insert(this.data);

-  return true;
+  try {
+    this.indexes[options.fieldName].insert(this.getAllData());
+  } catch (e) {
+    delete this.indexes[options.fieldName];
+    return callback(e);
+  }
+
+  return callback();
 };
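The signature change above turns `ensureIndex` from a boolean-returning call into a callback-style one. A sketch of the new contract, using only the error shapes visible in this diff:

```javascript
d.ensureIndex({ fieldName: 'somefield', unique: true }, function (err) {
  // err is not set on success, { missingFieldName: true } if fieldName was omitted,
  // or the index's own error (errorType 'uniqueViolated') if already-inserted
  // documents clash; in that case the half-built index is deleted again
});
```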
@@ -72,7 +98,24 @@ | ||
Datastore.prototype.addToIndexes = function (doc) { | ||
var self = this; | ||
var i, failingIndex, error | ||
, keys = Object.keys(this.indexes) | ||
; | ||
Object.keys(this.indexes).forEach(function (i) { | ||
self.indexes[i].insert(doc); | ||
}); | ||
for (i = 0; i < keys.length; i += 1) { | ||
try { | ||
this.indexes[keys[i]].insert(doc); | ||
} catch (e) { | ||
failingIndex = i; | ||
error = e; | ||
break; | ||
} | ||
} | ||
// If an error happened, we need to rollback the insert on all other indexes | ||
if (error) { | ||
for (i = 0; i < failingIndex; i += 1) { | ||
this.indexes[keys[i]].remove(doc); | ||
} | ||
throw error; | ||
} | ||
}; | ||
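A sketch of what that rollback loop guarantees, assuming a hypothetical datastore with two unique indexes (setup not taken from the diff):

```javascript
var d = new Datastore('workspace/rollback.db');   // illustrative path

d.ensureIndex({ fieldName: 'a', unique: true });
d.ensureIndex({ fieldName: 'b', unique: true });

d.insert({ a: 1, b: 1 }, function () {
  // This one clashes on 'b' only: the insertion into the 'a' index succeeds
  // first, then 'b' throws, and the loop above removes the doc from 'a' again
  d.insert({ a: 2, b: 1 }, function (err) {
    // err.errorType === 'uniqueViolated', and no index was left half-updated
  });
});
```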
@@ -94,10 +137,28 @@
 /**
- * Update a document in all indexes
+ * Update one or several documents in all indexes
+ * If one update violates a constraint, all changes are rolled back
  */
-Datastore.prototype.removeFromIndexes = function (doc, newDoc) {
-  var self = this;
+Datastore.prototype.updateIndexes = function (oldDoc, newDoc) {
+  var i, failingIndex, error
+    , keys = Object.keys(this.indexes)
+    ;

-  Object.keys(this.indexes).forEach(function (i) {
-    self.indexes[i].update(doc, newDoc);
-  });
+  for (i = 0; i < keys.length; i += 1) {
+    try {
+      this.indexes[keys[i]].update(oldDoc, newDoc);
+    } catch (e) {
+      failingIndex = i;
+      error = e;
+      break;
+    }
+  }
+
+  // If an error happened, we need to rollback the update on all other indexes
+  if (error) {
+    for (i = 0; i < failingIndex; i += 1) {
+      this.indexes[keys[i]].revertUpdate(oldDoc, newDoc);
+    }
+
+    throw error;
+  }
 };
@@ -116,3 +177,3 @@
-  if (indexNames.length === 0) { return this.data; }   // No index defined, no specific candidate
+  if (indexNames.length <= 1) { return this.getAllData(); }   // No index defined (except _id), no specific candidate
@@ -132,3 +193,3 @@ // Usable query keys are the ones corresponding to a basic query (no use of $operators or arrays)
   } else {
-    return this.data;
+    return this.getAllData();
   }
@@ -155,14 +216,23 @@ };
     if (!exists) {
-      self.data = [];
+      self.resetIndexes();
       self.datafileSize = 0;
       fs.writeFile(self.filename, '', 'utf8', function (err) { return callback(err); });
-    } else {
-      fs.readFile(self.filename, 'utf8', function (err, rawData) {
-        if (err) { return callback(err); }
-        self.data = Datastore.treatRawData(rawData);
-        self.datafileSize = self.data.length;
-        self.resetIndexes(self.data);
-        self.persistCachedDatabase(callback);
-      });
+      return;
     }

+    fs.readFile(self.filename, 'utf8', function (err, rawData) {
+      if (err) { return callback(err); }
+      var treatedData = Datastore.treatRawData(rawData);
+
+      try {
+        self.resetIndexes(treatedData);
+      } catch (e) {
+        self.resetIndexes();   // Rollback any index which didn't fail
+        self.datafileSize = 0;
+        return callback(e);
+      }
+
+      self.datafileSize = treatedData.length;
+      self.persistCachedDatabase(callback);
+    });
   });
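The reworked `loadDatabase` above makes index violations in the datafile a hard error instead of leaving a half-populated index behind. A sketch of the observable behaviour:

```javascript
d.loadDatabase(function (err) {
  // err: an fs error if the file couldn't be read, or the index's error
  // (e.g. errorType 'uniqueViolated') if persisted docs break an ensured index;
  // on index failure every index is reset and datafileSize is zeroed.
  // On success the freshly read data is immediately rewritten to disk
  // (persistCachedDatabase), compacting the append-only file.
});
```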
@@ -222,3 +292,3 @@ });
-  this.data.forEach(function (doc) {
+  this.getAllData().forEach(function (doc) {
     toPersist += model.serialize(doc) + '\n';
@@ -230,3 +300,28 @@ });
   fs.writeFile(this.filename, toPersist, function (err) { return callback(err); });
 };

+/**
+ * Persist new state for the given newDocs (can be insertion, update or removal)
+ * Use an append-only format
+ * @param {Array} newDocs Can be empty if no doc was updated/removed
+ * @param {Function} cb Optional, signature: err
+ */
+Datastore.prototype.persistNewState = function (newDocs, cb) {
+  var self = this
+    , toPersist = ''
+    , callback = cb || function () {}
+    ;
+
+  self.datafileSize += newDocs.length;
+
+  newDocs.forEach(function (doc) {
+    toPersist += model.serialize(doc) + '\n';
+  });
+
+  if (toPersist.length === 0) { return callback(); }
+
+  fs.appendFile(self.filename, toPersist, 'utf8', function (err) {
+    return callback(err);
+  });
+};
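`persistNewState`, moved up and generalized above, is the heart of the append-only format: every accepted write appends one serialized line per document and bumps `datafileSize`; nothing on disk is rewritten until the next load. A sketch of the effect (`$set` as in NeDB's documented update API):

```javascript
d.insert({ planet: 'Mars' }, function () {   // appends one line to the datafile
  d.update({ planet: 'Mars' }, { $set: { inhabited: false } }, {}, function () {
    // appends the modified doc as a new line; the stale first line stays on
    // disk until loadDatabase() compacts the file via persistCachedDatabase
  });
});
```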
@@ -244,3 +339,2 @@
     , self = this
-    , persistableNewDoc
     , insertedDoc
@@ -252,3 +346,4 @@ ;
     newDoc._id = customUtils.uid(16);
-    persistableNewDoc = model.serialize(newDoc);
+    model.checkObject(newDoc);
+    insertedDoc = model.deepCopy(newDoc);
   } catch (e) {
@@ -258,11 +353,8 @@ return callback(e);
-  insertedDoc = model.deserialize(persistableNewDoc);
+  // Insert in all indexes (also serves to ensure uniqueness)
+  try { self.addToIndexes(insertedDoc); } catch (e) { return callback(e); }

-  fs.appendFile(self.filename, persistableNewDoc + '\n', 'utf8', function (err) {
+  this.persistNewState([newDoc], function (err) {
     if (err) { return callback(err); }
-    self.data.push(insertedDoc);
-    self.addToIndexes(insertedDoc);
-    self.datafileSize += 1;
-    return callback(null, model.deepCopy(insertedDoc));
+    return callback(null, newDoc);
   });
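The new insert path is: validate (`model.checkObject`), copy, index with rollback (`addToIndexes`), then persist (`persistNewState`). The caller-visible contract, sketched:

```javascript
d.insert({ somefield: 'nedb' }, function (err, newDoc) {
  // On success, newDoc carries the generated _id (customUtils.uid(16)).
  // If an index rejected the doc, err is set and nothing was appended to the
  // datafile, because indexing runs (and can roll back) before persistence.
});
```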
@@ -326,28 +418,2 @@ };
-/**
- * Persist new state for the given newDocs (can be update or removal)
- * Use an append-only format
- * @param {Array} newDocs Can be empty if no doc was updated/removed
- * @param {Function} cb Optional, signature: err
- */
-Datastore.prototype.persistNewState = function (newDocs, cb) {
-  var self = this
-    , toPersist = ''
-    , callback = cb || function () {}
-    ;
-
-  self.datafileSize += newDocs.length;
-
-  newDocs.forEach(function (doc) {
-    toPersist += model.serialize(doc) + '\n';
-  });
-
-  if (toPersist.length === 0) { return callback(); }
-
-  fs.appendFile(self.filename, toPersist, 'utf8', function (err) {
-    return callback(err);
-  });
-};

 /**
  * Update all docs matching query
@@ -398,4 +464,6 @@ * For now, very naive implementation (recalculating the whole database)
     , function () {   // Perform the update
-        candidates = self.getCandidates(query)
+        var modifiedDoc;
+
+        candidates = self.getCandidates(query);

         try {
@@ -405,4 +473,5 @@ for (i = 0; i < candidates.length; i += 1) {
             numReplaced += 1;
-            candidates[i] = model.modify(candidates[i], updateQuery);
-            updatedDocs.push(candidates[i]);
+            modifiedDoc = model.modify(candidates[i], updateQuery);
+            self.updateIndexes(candidates[i], modifiedDoc);
+            updatedDocs.push(modifiedDoc);
           }
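With `updateIndexes` now called per modified candidate, a constraint violation during an update surfaces as an error instead of silently corrupting indexes. A sketch, assuming a unique index and the update signature used elsewhere in this diff:

```javascript
d.update({ planet: 'Mars' }, { $set: { name: 'alreadyTaken' } }, {}, function (err, numReplaced) {
  // If the modified doc violates a unique index, updateIndexes throws,
  // that document's own index changes are reverted, and err reports
  // errorType 'uniqueViolated'; otherwise numReplaced counts modified docs
});
```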
@@ -439,7 +508,6 @@ }
     , self = this
-    //, candidates = this.getCandidates(query)
     , numRemoved = 0
     , multi
-    , newData = []
     , removedDocs = []
+    , candidates = this.getCandidates(query)
     ;
@@ -452,3 +520,3 @@
   try {
-    self.data.forEach(function (d) {
+    candidates.forEach(function (d) {
       if (model.match(d, query) && (multi || numRemoved === 0)) {
@@ -458,13 +526,8 @@ numRemoved += 1;
         self.removeFromIndexes(d);
-      } else {
-        newData.push(d);
       }
     });
-  } catch (err) {
-    return callback(err);
-  }
+  } catch (err) { return callback(err); }

   self.persistNewState(removedDocs, function (err) {
     if (err) { return callback(err); }
-    self.data = newData;
     return callback(null, numRemoved);
@@ -471,0 +534,0 @@ });
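`remove` now iterates `getCandidates(query)` rather than the full data array, so indexed queries only touch likely matches, and removals are persisted append-only through `persistNewState`. Usage is unchanged:

```javascript
d.remove({ somefield: 'value' }, { multi: true }, function (err, numRemoved) {
  // With an index on 'somefield', candidates come straight from that index;
  // each removed doc is also taken out of every index via removeFromIndexes
});
```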
@@ -8,7 +8,6 @@ var BinarySearchTree = require('binary-search-tree').BinarySearchTree
 /**
- * We can't use the one in model here since it doesn't work for arrays
- * Two indexed pointers are equal iif they point to the same place
  */
 function checkValueEquality (a, b) {
-  return a === b;
+  return model.compareThings(a, b) === 0;
 }
@@ -43,2 +42,3 @@
  * @param {Document or Array of documents} newData Optional, data to initialize the index with
+ * If an error is thrown during insertion, the index is not modified
  */
@@ -55,3 +55,3 @@ Index.prototype.reset = function (newData) {
  * Insert a new document in the index
- * If an array is passed, we insert all its elements
+ * If an array is passed, we insert all its elements (if one insertion fails the index is not modified)
  * O(log(n))
@@ -62,3 +62,3 @@ */
-  if (util.isArray(doc)) { doc.forEach(function (d) { self.insert(d); }); return; }
+  if (util.isArray(doc)) { this.insertMultipleDocs(doc); return; }
@@ -78,3 +78,32 @@ key = model.getDotValue(doc, this.fieldName);
+/**
+ * Insert an array of documents in the index
+ * If a constraint is violated, an error should be thrown and the changes rolled back
+ */
+Index.prototype.insertMultipleDocs = function (docs) {
+  var i, error, failingI;
+
+  for (i = 0; i < docs.length; i += 1) {
+    try {
+      this.insert(docs[i]);
+    } catch (e) {
+      error = e;
+      failingI = i;
+      break;
+    }
+  }
+
+  if (error) {
+    for (i = 0; i < failingI; i += 1) {
+      this.remove(docs[i]);
+    }
+
+    throw error;
+  }
+};
+
 /**
  * Remove a document from the index
  * If an array is passed, we remove all its elements
+ * The remove operation is safe with regards to the 'unique' constraint
  * O(log(n))
@@ -100,7 +129,16 @@ */
  * Update a document in the index
- * O(log(n))
+ * If a constraint is violated, changes are rolled back and an error thrown
+ * Naive implementation, still in O(log(n))
  */
 Index.prototype.update = function (oldDoc, newDoc) {
+  if (util.isArray(oldDoc)) { this.updateMultipleDocs(oldDoc); return; }
+
   this.remove(oldDoc);
-  this.insert(newDoc);
+
+  try {
+    this.insert(newDoc);
+  } catch (e) {
+    this.insert(oldDoc);
+    throw e;
+  }
 };
@@ -110,2 +148,57 @@
 /**
+ * Update multiple documents in the index
+ * If a constraint is violated, the changes need to be rolled back
+ * and an error thrown
+ * @param {Array of oldDoc, newDoc pairs} pairs
+ */
+Index.prototype.updateMultipleDocs = function (pairs) {
+  var i, failingI, error;
+
+  for (i = 0; i < pairs.length; i += 1) {
+    this.remove(pairs[i].oldDoc);
+  }
+
+  for (i = 0; i < pairs.length; i += 1) {
+    try {
+      this.insert(pairs[i].newDoc);
+    } catch (e) {
+      error = e;
+      failingI = i;
+      break;
+    }
+  }
+
+  // If an error was raised, roll back changes in the inverse order
+  if (error) {
+    for (i = 0; i < failingI; i += 1) {
+      this.remove(pairs[i].newDoc);
+    }
+
+    for (i = 0; i < pairs.length; i += 1) {
+      this.insert(pairs[i].oldDoc);
+    }
+
+    throw error;
+  }
+};
+
+/**
+ * Revert an update
+ */
+Index.prototype.revertUpdate = function (oldDoc, newDoc) {
+  var revert = [];
+
+  if (!util.isArray(oldDoc)) {
+    this.update(newDoc, oldDoc);
+  } else {
+    oldDoc.forEach(function (pair) {
+      revert.push({ oldDoc: pair.newDoc, newDoc: pair.oldDoc });
+    });
+    this.update(revert);
+  }
+};
+
+/**
  * Get all documents in index that match the query on fieldName
@@ -115,3 +208,3 @@ * For now only works with field equality (i.e. can't use the index for $lt query for example)
  * @param {Thing} value Value to match the key against
- * @return {Array od documents}
+ * @return {Array of documents}
  */
@@ -123,3 +216,24 @@ Index.prototype.getMatching = function (value) {
+/**
+ * Get all elements in the index
+ * @return {Array of documents}
+ */
+Index.prototype.getAll = function () {
+  var res = [];
+
+  this.tree.executeOnEveryNode(function (node) {
+    var i;
+
+    for (i = 0; i < node.data.length; i += 1) {
+      res.push(node.data[i]);
+    }
+  });
+
+  return res;
+};
+
 // Interface
 module.exports = Index;
 {
   "name": "nedb",
-  "version": "0.5.4",
+  "version": "0.6.0",
   "author": {
@@ -25,3 +25,3 @@ "name": "tldr.io",
     "underscore": "~1.4.4",
-    "binary-search-tree": "0.1.2"
+    "binary-search-tree": "0.1.4"
   },
@@ -28,0 +28,0 @@ "devDependencies": {
@@ -19,4 +19,11 @@ # NeDB (Node embedded database)
 ## API
-It's a subset of MongoDB's API (the most used operations). The current API will not change, but I will add operations as they are needed.
+It's a subset of MongoDB's API (the most used operations). The current API will not change, but I will add operations as they are needed. Summary of the API:
+
+* <a href="#creatingloading-a-database">Creating/loading a database</a>
+* <a href="#inserting-documents">Inserting documents</a>
+* <a href="#finding-documents">Finding documents</a>
+* <a href="#updating-documents">Updating documents</a>
+* <a href="#removing-documents">Removing documents</a>
+* <a href="#indexing">Indexing</a>

 ### Creating/loading a database
@@ -269,16 +276,52 @@ ```javascript
+### Indexing
+
+NeDB supports indexing. It gives a very nice speed boost and can be used to enforce a unique constraint on a field. You can index any field, including fields in nested documents, using dot notation. For now, indexes are only used for value equality, but I am planning on adding value comparison soon.
+
+Also note that if you use a unique constraint on a field, you will only be able to save one document in which that field is `undefined`. The second time you do that, the index will reject the document since there is already one with the `undefined` value. I am working on a "sparse" option just like the MongoDB one, enabling indexes to check uniqueness only when the field is defined.
+
+Finally, the `_id` field is always indexed with a unique constraint, so queries specifying a value for it are very fast.
+
+```javascript
+// The syntax is close, but not identical, to MongoDB's
+// fieldName is of course required
+d.ensureIndex({ fieldName: 'somefield' }, function (err) {
+  // If there was an error, err is not null
+});
+
+// Using a unique constraint with the index
+d.ensureIndex({ fieldName: 'somefield', unique: true }, function (err) {
+});
+
+// The ensureIndex method can be called whenever you want: before or after a loadDatabase(),
+// after some data was inserted/modified/removed. It will fail to create the index if the
+// unique constraint is not satisfied
+
+// Format of the error message when the unique constraint is not met
+d.insert({ name: 'nedb' }, function (err) {
+  // err is null
+  d.insert({ name: 'nedb' }, function (err) {
+    // err is { errorType: 'uniqueViolated'
+    //        , key: 'name'
+    //        , message: 'Unique constraint violated for key name' }
+  });
+});
+```
+
+**Note:** the `ensureIndex` function creates the index synchronously, so it's best to use it at application startup. It's quite fast, so it doesn't increase startup time much (35 ms for a collection containing 10,000 documents).
 ## Performance
 ### Speed

-**NeDB is not intended to be a replacement of large-scale databases such as MongoDB!** Its goal is to provide you with a clean and easy way to query data and persist it to disk, for applications that do not need lots of concurrent connections, for example a <a href="https://github.com/louischatriot/braindead-ci" target="_blank">continuous integration and deployment server</a>.
+**NeDB is not intended to be a replacement of large-scale databases such as MongoDB!** Its goal is to provide you with a clean and easy way to query data and persist it to disk, for web applications that do not need lots of concurrent connections, for example a <a href="https://github.com/louischatriot/braindead-ci" target="_blank">continuous integration and deployment server</a> and desktop applications built with <a href="https://github.com/rogerwang/node-webkit" target="_blank">Node Webkit</a>.

-As such, it was not designed for speed. That said, it is still pretty fast on the expected datasets (10,000
-documents). On my machine (3 years old, no SSD), with a collection
+As such, it was not designed for speed. That said, it is still pretty fast on the expected datasets, especially if you use indexing. On my machine (3 years old, no SSD), with a collection
 containing 10,000 documents:

-* An insert takes **0.14 ms** (or **0.16 ms** with indexing)
-* A read takes **6.4 ms** (or **0.02 ms** with indexing)
-* An update takes **9.2 ms** (or **0.2 ms** with indexing)
-* A deletion takes 8.1 ms (no speed boost with indexes currently due to the underlying data structure which I will change)
+* An insert takes **0.14 ms** without indexing, **0.16 ms** with indexing
+* A read takes **6.4 ms** without indexing, **0.02 ms** with indexing
+* An update takes **11 ms** without indexing, **0.22 ms** with indexing
+* A deletion takes **10 ms** without indexing, **0.14 ms** with indexing

-You can run the simple benchmarks I use by executing the scripts in the `benchmarks` folder. They all take an optional parameter which is the size of the dataset to use (default is 10,000).
+You can run the simple benchmarks I use by executing the scripts in the `benchmarks` folder. Run them with the `--help` flag to see how they work.
@@ -285,0 +328,0 @@ ### Memory footprint
@@ -108,2 +108,21 @@ var Index = require('../lib/indexes')
+    it('When inserting an array of elements, if an error is thrown all inserts need to be rolled back', function () {
+      var idx = new Index({ fieldName: 'tf', unique: true })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc2b = { a: 84, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        ;
+
+      try {
+        idx.insert([doc1, doc2, doc2b, doc3]);
+      } catch (e) {
+        e.errorType.should.equal('uniqueViolated');
+      }
+      idx.tree.getNumberOfKeys().should.equal(0);
+      assert.deepEqual(idx.tree.search('hello'), []);
+      assert.deepEqual(idx.tree.search('world'), []);
+      assert.deepEqual(idx.tree.search('bloup'), []);
+    });

   });   // ==== End of 'Insertion' ==== //
@@ -224,2 +243,181 @@
+    it('If a simple update violates a unique constraint, changes are rolled back and an error thrown', function () {
+      var idx = new Index({ fieldName: 'tf', unique: true })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        , bad = { a: 23, tf: 'world' }
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      assert.deepEqual(idx.tree.search('hello'), [doc1]);
+      assert.deepEqual(idx.tree.search('world'), [doc2]);
+      assert.deepEqual(idx.tree.search('bloup'), [doc3]);
+
+      try {
+        idx.update(doc3, bad);
+      } catch (e) {
+        e.errorType.should.equal('uniqueViolated');
+      }
+
+      // No change
+      idx.tree.getNumberOfKeys().should.equal(3);
+      assert.deepEqual(idx.tree.search('hello'), [doc1]);
+      assert.deepEqual(idx.tree.search('world'), [doc2]);
+      assert.deepEqual(idx.tree.search('bloup'), [doc3]);
+    });
+
+    it('Can update an array of documents', function () {
+      var idx = new Index({ fieldName: 'tf' })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        , doc1b = { a: 23, tf: 'world' }
+        , doc2b = { a: 1, tf: 'changed' }
+        , doc3b = { a: 44, tf: 'bloup' }
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+      idx.tree.getNumberOfKeys().should.equal(3);
+
+      idx.update([{ oldDoc: doc1, newDoc: doc1b }, { oldDoc: doc2, newDoc: doc2b }, { oldDoc: doc3, newDoc: doc3b }]);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc1b);
+      idx.getMatching('changed').length.should.equal(1);
+      idx.getMatching('changed')[0].should.equal(doc2b);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3b);
+    });
+
+    it('If a unique constraint is violated during an array-update, all changes are rolled back and an error thrown', function () {
+      var idx = new Index({ fieldName: 'tf', unique: true })
+        , doc0 = { a: 432, tf: 'notthistoo' }
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        , doc1b = { a: 23, tf: 'changed' }
+        , doc2b = { a: 1, tf: 'changed' }   // Will violate the constraint (first try)
+        , doc2c = { a: 1, tf: 'notthistoo' }   // Will violate the constraint (second try)
+        , doc3b = { a: 44, tf: 'alsochanged' }
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+      idx.tree.getNumberOfKeys().should.equal(3);
+
+      try {
+        idx.update([{ oldDoc: doc1, newDoc: doc1b }, { oldDoc: doc2, newDoc: doc2b }, { oldDoc: doc3, newDoc: doc3b }]);
+      } catch (e) {
+        e.errorType.should.equal('uniqueViolated');
+      }
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('hello').length.should.equal(1);
+      idx.getMatching('hello')[0].should.equal(doc1);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc2);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3);
+
+      try {
+        idx.update([{ oldDoc: doc1, newDoc: doc1b }, { oldDoc: doc2, newDoc: doc2c }, { oldDoc: doc3, newDoc: doc3b }]);
+      } catch (e) {
+        e.errorType.should.equal('uniqueViolated');
+      }
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('hello').length.should.equal(1);
+      idx.getMatching('hello')[0].should.equal(doc1);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc2);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3);
+    });
+
+    it('If an update doesnt change a document, the unique constraint is not violated', function () {
+      var idx = new Index({ fieldName: 'tf', unique: true })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        , noChange = { a: 8, tf: 'world' }
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+      idx.tree.getNumberOfKeys().should.equal(3);
+      assert.deepEqual(idx.tree.search('world'), [doc2]);
+
+      idx.update(doc2, noChange);   // No error thrown
+      idx.tree.getNumberOfKeys().should.equal(3);
+      assert.deepEqual(idx.tree.search('world'), [noChange]);
+    });
+
+    it('Can revert simple and batch updates', function () {
+      var idx = new Index({ fieldName: 'tf' })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        , doc1b = { a: 23, tf: 'world' }
+        , doc2b = { a: 1, tf: 'changed' }
+        , doc3b = { a: 44, tf: 'bloup' }
+        , batchUpdate = [{ oldDoc: doc1, newDoc: doc1b }, { oldDoc: doc2, newDoc: doc2b }, { oldDoc: doc3, newDoc: doc3b }]
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+      idx.tree.getNumberOfKeys().should.equal(3);
+
+      idx.update(batchUpdate);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc1b);
+      idx.getMatching('changed').length.should.equal(1);
+      idx.getMatching('changed')[0].should.equal(doc2b);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3b);
+
+      idx.revertUpdate(batchUpdate);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('hello').length.should.equal(1);
+      idx.getMatching('hello')[0].should.equal(doc1);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc2);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3);
+
+      // Now a simple update
+      idx.update(doc2, doc2b);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('hello').length.should.equal(1);
+      idx.getMatching('hello')[0].should.equal(doc1);
+      idx.getMatching('changed').length.should.equal(1);
+      idx.getMatching('changed')[0].should.equal(doc2b);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3);
+
+      idx.revertUpdate(doc2, doc2b);
+
+      idx.tree.getNumberOfKeys().should.equal(3);
+      idx.getMatching('hello').length.should.equal(1);
+      idx.getMatching('hello')[0].should.equal(doc1);
+      idx.getMatching('world').length.should.equal(1);
+      idx.getMatching('world')[0].should.equal(doc2);
+      idx.getMatching('bloup').length.should.equal(1);
+      idx.getMatching('bloup')[0].should.equal(doc3);
+    });

   });   // ==== End of 'Update' ==== //
@@ -349,2 +547,17 @@
+    it('Get all elements in the index', function () {
+      var idx = new Index({ fieldName: 'a' })
+        , doc1 = { a: 5, tf: 'hello' }
+        , doc2 = { a: 8, tf: 'world' }
+        , doc3 = { a: 2, tf: 'bloup' }
+        ;
+
+      idx.insert(doc1);
+      idx.insert(doc2);
+      idx.insert(doc3);
+
+      assert.deepEqual(idx.getAll(), [{ a: 2, tf: 'bloup' }, { a: 5, tf: 'hello' }, { a: 8, tf: 'world' }]);
+    });

 });
+ Added binary-search-tree@0.1.4 (transitive)
- Removed binary-search-tree@0.1.2 (transitive)
Updated binary-search-tree@0.1.4