nedb - npm Package Compare versions

Comparing version 1.4.0 to 1.5.0

bower.json


lib/datastore.js

@@ -22,3 +22,3 @@ var customUtils = require('./customUtils')

* @param {Function} options.onload Optional, if autoload is used this will be called after the load database with the error object as parameter. If you don't pass it the error will be thrown
* @param {Function} options.afterSerialization and options.beforeDeserialization Optional, serialization hooks
* @param {Function} options.afterSerialization/options.beforeDeserialization Optional, serialization hooks
* @param {Number} options.corruptAlertThreshold Optional, threshold after which an alert is thrown if too much data is corrupt

@@ -25,0 +25,0 @@ */

@@ -238,3 +238,3 @@ /**

if (data.length > 0 && corruptItems / data.length > this.corruptAlertThreshold) {
throw "More than 10% of the data file is corrupt, the wrong beforeDeserialization hook may be used. Cautiously refusing to start NeDB to prevent dataloss"
throw "More than " + Math.floor(100 * this.corruptAlertThreshold) + "% of the data file is corrupt, the wrong beforeDeserialization hook may be used. Cautiously refusing to start NeDB to prevent dataloss"
}

@@ -241,0 +241,0 @@
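For context, `corruptAlertThreshold` is a constructor option expressed as a fraction between 0 and 1 (10% being the default implied by the old message above). A minimal usage sketch, not taken from the diff itself (the path is illustrative):

```javascript
var Datastore = require('nedb');

// Sketch: tolerate up to 50% corrupt entries before NeDB refuses to load the datafile
var db = new Datastore({
  filename: 'workspace/example.db',   // illustrative path
  corruptAlertThreshold: 0.5
});

db.loadDatabase(function (err) {
  // If more data than the threshold is corrupt, loading aborts with the
  // "More than X% of the data file is corrupt" message built above
  if (err) { console.error(err); }
});
```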

@@ -13,2 +13,3 @@ /**

, async = require('async')
, path = require('path')
, storage = {}

@@ -39,2 +40,37 @@ ;

/**
* Flush data in OS buffer to storage if corresponding option is set
* @param {String} options.filename
* @param {Boolean} options.isDir Optional, defaults to false
* If options is a string, it is assumed that the flush of the file (not dir) called options was requested
*/
storage.flushToStorage = function (options, callback) {
var filename, flags;
if (typeof options === 'string') {
filename = options;
flags = 'r+';
} else {
filename = options.filename;
flags = options.isDir ? 'r' : 'r+';
}
// Windows can't fsync (FlushFileBuffers) directories. We can live with this as it cannot cause 100% dataloss
// except in the very rare event of the first time database is loaded and a crash happens
if (flags === 'r' && (process.platform === 'win32' || process.platform === 'win64')) { return callback(null); }
fs.open(filename, flags, function (err, fd) {
if (err) { return callback(err); }
fs.fsync(fd, function (errFS) {
fs.close(fd, function (errC) {
if (errFS || errC) {
return callback({ errorOnFsync: errFS, errorOnClose: errC });
} else {
return callback(null);
}
});
});
});
};
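To make the new helper's two calling conventions concrete, here is a small illustrative sketch (not part of the diff); inside NeDB it is only called from the internal `storage` module shown here:

```javascript
var path = require('path');

// Passing a string is shorthand for { filename: ..., isDir: false }
storage.flushToStorage('workspace/example.db', function (err) {
  if (err) { return console.error('fsync of file failed', err); }

  // Flushing the parent directory makes a preceding rename durable
  storage.flushToStorage({ filename: path.dirname('workspace/example.db'), isDir: true }, function (err) {
    if (err) { return console.error('fsync of directory failed', err); }
    console.log('file contents and directory entry are on disk');
  });
});
```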
/**
* Fully write or rewrite the datafile, immune to crashes during the write operation (data will not be lost)

@@ -47,13 +83,10 @@ * @param {String} filename

var callback = cb || function () {}
, tempFilename = filename + '~'
, oldFilename = filename + '~~'
;
, tempFilename = filename + '~';
async.waterfall([
async.apply(storage.ensureFileDoesntExist, tempFilename)
, async.apply(storage.ensureFileDoesntExist, oldFilename)
async.apply(storage.flushToStorage, { filename: path.dirname(filename), isDir: true })
, function (cb) {
storage.exists(filename, function (exists) {
if (exists) {
storage.rename(filename, oldFilename, function (err) { return cb(err); });
storage.flushToStorage(filename, function (err) { return cb(err); });
} else {

@@ -63,11 +96,12 @@ return cb();

});
}
}
, function (cb) {
storage.writeFile(tempFilename, data, function (err) { return cb(err); });
}
, async.apply(storage.flushToStorage, tempFilename)
, function (cb) {
storage.rename(tempFilename, filename, function (err) { return cb(err); });
}
, async.apply(storage.ensureFileDoesntExist, oldFilename)
], function (err) { if (err) { return callback(err); } else { return callback(null); } })
, async.apply(storage.flushToStorage, { filename: path.dirname(filename), isDir: true })
], function (err) { return callback(err); })
};
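The new waterfall is the classic write-temp-file-then-rename pattern with explicit fsyncs at each step. A minimal synchronous sketch of the same idea using plain Node `fs` calls, for illustration only (NeDB's actual implementation is the asynchronous code above):

```javascript
var fs = require('fs'), path = require('path');

// Durably replace `filename` with `data` (POSIX-oriented; directory fsync is skipped on Windows)
function durableRewriteSync (filename, data) {
  var tempFilename = filename + '~';

  fs.writeFileSync(tempFilename, data, 'utf8');

  var fd = fs.openSync(tempFilename, 'r+');
  fs.fsyncSync(fd);                        // flush the temp file contents to disk
  fs.closeSync(fd);

  fs.renameSync(tempFilename, filename);   // atomic replacement of the datafile

  if (process.platform !== 'win32') {
    var dirFd = fs.openSync(path.dirname(filename), 'r');
    fs.fsyncSync(dirFd);                   // flush the directory entry so the rename survives a crash
    fs.closeSync(dirFd);
  }
}
```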

@@ -82,5 +116,3 @@

storage.ensureDatafileIntegrity = function (filename, callback) {
var tempFilename = filename + '~'
, oldFilename = filename + '~~'
;
var tempFilename = filename + '~';

@@ -91,3 +123,3 @@ storage.exists(filename, function (filenameExists) {

storage.exists(oldFilename, function (oldFilenameExists) {
storage.exists(tempFilename, function (oldFilenameExists) {
// New database

@@ -99,3 +131,3 @@ if (!oldFilenameExists) {

// Write failed, use old version
storage.rename(oldFilename, filename, function (err) { return callback(err); });
storage.rename(tempFilename, filename, function (err) { return callback(err); });
});

@@ -102,0 +134,0 @@ });
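Taken together, these hunks change the recovery rule from "promote the `~~` backup" to "promote the `~` temp file". A paraphrased sketch of the resulting logic (illustrative name, not the code from the diff):

```javascript
// Sketch of ensureDatafileIntegrity after this change:
//   datafile exists        -> keep it (it is always the authoritative copy)
//   only temp file exists  -> rename the temp file to the datafile
//   neither exists         -> start a new, empty datafile
storage.ensureDatafileIntegritySketch = function (filename, callback) {
  var tempFilename = filename + '~';

  storage.exists(filename, function (filenameExists) {
    if (filenameExists) { return callback(null); }

    storage.exists(tempFilename, function (tempFilenameExists) {
      if (!tempFilenameExists) {
        // New database
        return storage.writeFile(filename, '', 'utf8', function (err) { return callback(err); });
      }
      // Write failed before the rename, use the temp version
      return storage.rename(tempFilename, filename, function (err) { return callback(err); });
    });
  });
};
```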

{
"name": "nedb",
"version": "1.4.0",
"version": "1.5.0",
"author": {

@@ -5,0 +5,0 @@ "name": "Louis Chatriot",

@@ -18,13 +18,14 @@ <img src="http://i.imgur.com/9O1xHFb.png" style="width: 25%; height: 25%; float: left;">

## Installation, tests
Module name on npm is `nedb`.
Module name on npm and bower is `nedb`.
```javascript
npm install nedb --save // Put latest version in your package.json
npm test // You'll need the dev dependencies to launch tests
bower install nedb // For the browser versions, which will be in browser-version/out
```
## API
It's a subset of MongoDB's API (the most used operations).
* <a href="#creatingloading-a-database">Creating/loading a database</a>
* <a href="#compacting-the-database">Compacting the database</a>
* <a href="#persistence">Persistence</a>
* <a href="#inserting-documents">Inserting documents</a>

@@ -45,3 +46,3 @@ * <a href="#finding-documents">Finding documents</a>

### Creating/loading a database
You can use NeDB as an in-memory only datastore or as a persistent datastore. One datastore is the equivalent of a MongoDB collection. The constructor is used as follows `new Datastore(options)` where `options` is an object with the following fields:
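A short usage sketch of the two modes described above (paths are placeholders):

```javascript
var Datastore = require('nedb');

// In-memory only datastore: no filename given
var inMemoryDb = new Datastore();

// Persistent datastore: one datafile per datastore, like one MongoDB collection
var persistentDb = new Datastore({ filename: 'path/to/datafile' });
```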

@@ -65,4 +66,3 @@ * `filename` (optional): path to the file where the data is persisted. If left blank, the datastore is automatically considered in-memory only. It cannot end with a `~` which is used in the temporary files NeDB uses to perform crash-safe writes

persistent datastore, no command (insert, find, update, remove) will be executed before `loadDatabase`
is called, so make sure to call it yourself or use the `autoload`
option.
is called, so make sure to call it yourself or use the `autoload` option.
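For example, the two loading styles look like this (a standard usage sketch):

```javascript
// Explicit load: no command is executed before the database is loaded
var db = new Datastore({ filename: 'path/to/datafile' });
db.loadDatabase(function (err) {
  // err is null if the datafile was read (or created) successfully
});

// Autoload: NeDB calls loadDatabase for you; options.onload can catch load errors
var autoDb = new Datastore({ filename: 'path/to/datafile', autoload: true });
```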

@@ -107,4 +107,4 @@ ```javascript

### Compacting the database
Under the hood, NeDB's persistence uses an append-only format, meaning that all updates and deletes actually result in lines added at the end of the datafile. The reason for this is that disk space is very cheap and appends are much faster than rewrites since they don't do a seek. The database is automatically compacted (i.e. put back in the one-line-per-document format) every time your application restarts.
### Persistence
Under the hood, NeDB's persistence uses an append-only format, meaning that all updates and deletes actually result in lines added at the end of the datafile, for performance reasons. The database is automatically compacted (i.e. put back in the one-line-per-document format) every time your application restarts.

@@ -115,9 +115,11 @@ You can manually call the compaction function with `yourDatabase.persistence.compactDatafile` which takes no argument. It queues a compaction of the datafile in the executor, to be executed sequentially after all pending operations.

Keep in mind that compaction takes a bit of time (not too much: 130ms for 50k records on my slow machine) and no other operation can happen when it does, so most projects actually don't need to use it.
Keep in mind that compaction takes a bit of time (not too much: 130ms for 50k records on a typical development machine) and no other operation can happen when it does, so most projects actually don't need to use it.
Durability works similarly to major databases: compaction forces the OS to physically flush data to disk, while appends to the data file do not (the OS is responsible for flushing the data). That guarantees that a server crash can never cause complete data loss, while preserving performance. The worst that can happen is a crash between two syncs, causing a loss of all data between the two syncs. Usually syncs are 30 seconds apart so that's at most 30 seconds of data. <a href="http://oldblog.antirez.com/post/redis-persistence-demystified.html" target="_blank">This post by Antirez on Redis persistence</a> explains this in more detail, NeDB being very close to Redis AOF persistence with the `appendfsync` option set to `no`.
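For reference, a hedged sketch of the compaction calls described above (only `compactDatafile` appears in this excerpt; `setAutocompactionInterval` and `stopAutocompaction` are NeDB persistence methods documented outside it):

```javascript
// Queue a compaction after all pending operations (takes no argument)
db.persistence.compactDatafile();

// Optional periodic compaction, interval in milliseconds
db.persistence.setAutocompactionInterval(1000 * 60 * 10);   // every 10 minutes
db.persistence.stopAutocompaction();                        // turn it off again
```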
### Inserting documents
The native types are `String`, `Number`, `Boolean`, `Date` and `null`. You can also use
arrays and subdocuments (objects). If a field is `undefined`, it will not be saved (this is different from
MongoDB which transforms `undefined` into `null`, something I find counter-intuitive).
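For example, a document exercising the types listed above (adapted from NeDB's standard insert example; note the `undefined` field is simply not saved):

```javascript
var doc = {
  hello: 'world',
  n: 5,
  today: new Date(),
  nedbIsAwesome: true,
  notthere: null,
  notToBeSaved: undefined,              // this field will not be saved
  fruits: ['apple', 'orange', 'pear'],
  infos: { name: 'nedb' }
};

db.insert(doc, function (err, newDoc) {
  // newDoc is the inserted document, including its auto-generated _id
});
```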

@@ -599,3 +601,3 @@ If the document does not contain an `_id` field, NeDB will automatically generated one for you (a 16-characters alphanumerical string). The `_id` of a document, once set, cannot be modified.

If you specify a `filename`, the database will be persistent, and automatically select the best storage method available (IndexedDB, WebSQL or localStorage) depending on the browser.
If you specify a `filename`, the database will be persistent, and automatically select the best storage method available (IndexedDB, WebSQL or localStorage) depending on the browser. In most cases that means a lot of data can be stored, typically in hundreds of MB. **WARNING**: the storage system changed between v1.3 and v1.4 and is NOT back-compatible! Your application needs to resync client-side when you upgrade NeDB.

@@ -602,0 +604,0 @@ NeDB is compatible with all major browsers: Chrome, Safari, Firefox, IE9+. Tests are in the `browser-version/test` directory (files `index.html` and `testPersistence.html`).

@@ -0,1 +1,119 @@

/**
* Load and modify part of fs to ensure writeFile will crash after writing 5000 bytes
*/
var fs = require('fs');
function rethrow() {
// Only enable in debug mode. A backtrace uses ~1000 bytes of heap space and
// is fairly slow to generate.
if (DEBUG) {
var backtrace = new Error();
return function(err) {
if (err) {
backtrace.stack = err.name + ': ' + err.message +
backtrace.stack.substr(backtrace.name.length);
throw backtrace;
}
};
}
return function(err) {
if (err) {
throw err; // Forgot a callback but don't know where? Use NODE_DEBUG=fs
}
};
}
function maybeCallback(cb) {
return typeof cb === 'function' ? cb : rethrow();
}
function isFd(path) {
return (path >>> 0) === path;
}
function assertEncoding(encoding) {
if (encoding && !Buffer.isEncoding(encoding)) {
throw new Error('Unknown encoding: ' + encoding);
}
}
var onePassDone = false;
function writeAll(fd, isUserFd, buffer, offset, length, position, callback_) {
var callback = maybeCallback(arguments[arguments.length - 1]);
if (onePassDone) { process.exit(1); } // Crash on purpose before rewrite done
var l = Math.min(5000, length); // Force write by chunks of 5000 bytes to ensure data will be incomplete on crash
// write(fd, buffer, offset, length, position, callback)
fs.write(fd, buffer, offset, l, position, function(writeErr, written) {
if (writeErr) {
if (isUserFd) {
if (callback) callback(writeErr);
} else {
fs.close(fd, function() {
if (callback) callback(writeErr);
});
}
} else {
onePassDone = true;
if (written === length) {
if (isUserFd) {
if (callback) callback(null);
} else {
fs.close(fd, callback);
}
} else {
offset += written;
length -= written;
if (position !== null) {
position += written;
}
writeAll(fd, isUserFd, buffer, offset, length, position, callback);
}
}
});
}
fs.writeFile = function(path, data, options, callback_) {
var callback = maybeCallback(arguments[arguments.length - 1]);
if (!options || typeof options === 'function') {
options = { encoding: 'utf8', mode: 0o666, flag: 'w' };
} else if (typeof options === 'string') {
options = { encoding: options, mode: 0o666, flag: 'w' };
} else if (typeof options !== 'object') {
throwOptionsError(options);
}
assertEncoding(options.encoding);
var flag = options.flag || 'w';
if (isFd(path)) {
writeFd(path, true);
return;
}
fs.open(path, flag, options.mode, function(openErr, fd) {
if (openErr) {
if (callback) callback(openErr);
} else {
writeFd(fd, false);
}
});
function writeFd(fd, isUserFd) {
var buffer = (data instanceof Buffer) ? data : new Buffer('' + data,
options.encoding || 'utf8');
var position = /a/.test(flag) ? null : 0;
writeAll(fd, isUserFd, buffer, 0, buffer.length, position, callback);
}
};
// End of fs modification
var Nedb = require('../lib/datastore.js')

@@ -5,2 +123,2 @@ , db = new Nedb({ filename: 'workspace/lac.db' })

db.loadDatabase();

@@ -393,3 +393,3 @@ var should = require('chai').should()

Object.keys(doc0).length.should.equal(2);
doc0.hello.should.equal('world');

@@ -558,6 +558,6 @@ doc1 = model.deserialize(doc1);

if (fs.existsSync('workspace/it.db')) { fs.unlinkSync('workspace/it.db'); }
if (fs.existsSync('workspace/it.db~~')) { fs.unlinkSync('workspace/it.db~~'); }
if (fs.existsSync('workspace/it.db~')) { fs.unlinkSync('workspace/it.db~'); }
fs.existsSync('workspace/it.db').should.equal(false);
fs.existsSync('workspace/it.db~~').should.equal(false);
fs.existsSync('workspace/it.db~').should.equal(false);

@@ -568,3 +568,3 @@ storage.ensureDatafileIntegrity(p.filename, function (err) {

fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(false);
fs.existsSync('workspace/it.db~').should.equal(false);

@@ -581,3 +581,3 @@ fs.readFileSync('workspace/it.db', 'utf8').should.equal('');

if (fs.existsSync('workspace/it.db')) { fs.unlinkSync('workspace/it.db'); }
if (fs.existsSync('workspace/it.db~~')) { fs.unlinkSync('workspace/it.db~~'); }
if (fs.existsSync('workspace/it.db~')) { fs.unlinkSync('workspace/it.db~'); }

@@ -587,3 +587,3 @@ fs.writeFileSync('workspace/it.db', 'something', 'utf8');

fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(false);
fs.existsSync('workspace/it.db~').should.equal(false);

@@ -594,3 +594,3 @@ storage.ensureDatafileIntegrity(p.filename, function (err) {

fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(false);
fs.existsSync('workspace/it.db~').should.equal(false);

@@ -603,12 +603,12 @@ fs.readFileSync('workspace/it.db', 'utf8').should.equal('something');

it('If old datafile exists and datafile doesnt, ensureDatafileIntegrity will use it', function (done) {
it('If temp datafile exists and datafile doesnt, ensureDatafileIntegrity will use it (cannot happen except upon first use)', function (done) {
var p = new Persistence({ db: { inMemoryOnly: false, filename: 'workspace/it.db' } });
if (fs.existsSync('workspace/it.db')) { fs.unlinkSync('workspace/it.db'); }
if (fs.existsSync('workspace/it.db~~')) { fs.unlinkSync('workspace/it.db~~'); }
if (fs.existsSync('workspace/it.db~')) { fs.unlinkSync('workspace/it.db~~'); }
fs.writeFileSync('workspace/it.db~~', 'something', 'utf8');
fs.writeFileSync('workspace/it.db~', 'something', 'utf8');
fs.existsSync('workspace/it.db').should.equal(false);
fs.existsSync('workspace/it.db~~').should.equal(true);
fs.existsSync('workspace/it.db~').should.equal(true);

@@ -619,3 +619,3 @@ storage.ensureDatafileIntegrity(p.filename, function (err) {

fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(false);
fs.existsSync('workspace/it.db~').should.equal(false);

@@ -628,13 +628,14 @@ fs.readFileSync('workspace/it.db', 'utf8').should.equal('something');

it('If both old and current datafiles exist, ensureDatafileIntegrity will use the datafile, it means step 4 of persistence failed', function (done) {
// Technically it could also mean the write was successful but the rename wasn't, but there is in any case no guarantee that the data in the temp file is whole so we have to discard the whole file
it('If both temp and current datafiles exist, ensureDatafileIntegrity will use the datafile, as it means that the write of the temp file failed', function (done) {
var theDb = new Datastore({ filename: 'workspace/it.db' });
if (fs.existsSync('workspace/it.db')) { fs.unlinkSync('workspace/it.db'); }
if (fs.existsSync('workspace/it.db~~')) { fs.unlinkSync('workspace/it.db~~'); }
if (fs.existsSync('workspace/it.db~')) { fs.unlinkSync('workspace/it.db~'); }
fs.writeFileSync('workspace/it.db', '{"_id":"0","hello":"world"}', 'utf8');
fs.writeFileSync('workspace/it.db~~', '{"_id":"0","hello":"other"}', 'utf8');
fs.writeFileSync('workspace/it.db~', '{"_id":"0","hello":"other"}', 'utf8');
fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(true);
fs.existsSync('workspace/it.db~').should.equal(true);

@@ -645,3 +646,3 @@ storage.ensureDatafileIntegrity(theDb.persistence.filename, function (err) {

fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~~').should.equal(true);
fs.existsSync('workspace/it.db~').should.equal(true);

@@ -656,2 +657,4 @@ fs.readFileSync('workspace/it.db', 'utf8').should.equal('{"_id":"0","hello":"world"}');

docs[0].hello.should.equal("world");
fs.existsSync('workspace/it.db').should.equal(true);
fs.existsSync('workspace/it.db~').should.equal(false);
done();

@@ -670,9 +673,6 @@ });

if (fs.existsSync(testDb + '~')) { fs.unlinkSync(testDb + '~'); }
if (fs.existsSync(testDb + '~~')) { fs.unlinkSync(testDb + '~~'); }
fs.existsSync(testDb).should.equal(false);
fs.writeFileSync(testDb + '~', 'something', 'utf8');
fs.writeFileSync(testDb + '~~', 'something else', 'utf8');
fs.existsSync(testDb + '~').should.equal(true);
fs.existsSync(testDb + '~~').should.equal(true);

@@ -683,4 +683,3 @@ d.persistence.persistCachedDatabase(function (err) {

fs.existsSync(testDb).should.equal(true);
fs.existsSync(testDb + '~').should.equal(false);
fs.existsSync(testDb + '~~').should.equal(false);
fs.existsSync(testDb + '~').should.equal(false);
if (!contents.match(/^{"hello":"world","_id":"[0-9a-zA-Z]{16}"}\n$/)) {

@@ -702,9 +701,7 @@ throw "Datafile contents not as expected";

if (fs.existsSync(testDb + '~')) { fs.unlinkSync(testDb + '~'); }
if (fs.existsSync(testDb + '~~')) { fs.unlinkSync(testDb + '~~'); }
fs.existsSync(testDb).should.equal(false);
fs.existsSync(testDb + '~').should.equal(false);
fs.writeFileSync(testDb + '~', 'bloup', 'utf8');
fs.writeFileSync(testDb + '~~', 'blap', 'utf8');
fs.existsSync(testDb + '~').should.equal(true);
fs.existsSync(testDb + '~~').should.equal(true);

@@ -715,4 +712,3 @@ d.persistence.persistCachedDatabase(function (err) {

fs.existsSync(testDb).should.equal(true);
fs.existsSync(testDb + '~').should.equal(false);
fs.existsSync(testDb + '~~').should.equal(false);
fs.existsSync(testDb + '~').should.equal(false);
if (!contents.match(/^{"hello":"world","_id":"[0-9a-zA-Z]{16}"}\n$/)) {

@@ -724,6 +720,6 @@ throw "Datafile contents not as expected";

});
});
});
});
it('persistCachedDatabase should update the contents of the datafile and leave a clean state even if there is a temp or old datafile', function (done) {
it('persistCachedDatabase should update the contents of the datafile and leave a clean state even if there is a temp datafile', function (done) {
d.insert({ hello: 'world' }, function () {

@@ -735,6 +731,4 @@ d.find({}, function (err, docs) {

fs.writeFileSync(testDb + '~', 'blabla', 'utf8');
fs.writeFileSync(testDb + '~~', 'bloblo', 'utf8');
fs.existsSync(testDb).should.equal(false);
fs.existsSync(testDb + '~').should.equal(true);
fs.existsSync(testDb + '~~').should.equal(true);

@@ -745,4 +739,3 @@ d.persistence.persistCachedDatabase(function (err) {

fs.existsSync(testDb).should.equal(true);
fs.existsSync(testDb + '~').should.equal(false);
fs.existsSync(testDb + '~~').should.equal(false);
fs.existsSync(testDb + '~').should.equal(false);
if (!contents.match(/^{"hello":"world","_id":"[0-9a-zA-Z]{16}"}\n$/)) {

@@ -757,3 +750,3 @@ throw "Datafile contents not as expected";

it('persistCachedDatabase should update the contents of the datafile and leave a clean state even if there is a temp or old datafile', function (done) {
it('persistCachedDatabase should update the contents of the datafile and leave a clean state even if there is a temp datafile', function (done) {
var dbFile = 'workspace/test2.db', theDb;

@@ -763,3 +756,2 @@

if (fs.existsSync(dbFile + '~')) { fs.unlinkSync(dbFile + '~'); }
if (fs.existsSync(dbFile + '~~')) { fs.unlinkSync(dbFile + '~~'); }

@@ -773,3 +765,2 @@ theDb = new Datastore({ filename: dbFile });

fs.existsSync(dbFile + '~').should.equal(false);
fs.existsSync(dbFile + '~~').should.equal(false);
if (contents != "") {

@@ -786,83 +777,77 @@ throw "Datafile contents not as expected";

async.waterfall([
  async.apply(storage.ensureFileDoesntExist, dbFile)
, async.apply(storage.ensureFileDoesntExist, dbFile + '~')
, async.apply(storage.ensureFileDoesntExist, dbFile + '~~')   // old version only (the ~~ backup file is dropped in 1.5.0)
, function (cb) {
    theDb = new Datastore({ filename: dbFile });
    theDb.loadDatabase(cb);
  }
, function (cb) {
    theDb.find({}, function (err, docs) {
      assert.isNull(err);
      docs.length.should.equal(0);
      return cb();
    });
  }
, function (cb) {
    theDb.insert({ a: 'hello' }, function (err, _doc1) {
      assert.isNull(err);
      doc1 = _doc1;
      theDb.insert({ a: 'world' }, function (err, _doc2) {
        assert.isNull(err);
        doc2 = _doc2;
        return cb();
      });
    });
  }
, function (cb) {
    theDb.find({}, function (err, docs) {
      assert.isNull(err);
      docs.length.should.equal(2);
      _.find(docs, function (item) { return item._id === doc1._id }).a.should.equal('hello');
      _.find(docs, function (item) { return item._id === doc2._id }).a.should.equal('world');
      return cb();
    });
  }
, function (cb) {
    theDb.loadDatabase(cb);
  }
, function (cb) {   // No change
    theDb.find({}, function (err, docs) {
      assert.isNull(err);
      docs.length.should.equal(2);
      _.find(docs, function (item) { return item._id === doc1._id }).a.should.equal('hello');
      _.find(docs, function (item) { return item._id === doc2._id }).a.should.equal('world');
      return cb();
    });
  }
, function (cb) {
    fs.existsSync(dbFile).should.equal(true);
    fs.existsSync(dbFile + '~').should.equal(false);
    fs.existsSync(dbFile + '~~').should.equal(false);   // old version only
    return cb();
  }
, function (cb) {
    theDb2 = new Datastore({ filename: dbFile });
    theDb2.loadDatabase(cb);
  }
, function (cb) {   // No change in second db
    theDb2.find({}, function (err, docs) {
      assert.isNull(err);
      docs.length.should.equal(2);
      _.find(docs, function (item) { return item._id === doc1._id }).a.should.equal('hello');
      _.find(docs, function (item) { return item._id === doc2._id }).a.should.equal('world');
      return cb();
    });
  }
, function (cb) {
    fs.existsSync(dbFile).should.equal(true);
    fs.existsSync(dbFile + '~').should.equal(false);
    fs.existsSync(dbFile + '~~').should.equal(false);   // old version only
    return cb();
  }
], done);
});
// This test is a bit complicated since it depends on the time I/O actions take to execute
// That depends on the machine and the load on the machine when the tests are run
// It is timed for my machine with nothing else running but may not work as expected on others (it will not fail but may not be a proof)
// Every new version of NeDB passes it on my machine before release
// The child process will load the database with the given datafile, but the fs.writeFile function
// is rewritten to crash the process before it finishes (after 5000 bytes), to ensure data was not lost
it('If system crashes during a loadDatabase, the former version is not lost', function (done) {
var cp, N = 150000, toWrite = "", i;
var N = 500, toWrite = "", i, doc_i;

@@ -875,36 +860,52 @@ // Ensuring the state is clean

for (i = 0; i < N; i += 1) {
toWrite += model.serialize({ _id: customUtils.uid(16), hello: 'world' }) + '\n';
}
toWrite += model.serialize({ _id: 'anid_' + i, hello: 'world' }) + '\n';
}
fs.writeFileSync('workspace/lac.db', toWrite, 'utf8');
var datafileLength = fs.readFileSync('workspace/lac.db', 'utf8').length;
// Loading it in a separate process that we will crash before finishing the loadDatabase
cp = child_process.fork('test_lac/loadAndCrash.test')
child_process.fork('test_lac/loadAndCrash.test').on('exit', function (code) {
code.should.equal(1); // See test_lac/loadAndCrash.test.js
// Kill the child process when we're at step 3 of persistCachedDatabase (during write to datafile)
setTimeout(function() {
cp.kill('SIGINT');
fs.existsSync('workspace/lac.db').should.equal(true);
fs.existsSync('workspace/lac.db~').should.equal(true);
fs.readFileSync('workspace/lac.db', 'utf8').length.should.equal(datafileLength);
fs.readFileSync('workspace/lac.db~', 'utf8').length.should.equal(5000);
// If the timing is correct, only the temp datafile contains data
// The datafile was in the middle of being written and is empty
// Reload database without a crash, check that no data was lost and fs state is clean (no temp file)
var db = new Datastore({ filename: 'workspace/lac.db' });
db.loadDatabase(function (err) {
assert.isNull(err);
// Let the process crash be finished then load database without a crash, and test we didn't lose data
setTimeout(function () {
var db = new Datastore({ filename: 'workspace/lac.db' });
db.loadDatabase(function (err) {
assert.isNull(err);
fs.existsSync('workspace/lac.db').should.equal(true);
fs.existsSync('workspace/lac.db~').should.equal(false);
fs.readFileSync('workspace/lac.db', 'utf8').length.should.equal(datafileLength);
db.count({}, function (err, n) {
// Data has not been lost
assert.isNull(err);
n.should.equal(150000);
db.find({}, function (err, docs) {
docs.length.should.equal(N);
for (i = 0; i < N; i += 1) {
doc_i = _.find(docs, function (d) { return d._id === 'anid_' + i; });
assert.isDefined(doc_i);
assert.deepEqual({ hello: 'world', _id: 'anid_' + i }, doc_i);
}
return done();
});
});
});
});
// State is clean, the temp datafile has been erased and the datafile contains all the data
fs.existsSync('workspace/lac.db').should.equal(true);
fs.existsSync('workspace/lac.db~').should.equal(false);
// Not run on Windows as there is no clean way to set maximum file descriptors. Not an issue as the code itself is tested.
it("Cannot cause EMFILE errors by opening too many file descriptors", function (done) {
if (process.platform === 'win32' || process.platform === 'win64') { return done(); }
child_process.execFile('test_lac/openFdsLaunch.sh', function (err, stdout, stderr) {
if (err) { return done(err); }
done();
});
});
}, 100);
}, 2000);
// The subprocess will not output anything to stdout unless part of the test fails
if (stdout.length !== 0) {
return done(stdout);
} else {
return done();
}
});
});

@@ -911,0 +912,0 @@

Sorry, the diff of this file is too big to display
