@nozbe/lokijs
Advanced tools
Comparing version 1.5.11-wmelon-idb-fix2 to 1.5.12-wmelon
{ | ||
"name": "@nozbe/lokijs", | ||
"version": "1.5.11-wmelon-idb-fix2", | ||
"version": "1.5.12-wmelon", | ||
"description": "Nozbe's temporary fork of LokiJS - used for WatermelonDB purposes to work around NPM issues", | ||
@@ -5,0 +5,0 @@ "homepage": "https://techfort.github.io/LokiJS/", |
# LokiJS | ||
The super fast in-memory javascript document oriented database. | ||
Enable offline syncing to your SQL/NoSQL database servers with [SyncProxy](https://www.syncproxy.com)! Code-free, real-time syncing, ideal for mobile, Electron, and web apps.
[![Join the chat at https://gitter.im/techfort/LokiJS](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/techfort/LokiJS?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) | ||
@@ -4,0 +8,0 @@ ![alt CI-badge](https://travis-ci.org/techfort/LokiJS.svg?branch=master) |
@@ -49,2 +49,4 @@ (function(root, factory) { | ||
* Expects an array of Loki documents as the return value | ||
* @param {number} options.megachunkCount Number of parallel requests for data when loading database. | ||
* Can be tuned for a specific application | ||
*/ | ||
@@ -55,5 +57,10 @@ function IncrementalIndexedDBAdapter(options) { | ||
this.chunkSize = 100; | ||
this.megachunkCount = this.options.megachunkCount || 20; | ||
this.idb = null; // will be lazily loaded on first operation that needs it | ||
this._prevLokiVersionId = null; | ||
this._prevCollectionVersionIds = {}; | ||
if (!(this.megachunkCount >= 4 && this.megachunkCount % 2 === 0)) { | ||
throw new Error('megachunkCount must be >=4 and divisible by 2'); | ||
} | ||
} | ||
@@ -425,8 +432,7 @@ | ||
chunks = chunksToMap(chunks); | ||
var loki = JSON.parse(chunks.loki); | ||
var loki = chunks.loki; | ||
chunks.loki = null; // gc | ||
// populate collections with data | ||
var deserializeChunk = that.options.deserializeChunk; | ||
populateLoki(loki, chunks.chunkMap, deserializeChunk); | ||
populateLoki(loki, chunks.chunkMap); | ||
chunks = null; // gc | ||
@@ -497,6 +503,5 @@ | ||
function populateLoki(loki, chunkMap, deserializeChunk) { | ||
loki.collections.forEach(function(collectionStub, i) { | ||
function populateLoki(loki, chunkMap) { | ||
loki.collections.forEach(function populateCollection(collectionStub, i) { | ||
var chunkCollection = chunkMap[collectionStub.name]; | ||
if (chunkCollection) { | ||
@@ -506,3 +511,3 @@ if (!chunkCollection.metadata) { | ||
} | ||
var collection = JSON.parse(chunkCollection.metadata); | ||
var collection = chunkCollection.metadata; | ||
chunkCollection.metadata = null; | ||
@@ -513,14 +518,7 @@ | ||
var dataChunks = chunkCollection.dataChunks; | ||
dataChunks.forEach(function(chunkObj, i) { | ||
var chunk = JSON.parse(chunkObj); | ||
chunkObj = null; // make string available for GC | ||
dataChunks[i] = null; | ||
if (deserializeChunk) { | ||
chunk = deserializeChunk(collection.name, chunk); | ||
} | ||
dataChunks.forEach(function populateChunk(chunk, i) { | ||
chunk.forEach(function(doc) { | ||
collection.data.push(doc); | ||
}); | ||
dataChunks[i] = null; | ||
}); | ||
@@ -614,15 +612,96 @@ } | ||
var tx = this.idb.transaction(['LokiIncrementalData'], "readonly"); | ||
var store = tx.objectStore('LokiIncrementalData'); | ||
idbReq(tx.objectStore('LokiIncrementalData').getAll(), function(e) { | ||
var chunks = e.target.result; | ||
callback(chunks); | ||
}, function(e) { | ||
callback(e); | ||
}); | ||
var deserializeChunk = this.options.deserializeChunk; | ||
if (this.options.onFetchStart) { | ||
this.options.onFetchStart(); | ||
// If there are a lot of chunks (>100), don't request them all in one go, but in multiple | ||
// "megachunks" (chunks of chunks). This improves concurrency, as main thread is already busy | ||
// while IDB process is still fetching data. Details: https://github.com/techfort/LokiJS/pull/874 | ||
// Fetches the whole object store as `megachunkCount` parallel getAll()
// requests over disjoint key ranges, parses every record, and invokes
// `callback` with the combined array once all megachunks have arrived.
// Used instead of a single getAll() for large databases so the main thread
// can parse earlier megachunks while IDB is still fetching later ones.
function getMegachunks(keys) {
  var megachunkCount = that.megachunkCount;
  var keyRanges = createKeyRanges(keys, megachunkCount);

  var allChunks = [];
  var megachunksReceived = 0;

  // Success handler for one megachunk: parse each record in place, move it
  // into the shared accumulator, and fire the final callback when the last
  // megachunk lands. Order of arrival does not matter for correctness here.
  function processMegachunk(e, megachunkIndex, keyRange) {
    // var debugMsg = 'processing chunk ' + megachunkIndex + ' (' + keyRange.lower + ' -- ' + keyRange.upper + ')'
    // DEBUG && console.time(debugMsg);
    var megachunk = e.target.result;
    megachunk.forEach(function (chunk, i) {
      parseChunk(chunk, deserializeChunk);
      allChunks.push(chunk);
      megachunk[i] = null; // gc
    });
    // DEBUG && console.timeEnd(debugMsg);

    megachunksReceived += 1;
    if (megachunksReceived === megachunkCount) {
      callback(allChunks);
    }
  }

  // Stagger megachunk requests - first one half, then request the second when first one comes
  // back. This further improves concurrency.
  // NOTE(review): each failed request calls callback(e) independently, so the
  // consumer can be invoked more than once on multi-request failure — confirm
  // callers tolerate that.
  function requestMegachunk(index) {
    var keyRange = keyRanges[index];
    idbReq(store.getAll(keyRange), function(e) {
      // Kick off the paired second-half request *before* parsing this one,
      // so IDB fetch and main-thread parse overlap.
      if (index < megachunkCount / 2) {
        requestMegachunk(index + megachunkCount / 2);
      }

      processMegachunk(e, index, keyRange);
    }, function(e) {
      callback(e);
    });
  }

  // Only the first half is requested up front (requires megachunkCount to be
  // even, which the constructor enforces); the rest are chained above.
  for (var i = 0; i < megachunkCount / 2; i += 1) {
    requestMegachunk(i);
  }
}
// Simple path for small databases: fetch every record with one getAll()
// request, parse each chunk in place, and hand the full list to `callback`.
// On IDB failure the error event is passed through to `callback` unchanged.
function getAllChunks() {
  idbReq(store.getAll(), function onGetAllSuccess(event) {
    var records = event.target.result;
    for (var j = 0; j < records.length; j += 1) {
      parseChunk(records[j], deserializeChunk);
    }
    callback(records);
  }, function onGetAllError(event) {
    callback(event);
  });
}
// Entry point for fetching the database: lists all keys first, then picks a
// strategy — parallel megachunk requests for large stores (>100 records) or a
// single getAll() otherwise. Errors are forwarded to `callback` unchanged.
function getAllKeys() {
  idbReq(store.getAllKeys(), function onKeysSuccess(event) {
    var sortedKeys = event.target.result.sort();
    var isLargeDatabase = sortedKeys.length > 100;
    if (isLargeDatabase) {
      getMegachunks(sortedKeys);
    } else {
      getAllChunks();
    }
  }, function onKeysError(event) {
    callback(event);
  });

  // Fires right after the request is *issued*, not when data arrives —
  // lets the consumer start a loading indicator immediately.
  if (that.options.onFetchStart) {
    that.options.onFetchStart();
  }
}
getAllKeys(); | ||
}; | ||
// Parses one raw IDB record in place: JSON-decodes `chunk.value` and, when a
// custom deserializer is supplied and the record is a collection data chunk
// (key shaped like "<collection>.chunk.<n>"), replaces the decoded value with
// deserializeChunk(collectionName, value). Mutates `chunk`; returns nothing.
function parseChunk(chunk, deserializeChunk) {
  var decoded = JSON.parse(chunk.value);
  chunk.value = decoded;

  if (!deserializeChunk) {
    return;
  }

  var keyParts = chunk.key.split('.');
  var isDataChunk = keyParts.length === 3 && keyParts[1] === 'chunk';
  if (isDataChunk) {
    // keyParts[0] is the collection name the chunk belongs to
    chunk.value = deserializeChunk(keyParts[0], decoded);
  }
}
/** | ||
@@ -713,2 +792,23 @@ * Deletes a database from IndexedDB | ||
// Splits a sorted key list into `count` contiguous, non-overlapping
// IDBKeyRanges that together cover the entire key space:
//   range 0:         everything below its maxKey        (open-ended low)
//   ranges 1..n-2:   [minKey, maxKey)                   (half-open)
//   range n-1:       everything from its minKey upward  (open-ended high, so
//                    the remainder from integer division is never lost)
// NOTE(review): assumes keys.length >= count. If countPerRange were 0,
// minKey === maxKey and IDBKeyRange.bound(k, k, false, true) would throw a
// DataError; callers currently only take this path with >100 keys — confirm
// before reusing with a large custom megachunkCount.
function createKeyRanges(keys, count) {
  var countPerRange = Math.floor(keys.length / count);
  var keyRanges = [];
  var minKey, maxKey;
  for (var i = 0; i < count; i += 1) {
    minKey = keys[countPerRange * i];
    maxKey = keys[countPerRange * (i + 1)];
    if (i === 0) {
      // ... < maxKey
      keyRanges.push(IDBKeyRange.upperBound(maxKey, true));
    } else if (i === count - 1) {
      // >= minKey
      keyRanges.push(IDBKeyRange.lowerBound(minKey));
    } else {
      // >= minKey && < maxKey
      keyRanges.push(IDBKeyRange.bound(minKey, maxKey, false, true));
    }
  }
  return keyRanges;
}
function idbReq(request, onsuccess, onerror) { | ||
@@ -715,0 +815,0 @@ request.onsuccess = function (e) { |
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
3072551
10009
92