Comparing version 1.5.11 to 1.5.12
{
"name": "lokijs",
"version": "1.5.11",
"version": "1.5.12",
"description": "Fast document oriented javascript in-memory database",
@@ -5,0 +5,0 @@ "homepage": "https://techfort.github.io/LokiJS/",
# LokiJS
LokiJS is sponsored by the following tool; please help support us by taking a look and signing up for a free trial:
<a href="https://tracking.gitads.io/?repo=lokijs"> <img src="https://images.gitads.io/lokijs" alt="GitAds"/> </a>
The super fast in-memory javascript document oriented database.
Enable offline syncing to your SQL/NoSQL database servers with [SyncProxy](https://www.syncproxy.com)! Code-free, real-time syncing, ideal for mobile, Electron, and web apps.
[![Join the chat at https://gitter.im/techfort/LokiJS](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/techfort/LokiJS?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
@@ -7,0 +8,0 @@ ![alt CI-badge](https://travis-ci.org/techfort/LokiJS.svg?branch=master)
@@ -40,2 +40,5 @@ (function(root, factory) {
* Use this as an opportunity to execute code concurrently while IDB does work on a separate thread
* @param {function} options.onDidOverwrite Called when this adapter is forced to overwrite contents
* of IndexedDB. This happens if there's another open tab of the same app that's making changes.
* You might use it as an opportunity to alert the user to the potential loss of data
* @param {function} options.serializeChunk Called with a chunk (array of Loki documents) before
@@ -47,2 +50,4 @@ * it's saved to IndexedDB. You can use it to manually compress on-disk representation
* Expects an array of Loki documents as the return value
* @param {number} options.megachunkCount Number of parallel requests for data when loading the database.
* Can be tuned for a specific application
*/
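For orientation, wiring up these options might look roughly like the sketch below. The option names come from the JSDoc above (plus `onFetchStart`, `onDidOverwrite`, and `deserializeChunk`, which appear later in this diff); `example.db` and the compress/decompress helpers are made-up placeholders, not LokiJS API.

```js
// Sketch only: myCompress/myDecompress are hypothetical helpers, not part of LokiJS.
var adapter = new IncrementalIndexedDBAdapter({
  onFetchStart: function () {
    // run other startup work concurrently while IDB fetches chunks
  },
  onDidOverwrite: function () {
    // another tab forced a full overwrite; warn the user about possible data loss
    console.warn('Database was overwritten by another tab');
  },
  serializeChunk: function (collectionName, docs) {
    return myCompress(docs); // compress the on-disk representation
  },
  deserializeChunk: function (collectionName, data) {
    return myDecompress(data); // must return the array of Loki documents
  },
  megachunkCount: 24 // must be >= 4 and divisible by 2 (validated in the constructor below)
});
var db = new loki('example.db', { adapter: adapter });
```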
@@ -53,3 +58,10 @@ function IncrementalIndexedDBAdapter(options) {
this.chunkSize = 100;
this.megachunkCount = this.options.megachunkCount || 20;
this.idb = null; // will be lazily loaded on first operation that needs it
this._prevLokiVersionId = null;
this._prevCollectionVersionIds = {};
if (!(this.megachunkCount >= 4 && this.megachunkCount % 2 === 0)) {
throw new Error('megachunkCount must be >=4 and divisible by 2');
}
}
@@ -104,3 +116,3 @@
// TODO: remove sanity checks when everything is fully tested
// verify
var firstElement = collection.data[firstDataPosition];
@@ -120,3 +132,2 @@ if (!(firstElement && firstElement.$loki >= minId && firstElement.$loki <= maxId)) {
// TODO: remove sanity checks when everything is fully tested
if (chunkData.length > this.chunkSize) {
@@ -140,18 +151,162 @@ throw new Error("broken invariant - chunk size");
* @param {string} dbname - the name to give the serialized database
* @param {object} dbcopy - copy of the Loki database
* @param {function} getLokiCopy - returns copy of the Loki database
* @param {function} callback - (Optional) callback passed obj.success with true or false
* @memberof IncrementalIndexedDBAdapter
*/
IncrementalIndexedDBAdapter.prototype.saveDatabase = function(dbname, loki, callback) {
IncrementalIndexedDBAdapter.prototype.saveDatabase = function(dbname, getLokiCopy, callback) {
var that = this;
DEBUG && console.log("exportDatabase - begin");
DEBUG && console.time("exportDatabase");
var chunksToSave = [];
var savedLength = 0;
if (!this.idb) {
this._initializeIDB(dbname, callback, function() {
that.saveDatabase(dbname, getLokiCopy, callback);
});
return;
}
if (this.operationInProgress) {
throw new Error("Error while saving to database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
}
this.operationInProgress = true;
DEBUG && console.log("saveDatabase - begin");
DEBUG && console.time("saveDatabase");
function finish(e) {
DEBUG && e && console.error(e);
DEBUG && console.timeEnd("saveDatabase");
that.operationInProgress = false;
callback(e);
}
// try..catch is required, e.g.:
// InvalidStateError: Failed to execute 'transaction' on 'IDBDatabase': The database connection is closing.
// (this may happen if another tab has called deleteDatabase)
try {
var updatePrevVersionIds = function () {
console.error('Unexpected successful tx - cannot update previous version ids');
};
var didOverwrite = false;
var tx = this.idb.transaction(['LokiIncrementalData'], "readwrite");
tx.oncomplete = function() {
updatePrevVersionIds();
finish();
if (didOverwrite && that.options.onDidOverwrite) {
that.options.onDidOverwrite();
}
};
tx.onerror = function(e) {
finish(e);
};
tx.onabort = function(e) {
finish(e);
};
var store = tx.objectStore('LokiIncrementalData');
var performSave = function (maxChunkIds) {
try {
var incremental = !maxChunkIds;
var chunkInfo = that._putInChunks(store, getLokiCopy(), incremental, maxChunkIds);
// Update last seen version IDs, but only after the transaction is successful
updatePrevVersionIds = function() {
that._prevLokiVersionId = chunkInfo.lokiVersionId;
chunkInfo.collectionVersionIds.forEach(function (collectionInfo) {
that._prevCollectionVersionIds[collectionInfo.name] = collectionInfo.versionId;
});
};
tx.commit && tx.commit();
} catch (error) {
console.error('idb performSave failed: ', error);
tx.abort();
}
};
// Incrementally saving changed chunks breaks down if there is more than one writer to IDB
// (multiple tabs of the same web app), leading to data corruption. To fix that, we save all
// metadata chunks (loki + collections) with a unique ID on each save and remember it. Before
// the subsequent save, we read loki from IDB to check if its version ID changed. If not, we're
// guaranteed that the persisted DB is consistent with our diff. Otherwise, we fall back to the slow
// path and overwrite *all* database chunks with our version. Both reading and writing must
// happen in the same IDB transaction for this to work.
// TODO: We can optimize the slow path by fetching collection metadata chunks and comparing their
// version IDs with those last seen by us. Since any change in collection data requires a metadata
// chunk save, we're guaranteed that if the IDs match, we don't need to overwrite chunks of this collection
var getAllKeysThenSave = function() {
// NOTE: We must fetch all keys to protect against a case where another tab has written more
// chunks than we did -- if so, we must delete them.
idbReq(store.getAllKeys(), function(e) {
var maxChunkIds = getMaxChunkIds(e.target.result);
performSave(maxChunkIds);
}, function(e) {
console.error('Getting all keys failed: ', e);
tx.abort();
});
};
var getLokiThenSave = function() {
idbReq(store.get('loki'), function(e) {
if (lokiChunkVersionId(e.target.result) === that._prevLokiVersionId) {
performSave();
} else {
DEBUG && console.warn('Another writer changed Loki IDB, using slow path...');
didOverwrite = true;
getAllKeysThenSave();
}
}, function(e) {
console.error('Getting loki chunk failed: ', e);
tx.abort();
});
};
getLokiThenSave();
} catch (error) {
finish(error);
}
};
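To make the fast path / slow path split concrete, here is how the version-ID handshake plays out with two tabs open. This is an illustrative walkthrough, not adapter API:

```js
// Illustrative walkthrough only (no real API calls):
// 1. Tab A saves: writes its dirty chunks plus a 'loki' chunk whose idbVersionId is 'abc',
//    and remembers _prevLokiVersionId = 'abc'.
// 2. Tab B saves: inside the same readwrite transaction it reads 'loki' first.
//    lokiChunkVersionId(...) returns 'abc', but Tab B remembers its own last id ('xyz'),
//    so the check fails -> slow path: getAllKeys, overwrite *all* chunks, delete orphans,
//    and fire options.onDidOverwrite once the transaction completes.
// 3. Tab A saves again: it reads Tab B's new version id, which mismatches its remembered
//    'abc', so it also takes the slow path once. It then remembers its own new id, and its
//    subsequent saves take the fast path until another tab writes again.
```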
// gets current largest chunk ID for each collection
function getMaxChunkIds(allKeys) {
var maxChunkIds = {};
allKeys.forEach(function (key) {
var keySegments = key.split(".");
// table.chunk.2317
if (keySegments.length === 3 && keySegments[1] === "chunk") {
var collection = keySegments[0];
var chunkId = parseInt(keySegments[2]) || 0;
var currentMax = maxChunkIds[collection];
if (!currentMax || chunkId > currentMax) {
maxChunkIds[collection] = chunkId;
}
}
});
return maxChunkIds;
}
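A quick sketch of what this produces for a typical key set (the chunk key format is `<collection>.chunk.<id>`):

```js
// Sketch: 'loki' and metadata keys are skipped; only chunk keys are considered.
getMaxChunkIds(['loki', 'users.metadata', 'users.chunk.0', 'users.chunk.12', 'orders.chunk.3']);
// => { users: 12, orders: 3 }
```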
function lokiChunkVersionId(chunk) {
try {
if (chunk) {
var loki = JSON.parse(chunk.value);
return loki.idbVersionId || null;
} else {
return null;
}
} catch (e) {
console.error('Error while parsing loki chunk', e);
return null;
}
}
IncrementalIndexedDBAdapter.prototype._putInChunks = function(idbStore, loki, incremental, maxChunkIds) {
var that = this;
var collectionVersionIds = [];
var savedSize = 0;
var prepareCollection = function (collection, i) {
// Find dirty chunk ids
var dirtyChunks = new Set();
collection.dirtyIds.forEach(function(lokiId) {
incremental && collection.dirtyIds.forEach(function(lokiId) {
var chunkId = (lokiId / that.chunkSize) | 0;
@@ -169,6 +324,8 @@ dirtyChunks.add(chunkId);
// we must stringify now, because IDB is asynchronous, and underlying objects are mutable
// (and it's faster for some reason)
// In general, it's also faster to stringify, because we need serialization anyway, and
// JSON.stringify is much better optimized than IDB's structured clone
chunkData = JSON.stringify(chunkData);
savedLength += chunkData.length;
chunksToSave.push({
savedSize += chunkData.length;
DEBUG && incremental && console.log('Saving: ' + collection.name + ".chunk." + chunkId);
idbStore.put({
key: collection.name + ".chunk." + chunkId,
@@ -178,12 +335,33 @@ value: chunkData,
};
dirtyChunks.forEach(prepareChunk);
if (incremental) {
dirtyChunks.forEach(prepareChunk);
} else {
// add all chunks
var maxChunkId = (collection.maxId / that.chunkSize) | 0;
for (var j = 0; j <= maxChunkId; j += 1) {
prepareChunk(j);
}
// delete chunks with larger ids than what we have
// NOTE: we don't have to delete metadata chunks as they will be absent from loki anyway
// NOTE: failures are silently ignored, so we don't have to worry about holes
var persistedMaxChunkId = maxChunkIds[collection.name] || 0;
for (var k = maxChunkId + 1; k <= persistedMaxChunkId; k += 1) {
var deletedChunkName = collection.name + ".chunk." + k;
idbStore.delete(deletedChunkName);
DEBUG && console.warn('Deleted chunk: ' + deletedChunkName);
}
}
// save collection metadata as separate chunk (but only if changed)
if (collection.dirty) {
if (collection.dirty || dirtyChunks.size || !incremental) {
collection.idIndex = []; // this is recreated lazily
collection.data = [];
collection.idbVersionId = randomVersionId();
collectionVersionIds.push({ name: collection.name, versionId: collection.idbVersionId });
var metadataChunk = JSON.stringify(collection);
savedLength += metadataChunk.length;
chunksToSave.push({
savedSize += metadataChunk.length;
DEBUG && incremental && console.log('Saving: ' + collection.name + ".metadata");
idbStore.put({
key: collection.name + ".metadata",
@@ -199,10 +377,14 @@ value: metadataChunk,
loki.idbVersionId = randomVersionId();
var serializedMetadata = JSON.stringify(loki);
savedLength += serializedMetadata.length;
loki = null; // allow GC of the DB copy
savedSize += serializedMetadata.length;
chunksToSave.push({ key: "loki", value: serializedMetadata });
DEBUG && incremental && console.log('Saving: loki');
idbStore.put({ key: "loki", value: serializedMetadata });
DEBUG && console.log("saved size: " + savedLength);
that._saveChunks(dbname, chunksToSave, callback);
DEBUG && console.log("saved size: " + savedSize);
return {
lokiVersionId: loki.idbVersionId,
collectionVersionIds: collectionVersionIds,
};
};
@@ -227,107 +409,110 @@
var that = this;
DEBUG && console.log("loadDatabase - begin");
DEBUG && console.time("loadDatabase");
this._getAllChunks(dbname, function(chunks) {
if (!Array.isArray(chunks)) {
// we got an error
DEBUG && console.timeEnd("loadDatabase");
callback(chunks);
}
if (!chunks.length) {
DEBUG && console.timeEnd("loadDatabase");
callback(null);
return;
}
if (this.operationInProgress) {
throw new Error("Error while loading database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
}
DEBUG && console.log("Found chunks:", chunks.length);
this.operationInProgress = true;
that._sortChunksInPlace(chunks);
DEBUG && console.log("loadDatabase - begin");
DEBUG && console.time("loadDatabase");
// repack chunks into a map
var loki;
var chunkCollections = {};
var finish = function (value) {
DEBUG && console.timeEnd("loadDatabase");
that.operationInProgress = false;
callback(value);
};
chunks.forEach(function(object) {
var key = object.key;
var value = object.value;
if (key === "loki") {
loki = value;
return;
} else if (key.includes(".")) {
var keySegments = key.split(".");
if (keySegments.length === 3 && keySegments[1] === "chunk") {
var colName = keySegments[0];
if (chunkCollections[colName]) {
chunkCollections[colName].dataChunks.push(value);
} else {
chunkCollections[colName] = {
metadata: null,
dataChunks: [value],
};
}
return;
} else if (keySegments.length === 2 && keySegments[1] === "metadata") {
var name = keySegments[0];
if (chunkCollections[name]) {
chunkCollections[name].metadata = value;
} else {
chunkCollections[name] = { metadata: value, dataChunks: [] };
}
return;
}
this._getAllChunks(dbname, function(chunks) {
try {
if (!Array.isArray(chunks)) {
throw chunks; // we have an error
}
console.error("Unknown chunk " + key);
callback(new Error("Invalid database - unknown chunk found"));
});
chunks = null;
if (!chunks.length) {
return finish(null);
}
if (!loki) {
callback(new Error("Invalid database - missing database metadata"));
}
DEBUG && console.log("Found chunks:", chunks.length);
// parse Loki object
loki = JSON.parse(loki);
// repack chunks into a map
chunks = chunksToMap(chunks);
var loki = chunks.loki;
chunks.loki = null; // gc
// populate collections with data
that._populate(loki, chunkCollections);
chunkCollections = null;
// populate collections with data
populateLoki(loki, chunks.chunkMap);
chunks = null; // gc
DEBUG && console.timeEnd("loadDatabase");
callback(loki);
// remember previous version IDs
that._prevLokiVersionId = loki.idbVersionId || null;
that._prevCollectionVersionIds = {};
loki.collections.forEach(function (collection) {
that._prevCollectionVersionIds[collection.name] = collection.idbVersionId || null;
});
return finish(loki);
} catch (error) {
that._prevLokiVersionId = null;
that._prevCollectionVersionIds = {};
return finish(error);
}
});
};
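For orientation, the repacked structure returned by the `chunksToMap` helper (defined below) looks roughly like this for a database with a single `users` collection. This is a sketch derived from the code; values are already JSON-parsed because `_getAllChunks` parses every chunk before handing it over:

```js
// Sketch of chunksToMap's return value:
// {
//   loki: { /* database metadata, including idbVersionId and collection stubs */ },
//   chunkMap: {
//     users: {
//       metadata: { /* collection metadata, including its own idbVersionId */ },
//       dataChunks: [[/* docs 0-99 */], [/* docs 100-199 */] /* ... ascending order */]
//     }
//   }
// }
```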
IncrementalIndexedDBAdapter.prototype._sortChunksInPlace = function(chunks) {
// sort chunks in place to load data in the right order (ascending loki ids)
// on both Safari and Chrome, we'll get chunks in order like this: 0, 1, 10, 100...
var getSortKey = function(object) {
function chunksToMap(chunks) {
var loki;
var chunkMap = {};
sortChunksInPlace(chunks);
chunks.forEach(function(object) {
var key = object.key;
if (key.includes(".")) {
var segments = key.split(".");
if (segments.length === 3 && segments[1] === "chunk") {
return parseInt(segments[2], 10);
var value = object.value;
if (key === "loki") {
loki = value;
return;
} else if (key.includes(".")) {
var keySegments = key.split(".");
if (keySegments.length === 3 && keySegments[1] === "chunk") {
var colName = keySegments[0];
if (chunkMap[colName]) {
chunkMap[colName].dataChunks.push(value);
} else {
chunkMap[colName] = {
metadata: null,
dataChunks: [value],
};
}
return;
} else if (keySegments.length === 2 && keySegments[1] === "metadata") {
var name = keySegments[0];
if (chunkMap[name]) {
chunkMap[name].metadata = value;
} else {
chunkMap[name] = { metadata: value, dataChunks: [] };
}
return;
}
}
return -1; // consistent type must be returned
};
chunks.sort(function(a, b) {
var aKey = getSortKey(a),
bKey = getSortKey(b);
if (aKey < bKey) return -1;
if (aKey > bKey) return 1;
return 0;
console.error("Unknown chunk " + key);
throw new Error("Corrupted database - unknown chunk found");
});
};
IncrementalIndexedDBAdapter.prototype._populate = function(loki, chunkCollections) {
var that = this;
loki.collections.forEach(function(collectionStub, i) {
var chunkCollection = chunkCollections[collectionStub.name];
if (!loki) {
throw new Error("Corrupted database - missing database metadata");
}
return { loki: loki, chunkMap: chunkMap };
}
function populateLoki(loki, chunkMap) {
loki.collections.forEach(function populateCollection(collectionStub, i) {
var chunkCollection = chunkMap[collectionStub.name];
if (chunkCollection) {
// TODO: What if metadata is missing?
var collection = JSON.parse(chunkCollection.metadata);
if (!chunkCollection.metadata) {
throw new Error("Corrupted database - missing metadata chunk for " + collectionStub.name);
}
var collection = chunkCollection.metadata;
chunkCollection.metadata = null;
@@ -338,18 +523,11 @@
var dataChunks = chunkCollection.dataChunks;
dataChunks.forEach(function(chunkObj, i) {
var chunk = JSON.parse(chunkObj);
chunkObj = null; // make string available for GC
dataChunks[i] = null;
if (that.options.deserializeChunk) {
chunk = that.options.deserializeChunk(collection.name, chunk);
}
dataChunks.forEach(function populateChunk(chunk, i) {
chunk.forEach(function(doc) {
collection.data.push(doc);
});
dataChunks[i] = null;
});
}
});
};
}
@@ -382,5 +560,6 @@ IncrementalIndexedDBAdapter.prototype._initializeIDB = function(dbname, onError, onSuccess) {
that.idbInitInProgress = false;
that.idb = e.target.result;
var db = e.target.result;
that.idb = db;
if (!that.idb.objectStoreNames.contains('LokiIncrementalData')) {
if (!db.objectStoreNames.contains('LokiIncrementalData')) {
onError(new Error("Missing LokiIncrementalData"));
@@ -394,3 +573,8 @@ // Attempt to recover (after reload) by deleting database, since it's damaged anyway
that.idb.onversionchange = function(versionChangeEvent) {
db.onversionchange = function(versionChangeEvent) {
// Ignore if database was deleted and recreated in the meantime
if (that.idb !== db) {
return;
}
DEBUG && console.log('IDB version change', versionChangeEvent);
@@ -404,2 +588,3 @@ // This function will be called if another connection changed DB version
that.idb.close();
that.idb = null;
if (that.options.onversionchange) {
@@ -420,3 +605,3 @@ that.options.onversionchange(versionChangeEvent);
that.idbInitInProgress = false;
console.error("IndexeddB open error", e);
console.error("IndexedDB open error", e);
onError(e);
@@ -426,7 +611,7 @@ };
IncrementalIndexedDBAdapter.prototype._saveChunks = function(dbname, chunks, callback) {
IncrementalIndexedDBAdapter.prototype._getAllChunks = function(dbname, callback) {
var that = this;
if (!this.idb) {
this._initializeIDB(dbname, callback, function() {
that._saveChunks(dbname, chunks, callback);
that._getAllChunks(dbname, callback);
});
@@ -436,65 +621,96 @@ return;
if (this.operationInProgress) {
throw new Error("Error while saving to database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
}
var tx = this.idb.transaction(['LokiIncrementalData'], "readonly");
var store = tx.objectStore('LokiIncrementalData');
this.operationInProgress = true;
var deserializeChunk = this.options.deserializeChunk;
var tx = this.idb.transaction(['LokiIncrementalData'], "readwrite");
tx.oncomplete = function() {
that.operationInProgress = false;
DEBUG && console.timeEnd("exportDatabase");
callback();
};
// If there are a lot of chunks (>100), don't request them all in one go, but in multiple
// "megachunks" (chunks of chunks). This improves concurrency, as the main thread is already busy
// while the IDB process is still fetching data. Details: https://github.com/techfort/LokiJS/pull/874
function getMegachunks(keys) {
var megachunkCount = that.megachunkCount;
var keyRanges = createKeyRanges(keys, megachunkCount);
tx.onerror = function(e) {
that.operationInProgress = false;
callback(e);
};
var allChunks = [];
var megachunksReceived = 0;
tx.onabort = function(e) {
that.operationInProgress = false;
callback(e);
};
function processMegachunk(e, megachunkIndex, keyRange) {
// var debugMsg = 'processing chunk ' + megachunkIndex + ' (' + keyRange.lower + ' -- ' + keyRange.upper + ')'
// DEBUG && console.time(debugMsg);
var megachunk = e.target.result;
megachunk.forEach(function (chunk, i) {
parseChunk(chunk, deserializeChunk);
allChunks.push(chunk);
megachunk[i] = null; // gc
});
// DEBUG && console.timeEnd(debugMsg);
var store = tx.objectStore('LokiIncrementalData');
megachunksReceived += 1;
if (megachunksReceived === megachunkCount) {
callback(allChunks);
}
}
chunks.forEach(function(object) {
store.put(object);
});
};
// Stagger megachunk requests - request the first half, then request the second half as
// results from the first come back. This further improves concurrency.
function requestMegachunk(index) {
var keyRange = keyRanges[index];
idbReq(store.getAll(keyRange), function(e) {
if (index < megachunkCount / 2) {
requestMegachunk(index + megachunkCount / 2);
}
IncrementalIndexedDBAdapter.prototype._getAllChunks = function(dbname, callback) {
var that = this;
if (!this.idb) {
this._initializeIDB(dbname, callback, function() {
that._getAllChunks(dbname, callback);
});
return;
processMegachunk(e, index, keyRange);
}, function(e) {
callback(e);
});
}
for (var i = 0; i < megachunkCount / 2; i += 1) {
requestMegachunk(i);
}
}
if (this.operationInProgress) {
throw new Error("Error while loading database - another operation is already in progress. Please use throttledSaves=true option on Loki object");
function getAllChunks() {
idbReq(store.getAll(), function(e) {
var allChunks = e.target.result;
allChunks.forEach(function (chunk) {
parseChunk(chunk, deserializeChunk);
});
callback(allChunks);
}, function(e) {
callback(e);
});
}
this.operationInProgress = true;
function getAllKeys() {
idbReq(store.getAllKeys(), function(e) {
var keys = e.target.result.sort();
if (keys.length > 100) {
getMegachunks(keys);
} else {
getAllChunks();
}
}, function(e) {
callback(e);
});
var tx = this.idb.transaction(['LokiIncrementalData'], "readonly");
if (that.options.onFetchStart) {
that.options.onFetchStart();
}
}
var request = tx.objectStore('LokiIncrementalData').getAll();
request.onsuccess = function(e) {
that.operationInProgress = false;
var chunks = e.target.result;
callback(chunks);
};
getAllKeys();
};
request.onerror = function(e) {
that.operationInProgress = false;
callback(e);
};
if (this.options.onFetchStart) {
this.options.onFetchStart();
function parseChunk(chunk, deserializeChunk) {
chunk.value = JSON.parse(chunk.value);
if (deserializeChunk) {
var segments = chunk.key.split('.');
if (segments.length === 3 && segments[1] === 'chunk') {
var collectionName = segments[0];
chunk.value = deserializeChunk(collectionName, chunk.value);
}
}
};
}
@@ -526,2 +742,5 @@ /**
this._prevLokiVersionId = null;
this._prevCollectionVersionIds = {};
if (this.idb) {
@@ -553,4 +772,68 @@ this.idb.close();
function randomVersionId() {
// Appears to have enough entropy for chunk version IDs
// (it only has to differ from enough of its own previous versions that no writer
// mistakes a new version for an earlier one; it doesn't need to be globally unique)
return Math.random().toString(36).substring(2);
}
function _getSortKey(object) {
var key = object.key;
if (key.includes(".")) {
var segments = key.split(".");
if (segments.length === 3 && segments[1] === "chunk") {
return parseInt(segments[2], 10);
}
}
return -1; // consistent type must be returned
}
function sortChunksInPlace(chunks) {
// sort chunks in place to load data in the right order (ascending loki ids)
// on both Safari and Chrome, we'll get chunks in order like this: 0, 1, 10, 100...
chunks.sort(function(a, b) {
var aKey = _getSortKey(a),
bKey = _getSortKey(b);
if (aKey < bKey) return -1;
if (aKey > bKey) return 1;
return 0;
});
}
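The numeric sort key matters because IDB hands keys back in lexicographic order; a small sketch:

```js
// Sketch: lexicographically, 'users.chunk.10' sorts before 'users.chunk.2'.
var chunks = [{ key: 'users.chunk.10' }, { key: 'users.chunk.2' }, { key: 'users.metadata' }];
sortChunksInPlace(chunks);
// resulting key order: 'users.metadata' (-1), 'users.chunk.2' (2), 'users.chunk.10' (10)
```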
function createKeyRanges(keys, count) {
var countPerRange = Math.floor(keys.length / count);
var keyRanges = [];
var minKey, maxKey;
for (var i = 0; i < count; i += 1) {
minKey = keys[countPerRange * i];
maxKey = keys[countPerRange * (i + 1)];
if (i === 0) {
// ... < maxKey
keyRanges.push(IDBKeyRange.upperBound(maxKey, true));
} else if (i === count - 1) {
// >= minKey
keyRanges.push(IDBKeyRange.lowerBound(minKey));
} else {
// >= minKey && < maxKey
keyRanges.push(IDBKeyRange.bound(minKey, maxKey, false, true));
}
}
return keyRanges;
}
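For example, eight sorted keys split into four ranges; a sketch of the bounds this produces:

```js
// Sketch: keys.length = 8, count = 4, so countPerRange = 2.
var ranges = createKeyRanges(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 4);
// ranges[0]: IDBKeyRange.upperBound('c', true)        -> k < 'c'        (a, b)
// ranges[1]: IDBKeyRange.bound('c', 'e', false, true) -> 'c' <= k < 'e' (c, d)
// ranges[2]: IDBKeyRange.bound('e', 'g', false, true) -> 'e' <= k < 'g' (e, f)
// ranges[3]: IDBKeyRange.lowerBound('g')              -> k >= 'g'       (g, h)
```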
function idbReq(request, onsuccess, onerror) {
request.onsuccess = function (e) {
try {
return onsuccess(e);
} catch (error) {
onerror(error);
}
};
request.onerror = onerror;
return request;
}
return IncrementalIndexedDBAdapter;
})();
});