lmdb - npm Package Compare versions

Comparing version 1.6.10 to 2.0.0-alpha1

benchmark/index.js
'use strict';
const { Worker, isMainThread, parentPort, threadId } = require('worker_threads');
const { isMaster, fork } = require('cluster');
import { Worker, isMainThread, parentPort, threadId } from 'worker_threads';
import { isMaster, fork } from 'cluster';
import inspector from 'inspector'
var crypto = require('crypto');
var path = require('path');
var testDirPath = path.resolve(__dirname, './benchdata');
var fs = require('fs');
var rimraf = require('rimraf');
var benchmark = require('benchmark');
var testDirPath = new URL('./benchdata', import.meta.url).toString().slice(8);
import fs from 'fs';
import rimraf from 'rimraf';
import benchmark from 'benchmark';
var suite = new benchmark.Suite();
const { open, lmdbNativeFunctions } = require('..');
import { open } from '../index.js';
var env;

@@ -33,13 +31,11 @@ var dbi;

let bigString = 'big'
for (let i = 0; i < 9; i++) {
for (let i = 0; i < 10; i++) {
bigString += bigString
}
console.log('bigString', bigString.length)
data.more = bigString
//data.more = bigString
var c = 0
let result
let outstanding = 0
let iteration = 1
let lastResult = Promise.resolve()
function setData(deferred) {

@@ -64,2 +60,32 @@ /* result = store.transactionAsync(() => {

}
function batchData(deferred) {
result = store.batch(() => {
for (let i = 0; i < 10; i++) {
let key = (c += 357) % total
store.put(key, data)
}
})
}
let lastResult
function batchDataAdd(deferred) {
outstanding++
result = store.batch(() => {
for (let i = 0; i < 10; i++) {
let key = (c += 357)
store.put(key, data)
}
}).then(() => {
outstanding--
})
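// back-pressure: resolve immediately while the backlog is small, yield an event turn as it grows, and delay in proportion to the backlog once it is large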
if (outstanding < 500) {
deferred.resolve()
} else if (outstanding < 10000) {
setImmediate(() => {
deferred.resolve()
})
} else {
console.log('delaying')
setTimeout(() => deferred.resolve(), outstanding >> 3)
}
}

@@ -82,2 +108,12 @@ function syncTxn() {

}
let a = Buffer.from('this id\0\0\0\0\0')
let b = Buffer.from('mmmmmmore text')
//b = b.subarray(2,b.length)
let b2 = Buffer.from('the similar key')
let b3 = Buffer.from('this is very similar')
function keyComparison() {
try {
result = store.db.compareKeys(a, b2)
} catch(error) { console.log(error) }
}
function getRange() {

@@ -87,3 +123,3 @@ let start = (c += 357) % total

for (let entry of store.getRange({
start,
start,
end: start + 10

@@ -100,4 +136,6 @@ })) {

if (isMainThread && isMaster) {
var inspector = require('inspector')
//inspector.open(9330, null, true); debugger
try {
//inspector.open(9330, null, true); //debugger
//debugger
} catch(error) {}

@@ -111,3 +149,3 @@ function cleanup(done) {

// setup clean directory
fs.mkdirSync(testDirPath, { recursive: true})
fs.mkdirSync(testDirPath, { recursive: true });
done();

@@ -122,2 +160,4 @@ });

//winMemoryPriority: 4,
//eventTurnBatching: false,
//overlappingSync: true,
})

@@ -127,3 +167,3 @@ store = rootStore.openDB('testing', {

sharedStructuresKey: 100000000,
keyIsUint32: true,
keyIsUint32: true,
})

@@ -134,3 +174,3 @@ let lastPromise

}
return lastPromise.then(() => {
return lastPromise?.then(() => {
console.log('setup completed');

@@ -146,11 +186,16 @@ })

await setup();
//suite.add('compare keys', keyComparison);
//suite.add('syncTxn', syncTxn);
suite.add('getRange', getRange);
suite.add('put', {
//suite.add('getRange', getRange);
suite.add('setData', {
defer: true,
fn: setData
});
/*suite.add('put-batch', {
defer: true,
fn: batchDataAdd
});
suite.add('get', getData);
suite.add('plainJSON', plainJSON);
suite.add('getBinary', getBinary);
suite.add('getBinary', getBinary);*/
suite.add('getBinaryFast', getBinaryFast);

@@ -169,2 +214,3 @@ suite.on('cycle', function (event) {

console.log('Fastest is ' + this.filter('fastest').map('name'));
return
var numCPUs = require('os').cpus().length;

@@ -198,3 +244,3 @@ console.log('Test opening/closing threads ' + numCPUs + ' threads');

sharedStructuresKey: 100000000,
keyIsUint32: true,
keysUse32LE: true,
})

@@ -201,0 +247,0 @@

@@ -1,5 +0,5 @@

const { WeakLRUCache } = require('weak-lru-cache')
import { WeakLRUCache } from 'weak-lru-cache/index.js'
let getLastVersion
const mapGet = Map.prototype.get
exports.CachingStore = Store => class extends Store {
export const CachingStore = Store => class extends Store {
constructor(dbName, options) {

@@ -9,21 +9,17 @@ super(dbName, options)

this.env.cacheCommitter = true
this.on('aftercommit', ({ operations, results }) => {
results = results || []
let activeCache
for (let i = 0, l = operations.length; i < l; i++) {
let operation = operations[i]
if (typeof operation[1] === 'object') {
if (activeCache) {
if (results[i] === 0) {
let expirationPriority = ((operation[1] || 0).length || 0) >> 10
let entry = mapGet.call(activeCache, operation[0])
if (entry)
activeCache.used(entry, expirationPriority) // this will enter it into the LRFU
} else
activeCache.delete(operation[0]) // just delete it from the map
this.on('aftercommit', ({ next, last }) => {
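// walk the linked list of write operations from this commit (next through last)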
do {
let store = next.store
if (store) {
if (next.flag & 1)
next.store.cache.delete(next.key) // just delete it from the map
else {
let expirationPriority = next.valueSize >> 10
let cache = next.store.cache
let entry = mapGet.call(cache, next.key)
if (entry)
cache.used(entry, expirationPriority) // this will enter it into the LRFU
}
} else if (operation && operation.length === undefined) {
activeCache = operation.cachingDb && operation.cachingDb.cache
}
}
} while (next != last && (next = next.next))
})

@@ -78,5 +74,5 @@ }

// sync operation, immediately add to cache, otherwise keep it pinned in memory until it is committed
let entry = this.cache.setValue(id, value, result.isSync ? 0 : -1)
let entry = this.cache.setValue(id, value, !result || result.isSync ? 0 : -1)
if (version !== undefined)
entry.version = version
entry.version = typeof version === 'object' ? version.version : version
}

@@ -90,4 +86,5 @@ return result

let entry = this.cache.setValue(id, value)
if (version !== undefined)
entry.version = version
if (version !== undefined) {
entry.version = typeof version === 'object' ? version.version : version
}
} else // it is possible that a value used to exist here

@@ -114,4 +111,4 @@ this.cache.delete(id)

}
exports.setGetLastVersion = (get) => {
export function setGetLastVersion(get) {
getLastVersion = get
}

@@ -1,15 +0,25 @@

const fs = require('fs')
const { extname, basename, dirname} = require('path')
const { ArrayLikeIterable } = require('./util/ArrayLikeIterable')
const when = require('./util/when')
const EventEmitter = require('events')
Object.assign(exports, require('node-gyp-build')(__dirname))
const { Env, Cursor, Compression, getBufferForAddress, getAddress, keyValueToBuffer, bufferToKeyValue } = exports
const { CachingStore, setGetLastVersion } = require('./caching')
const { writeKey, readKey } = require('ordered-binary')
const os = require('os')
import fs from 'fs' // TODO: or Deno
import { extname, basename, dirname} from 'path'
import EventEmitter from 'events'
import { Env, Cursor, Compression, getBufferForAddress, getAddress } from './native.js'
import { CachingStore, setGetLastVersion } from './caching.js'
import { addQueryMethods } from './query.js'
import { addWriteMethods, ABORT } from './writer.js'
export { ABORT } from './writer.js'
import { applyKeyHandling } from './keys.js'
export { toBufferKey as keyValueToBuffer, compareKeys, compareKeys as compareKey, fromBufferKey as bufferToKeyValue } from 'ordered-binary/index.js'
import { createRequire } from 'module'
import { Encoder as MsgpackrEncoder } from 'msgpackr'
export { levelup } from './level.js'
const require = createRequire(import.meta.url)
import os from 'os'
setGetLastVersion(getLastVersion)
Uint8ArraySlice = Uint8Array.prototype.slice
const keyBuffer = Buffer.allocUnsafeSlow(2048)
const keyBufferView = new DataView(keyBuffer.buffer, 0, 2048) // max key size is actually 1978
const Uint8ArraySlice = Uint8Array.prototype.slice
const keyBytes = Buffer.allocUnsafeSlow(2048)
const keyBuffer = keyBytes.buffer
const keyBytesView = keyBytes.dataView = new DataView(keyBytes.buffer, 0, 2048) // max key size is actually 1978
keyBytes.uint32 = new Uint32Array(keyBuffer, 0, 512)
keyBytes.float64 = new Float64Array(keyBuffer, 0, 256)
keyBytes.uint32.address = keyBytes.address = keyBuffer.address = getAddress(keyBuffer)
const buffers = []

@@ -23,5 +33,4 @@

}
const ABORT = {}
const allDbs = exports.allDbs = new Map()
export const allDbs = new Map()
const SYNC_PROMISE_RESULT = Promise.resolve(true)

@@ -31,22 +40,3 @@ const SYNC_PROMISE_FAIL = Promise.resolve(false)

SYNC_PROMISE_FAIL.isSync = true
const LAST_KEY = String.fromCharCode(0xffff)
const LAST_BUFFER_KEY = Buffer.from([255, 255, 255, 255])
const FIRST_BUFFER_KEY = Buffer.from([0])
const ITERATOR_DONE = { done: true, value: undefined }
const writeUint32Key = (key, target, start) => {
(target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).setUint32(start, key, true)
return start + 4
}
const readUint32Key = (target, start) => {
return (target.dataView || (target.dataView = new DataView(target.buffer, 0, target.length))).getUint32(start, true)
}
const writeBufferKey = (key, target, start) => {
if (key.length > 1978)
throw new Error('Key buffer is too long')
target.set(key, start)
return key.length + start
}
const readBufferKey = (target, start, end) => {
return Uint8ArraySlice.call(target, start, end)
}
let env

@@ -56,6 +46,4 @@ let defaultCompression

const MDB_SET_KEY = 0, MDB_SET_RANGE = 1, MDB_GET_BOTH_RANGE = 2, MDB_GET_CURRENT = 3, MDB_FIRST = 4, MDB_LAST = 5, MDB_NEXT = 6, MDB_NEXT_NODUP = 7, MDB_NEXT_DUP = 8, MDB_PREV = 9, MDB_PREV_NODUP = 10, MDB_PREV_DUP = 11
exports.open = open
exports.ABORT = ABORT
let abortedNonChildTransactionWarn
function open(path, options) {
export function open(path, options) {
let env = new Env()

@@ -67,4 +55,3 @@ let committingWrites

let transactionWarned
let readTxn, writeTxn, pendingBatch, currentCommit, runNextBatch, readTxnRenewed, cursorTxns = []
let renewId = 1
let readTxn, writeTxn, pendingBatch, currentCommit, runNextBatch, readTxnRenewed
if (typeof path == 'object' && !options) {

@@ -86,3 +73,4 @@ options = path

remapChunks,
keyBuffer,
keyBytes,
//overlappingSync: true,
// default map size limit of 4 exabytes when using remapChunks, since it is not preallocated and we can

@@ -110,9 +98,13 @@ // make it super huge.

threshold: 1000,
dictionary: fs.readFileSync(require.resolve('./dict/dict.txt')),
})
} else
options.compression = new Compression(Object.assign({
dictionary: fs.readFileSync(new URL('./dict/dict.txt', import.meta.url.replace(/dist[\\\/]index.cjs$/, ''))),
})
defaultCompression.threshold = 1000
} else {
let compressionOptions = Object.assign({
threshold: 1000,
dictionary: fs.readFileSync(require.resolve('./dict/dict.txt')),
}), options.compression)
dictionary: fs.readFileSync(new URL('./dict/dict.txt', import.meta.url.replace(/dist[\\\/]index.cjs$/, ''))),
}, options.compression)
options.compression = new Compression(compressionOptions)
options.compression.threshold = compressionOptions.threshold
}
}

@@ -129,17 +121,4 @@

} catch(error) {
if (error.message.startsWith('MDB_INVALID')) {
require('./util/upgrade-lmdb').upgrade(path, options, open)
env = new Env()
env.open(options)
} else
throw error
throw error
}
/* let filePath = noSubdir ? path : (path + '/data.mdb')
if (fs.statSync(filePath).size == env.info().mapSize && !options.remapChunks) {
// if the file size is identical to the map size, that means the OS is taking full disk space for
// mapping and we need to revert back to remapChunks
env.close()
options.remapChunks = true
env.open(options)
}*/
env.readerCheck() // clear out any stale entries

@@ -150,3 +129,3 @@ function renewReadTxn() {

else
readTxn = env.beginTxn(READING_TNX)
readTxn = env.beginTxn(0x20000)
readTxnRenewed = setImmediate(resetReadTxn)

@@ -157,7 +136,6 @@ return readTxn

if (readTxnRenewed) {
renewId++
LMDBStore.onReadReset()
readTxnRenewed = null
if (readTxn.cursorCount - (readTxn.renewingCursorCount || 0) > 0) {
readTxn.onlyCursor = true
cursorTxns.push(readTxn)
readTxn = null

@@ -177,12 +155,8 @@ }

const openDB = () => {
try {
this.db = env.openDbi(Object.assign({
name: dbName,
create: true,
txn: writeTxn,
}, dbOptions))
this.db.name = dbName || null
} catch(error) {
handleError(error, null, null, openDB)
}
this.db = env.openDbi(Object.assign({
name: dbName,
create: true,
txn: env.writeTxn,
}, dbOptions))
this.db.name = dbName || null
}

@@ -205,2 +179,3 @@ if (dbOptions.compression && !(dbOptions.compression instanceof Compression)) {

this.name = dbName
this.status = 'open'
this.env = env

@@ -222,6 +197,6 @@ this.reads = 0

if (!this.encoding || this.encoding == 'msgpack' || this.encoding == 'cbor') {
this.encoder = this.decoder = new (this.encoding == 'cbor' ? require('cbor-x').Encoder : require('msgpackr').Encoder)
this.encoder = this.decoder = new (this.encoding == 'cbor' ? require('cbor-x').Encoder : MsgpackrEncoder)
(Object.assign(this.sharedStructuresKey ?
this.setupSharedStructures() : {
copyBuffers: true // need to copy any embedded buffers that are found since we use unsafe buffers
copyBuffers: true, // need to copy any embedded buffers that are found since we use unsafe buffers
}, options, dbOptions))

@@ -232,28 +207,4 @@ } else if (this.encoding == 'json') {

}
} else if (this.encoding == 'ordered-binary') {
this.encoder = this.decoder = {
encode(value) {
if (savePosition > 6200)
allocateSaveBuffer()
let start = savePosition
savePosition = writeKey(value, saveBuffer, start)
let buffer = saveBuffer.subarray(start, savePosition)
savePosition = (savePosition + 7) & 0xfffff8
return buffer
},
decode(buffer, end) { return readKey(buffer, 0, end) },
writeKey,
readKey,
}
}
if (this.keyIsUint32) {
this.writeKey = writeUint32Key
this.readKey = readUint32Key
} else if (this.keyIsBuffer) {
this.writeKey = writeBufferKey
this.readKey = readBufferKey
} else {
this.writeKey = writeKey
this.readKey = readKey
}
applyKeyHandling(this)
allDbs.set(dbName ? name + '-' + dbName : name, this)

@@ -279,7 +230,9 @@ stores.push(this)

}
open(dbOptions, callback) {
let db = this.openDB(dbOptions)
if (callback)
callback(null, db)
return db
}
transactionAsync(callback, asChild) {
if (writeTxn) {
// already nested in a transaction, just execute and return
return callback()
}
let lastOperation

@@ -322,90 +275,5 @@ let after, strictOrder

}
childTransaction(callback) {
if (useWritemap)
throw new Error('Child transactions are not supported in writemap mode')
if (writeTxn) {
let parentTxn = writeTxn
let childTxn = writeTxn = env.beginTxn(null, parentTxn)
try {
return when(callback(), (result) => {
writeTxn = parentTxn
if (result === ABORT)
childTxn.abort()
else
childTxn.commit()
return result
}, (error) => {
writeTxn = parentTxn
childTxn.abort()
throw error
})
} catch(error) {
writeTxn = parentTxn
childTxn.abort()
throw error
}
}
return this.transactionAsync(callback, true)
}
transaction(callback, abort) {
if (!transactionWarned) {
console.warn('transaction is deprecated, use transactionSync if you want a synchronous transaction or transactionAsync for an asynchronous transaction. In the future this will always call transactionAsync.')
transactionWarned = true
}
let result = this.transactionSync(callback, abort)
return abort ? ABORT : result
}
transactionSync(callback, abort) {
if (writeTxn) {
if (!useWritemap && !this.cache)
// already nested in a transaction, execute as child transaction (if possible) and return
return this.childTransaction(callback)
let result = callback() // else just run in current transaction
if (result == ABORT && !abortedNonChildTransactionWarn) {
console.warn('Can not abort a transaction inside another transaction with ' + (this.cache ? 'caching enabled' : 'useWritemap enabled'))
abortedNonChildTransactionWarn = true
}
return result
}
let txn
try {
this.transactions++
txn = writeTxn = env.beginTxn()
/*if (scheduledOperations && runNextBatch) {
runNextBatch((operations, callback) => {
try {
callback(null, this.commitBatchNow(operations))
} catch (error) {
callback(error)
}
})
}
TODO: To reenable forced sequential writes, we need to re-execute the operations if we get an env resize
*/
return when(callback(), (result) => {
try {
if (result === ABORT)
txn.abort()
else {
txn.commit()
resetReadTxn()
}
writeTxn = null
return result
} catch(error) {
if (error.message == 'The transaction is already closed.') {
return result
}
return handleError(error, this, txn, () => this.transaction(callback))
}
}, (error) => {
return handleError(error, this, txn, () => this.transaction(callback))
})
} catch(error) {
return handleError(error, this, txn, () => this.transaction(callback))
}
}
getSharedBufferForGet(id) {
let txn = (writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()))
lastSize = this.keyIsCompatibility ? txn.getBinaryShared(id) : this.db.get(this.writeKey(id, keyBuffer, 0))
lastSize = this.keyIsCompatibility ? txn.getBinaryShared(id) : this.db.get(this.writeKey(id, keyBytes, 0))
if (lastSize === 0xffffffff) { // not found code

@@ -415,5 +283,5 @@ return //undefined

return lastSize
lastSize = keyBufferView.getUint32(0, true)
let bufferIndex = keyBufferView.getUint32(12, true)
lastOffset = keyBufferView.getUint32(8, true)
lastSize = keyBytesView.getUint32(0, true)
let bufferIndex = keyBytesView.getUint32(12, true)
lastOffset = keyBytesView.getUint32(8, true)
let buffer = buffers[bufferIndex]

@@ -435,8 +303,8 @@ let startOffset

getSizeBinaryFast(id) {
(writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()))
lastSize = this.keyIsCompatibility ? this.db.getByPrimitive(id) : this.db.getByBinary(this.writeKey(id, keyBuffer, 0))
(env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()))
lastSize = this.db.getByBinary(this.writeKey(id, keyBytes, 0))
}
getString(id) {
let string = (writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()))
.getUtf8(this.db, id)
(env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()))
let string = this.db.getStringByBinary(this.writeKey(id, keyBytes, 0))
if (string)

@@ -488,619 +356,22 @@ lastSize = string.length

}
ifNoExists(key, callback) {
return this.ifVersion(key, null, callback)
}
ifVersion(key, version, callback) {
if (typeof version != 'number') {
if (version == null) {
if (version === null)
version = -4.2434325325532E-199 // NO_EXIST_VERSION
else {// if undefined, just do callback without any condition being added
callback()
// TODO: if we are inside another ifVersion, use that promise, or use ANY_VERSION
return pendingBatch ? pendingBatch.unconditionalResults : Promise.resolve(true) // be consistent in returning a promise, indicate success
}
} else {
throw new Error('Version must be a number or null')
}
}
let scheduledOperations = this.getScheduledOperations()
let index = scheduledOperations.push([key, version]) - 1
try {
callback()
let commit = this.scheduleCommit()
return commit.results.then((writeResults) => {
if (writeResults[index] === 0)
return true
if (writeResults[index] === 3) {
throw new Error('The key size was 0 or too large')
}
return false
})
} finally {
scheduledOperations.push(false) // reset condition
}
}
doesExist(key, versionOrValue) {
let txn
try {
if (writeTxn) {
txn = writeTxn
} else {
txn = readTxnRenewed ? readTxn : renewReadTxn()
}
if (versionOrValue === undefined) {
this.getSizeBinaryFast(key)
return lastSize !== 0xffffffff
}
else if (this.useVersions) {
this.getSizeBinaryFast(key)
return lastSize !== 0xffffffff && matches(getLastVersion(), versionOrValue)
}
else {
let cursor = new Cursor(txn, this.db)
if (this.encoder) {
versionOrValue = this.encoder.encode(versionOrValue)
}
if (typeof versionOrValue == 'string')
versionOrValue = Buffer.from(versionOrValue)
let result = cursor.goToDup(key, versionOrValue) !== undefined
cursor.close()
return result
}
} catch(error) {
return handleError(error, this, txn, () => this.doesExist(key, versionOrValue))
if (!env.writeTxn)
readTxnRenewed ? readTxn : renewReadTxn()
if (versionOrValue === undefined) {
this.getSizeBinaryFast(key)
return lastSize !== 0xffffffff
}
}
getScheduledOperations() {
if (!scheduledOperations) {
scheduledOperations = []
scheduledOperations.bytes = 0
else if (this.useVersions) {
this.getSizeBinaryFast(key)
return lastSize !== 0xffffffff && matches(getLastVersion(), versionOrValue)
}
if (scheduledOperations.store != this) {
// issue action to switch dbs
scheduledOperations.store = this
scheduledOperations.push(this.db)
}
return scheduledOperations
}
putToBinary(id, value, version, ifVersion) {
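// pack the operation code, version, ifVersion, key size, key, and value length directly into the shared write buffer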
let operations = this.getScheduledOperations()
let position = writeBuffer.position || 0
writeUint32Array[(position++) << 1] = 0 // write the operation
writeFloat64Array[position++] = version
writeFloat64Array[position++] = ifVersion
let keySize = this.writeKey(id, writeBuffer, (position + 2) << 3)
writeUint32Array[(position << 1) - 3] = keySize
if (this.encoder) {
//if (!(value instanceof Uint8Array)) TODO: in a future version, directly store buffers that are provided
value = this.encoder.encode(value)
}
writeUint32Array[(position++ << 1) - 2] = keySize
writeUint32Array[(position++) << 1] = value.length
writeFloat64Array[position] = 0
position += ((keySize - 1) >> 3) + 1
writeBuffer.position = position
}
put(id, value, version, ifVersion) {
if (id.length > 1978) {
throw new Error('Key is larger than maximum key size (1978)')
}
this.writes++
if (writeTxn) {
if (ifVersion !== undefined) {
this.get(id)
let previousVersion = this.get(id) ? getLastVersion() : null
if (!matches(previousVersion, ifVersion)) {
return SYNC_PROMISE_FAIL
}
}
putSync.call(this, id, value, version)
return SYNC_PROMISE_RESULT
}
if (this.encoder) {
//if (!(value instanceof Uint8Array)) TODO: in a future version, directly store buffers that are provided
value = this.encoder.encode(value)
} else if (typeof value != 'string' && !(value instanceof Uint8Array))
throw new Error('Invalid value to put in database ' + value + ' (' + (typeof value) +'), consider using encoder')
let operations = this.getScheduledOperations()
let index = operations.push(ifVersion == null ? version == null ? [id, value] : [id, value, version] : [id, value, version, ifVersion]) - 1
// track the size of the scheduled operations (and include the approx size of the array structure too)
operations.bytes += (id.length || 6) + (value && value.length || 0) + 100
let commit = this.scheduleCommit()
return ifVersion === undefined ? commit.unconditionalResults : // TODO: Technically you can get a bad key if an array is passed in there is no ifVersion and still fail
commit.results.then((writeResults) => {
if (writeResults[index] === 0)
return true
if (writeResults[index] === 3) {
throw new Error('The key size was 0 or too large')
}
return false
})
}
putSync(id, value, version) {
if (id.length > 1978) {
throw new Error('Key is larger than maximum key size (1978)')
}
let localTxn, hadWriteTxn = writeTxn
try {
this.writes++
if (!writeTxn)
localTxn = writeTxn = env.beginTxn()
if (this.encoder)
value = this.encoder.encode(value)
if (typeof value == 'string') {
writeTxn.putUtf8(this.db, id, value, version)
} else {
if (!(value instanceof Uint8Array)) {
throw new Error('Invalid value type ' + typeof value + ' used ' + value)
}
writeTxn.putBinary(this.db, id, value, version)
}
if (localTxn) {
writeTxn.commit()
writeTxn = null
resetReadTxn()
}
} catch(error) {
if (hadWriteTxn)
throw error // if we are in a transaction, the whole transaction probably needs to restart
return handleError(error, this, localTxn, () => this.putSync(id, value, version))
}
}
removeSync(id, ifVersionOrValue) {
if (id.length > 1978) {
throw new Error('Key is larger than maximum key size (1978)')
}
let localTxn, hadWriteTxn = writeTxn
try {
if (!writeTxn)
localTxn = writeTxn = env.beginTxn()
let deleteValue
if (ifVersionOrValue !== undefined) {
if (this.useVersions) {
let previousVersion = this.get(id) ? getLastVersion() : null
if (!matches(previousVersion, ifVersionOrValue))
return false
} else if (this.encoder)
deleteValue = this.encoder.encode(ifVersionOrValue)
else
deleteValue = ifVersionOrValue
}
this.writes++
let result
if (deleteValue)
result = writeTxn.del(this.db, id, deleteValue)
else
result = writeTxn.del(this.db, id)
if (localTxn) {
writeTxn.commit()
writeTxn = null
resetReadTxn()
}
return result // object found and deleted
} catch(error) {
if (hadWriteTxn)
throw error // if we are in a transaction, the whole transaction probably needs to restart
return handleError(error, this, localTxn, () => this.removeSync(id))
}
}
remove(id, ifVersionOrValue) {
if (id.length > 1978) {
throw new Error('Key is larger than maximum key size (1978)')
}
this.writes++
if (writeTxn) {
if (removeSync.call(this, id, ifVersionOrValue) === false)
return SYNC_PROMISE_FAIL
return SYNC_PROMISE_RESULT
}
let scheduledOperations = this.getScheduledOperations()
let operation
if (ifVersionOrValue === undefined)
operation = [id]
else if (this.useVersions)
operation = [id, undefined, undefined, ifVersionOrValue] // version condition
else {
if (this.encoder)
operation = [id, this.encoder.encode(ifVersionOrValue), true]
else
operation = [id, ifVersionOrValue, true]
}
let index = scheduledOperations.push(operation) - 1 // remove specific values
scheduledOperations.bytes += (id.length || 6) + 100
let commit = this.scheduleCommit()
return ifVersionOrValue === undefined ? commit.unconditionalResults :
commit.results.then((writeResults) => {
if (writeResults[index] === 0)
return true
if (writeResults[index] === 3) {
throw new Error('The key size was 0 or too large')
}
return false
})
}
getValues(key, options) {
let defaultOptions = {
key,
valuesForKey: true
}
if (options && options.snapshot === false)
throw new Error('Can not disable snapshots for getValues')
return this.getRange(options ? Object.assign(defaultOptions, options) : defaultOptions)
}
getKeys(options) {
if (!options)
options = {}
options.values = false
return this.getRange(options)
}
getCount(options) {
if (!options)
options = {}
options.onlyCount = true
return this.getRange(options)[Symbol.iterator]()
}
getKeysCount(options) {
if (!options)
options = {}
options.onlyCount = true
options.values = false
return this.getRange(options)[Symbol.iterator]()
}
getValuesCount(key, options) {
if (!options)
options = {}
options.key = key
options.valuesForKey = true
options.onlyCount = true
return this.getRange(options)[Symbol.iterator]()
}
getRange(options) {
let iterable = new ArrayLikeIterable()
if (!options)
options = {}
let includeValues = options.values !== false
let includeVersions = options.versions
let valuesForKey = options.valuesForKey
let limit = options.limit
let db = this.db
let snapshot = options.snapshot
iterable[Symbol.iterator] = () => {
let currentKey = valuesForKey ? options.key : options.start
const reverse = options.reverse
let count = 0
let cursor, cursorRenewId
let txn
let flags = (includeValues ? 0x100 : 0) | (reverse ? 0x400 : 0) | (valuesForKey ? 0x800 : 0)
function resetCursor() {
try {
if (cursor)
finishCursor()
txn = writeTxn || (readTxnRenewed ? readTxn : renewReadTxn())
cursor = !writeTxn && db.availableCursor
if (cursor) {
db.availableCursor = null
if (db.cursorTxn != txn)
cursor.renew(txn)
else// if (db.currentRenewId != renewId)
flags |= 0x2000
} else {
cursor = new Cursor(txn, db)
}
txn.cursorCount = (txn.cursorCount || 0) + 1 // track transaction so we always use the same one
if (snapshot === false) {
cursorRenewId = renewId // use shared read transaction
txn.renewingCursorCount = (txn.renewingCursorCount || 0) + 1 // need to know how many are renewing cursors
}
} catch(error) {
if (cursor) {
try {
cursor.close()
} catch(error) { }
}
return handleError(error, this, txn, resetCursor)
}
if (this.encoder) {
versionOrValue = this.encoder.encode(versionOrValue)
}
resetCursor()
let store = this
if (options.onlyCount) {
flags |= 0x1000
let count = position(options.offset)
finishCursor()
return count
}
function position(offset) {
let keySize = store.writeKey(currentKey, keyBuffer, 0)
let endAddress
if (valuesForKey) {
if (options.start === undefined && options.end === undefined)
endAddress = 0
else {
let startAddress
if (store.encoder.writeKey) {
startAddress = BigInt(saveKey(options.start, store.encoder.writeKey, iterable))
keyBufferView.setBigUint64(2000, startAddress, true)
endAddress = saveKey(options.end, store.encoder.writeKey, iterable)
} else {
throw new Error('Only key-based encoding is supported for start/end values')
let encoded = store.encoder.encode(options.start)
let bufferAddress = encoded.buffer.address || (encoded.buffer.address = getAddress(encoded) - encoded.byteOffset)
startAddress = bufferAddress + encoded.byteOffset
}
}
} else
endAddress = saveKey(options.end, store.writeKey, iterable)
return cursor.position(flags, offset || 0, keySize, endAddress)
}
function finishCursor() {
if (txn.isAborted)
return
if (cursorRenewId)
txn.renewingCursorCount--
if (--txn.cursorCount <= 0 && txn.onlyCursor) {
cursor.close()
let index = cursorTxns.indexOf(txn)
if (index > -1)
cursorTxns.splice(index, 1)
txn.abort() // this is no longer main read txn, abort it now that we are done
txn.isAborted = true
} else {
if (db.availableCursor || txn != readTxn)
cursor.close()
else {// try to reuse it
db.availableCursor = cursor
db.cursorTxn = txn
}
}
}
return {
next() {
let keySize
if (cursorRenewId && cursorRenewId != renewId) {
resetCursor()
keySize = position(0)
}
if (count === 0) { // && includeValues) // on first entry, get current value if we need to
keySize = position(options.offset)
} else
keySize = cursor.iterate()
if (keySize === 0 ||
(count++ >= limit)) {
finishCursor()
return ITERATOR_DONE
}
if (includeValues) // TODO: Can we do this after readKey, ran into issues with this before
lastSize = keyBufferView.getUint32(0, true)
if (!valuesForKey || snapshot === false)
currentKey = store.readKey(keyBuffer, 32, keySize + 32)
if (includeValues) {
let value
if (store.decoder) {
value = store.decoder.decode(db.unsafeBuffer, lastSize)
} else if (store.encoding == 'binary')
value = Uint8ArraySlice.call(db.unsafeBuffer, 0, lastSize)
else {
value = store.db.unsafeBuffer.toString('utf8', 0, lastSize)
if (store.encoding == 'json' && value)
value = JSON.parse(value)
}
if (includeVersions)
return {
value: {
key: currentKey,
value,
version: getLastVersion()
}
}
else if (valuesForKey)
return {
value
}
else
return {
value: {
key: currentKey,
value,
}
}
} else if (includeVersions) {
return {
value: {
key: currentKey,
version: getLastVersion()
}
}
} else {
return {
value: currentKey
}
}
},
return() {
finishCursor()
return ITERATOR_DONE
},
throw() {
finishCursor()
return ITERATOR_DONE
}
}
if (typeof versionOrValue == 'string')
versionOrValue = Buffer.from(versionOrValue)
return this.getValuesCount(key, { start: versionOrValue, exactMatch: true}) > 0
}
return iterable
}
scheduleCommit() {
if (!pendingBatch) {
// pendingBatch promise represents the completion of the transaction
let whenCommitted = new Promise((resolve, reject) => {
runNextBatch = (sync) => {
if (!whenCommitted)
return
runNextBatch = null
if (pendingBatch) {
for (const store of stores) {
store.emit('beforecommit', { scheduledOperations })
}
}
clearTimeout(timeout)
currentCommit = whenCommitted
whenCommitted = null
pendingBatch = null
if (scheduledOperations || scheduledTransactions) {
// operations to perform, collect them as an array and start doing them
let operations = scheduledOperations || []
let transactions = scheduledTransactions
if (operations.appendAsyncTxn) {
operations.push(true)
}
scheduledOperations = null
scheduledTransactions = null
const writeBatch = () => {
let start = Date.now()
let results = Buffer.alloc(operations.length)
let continuedWriteTxn
let transactionResults
let transactionSetIndex = 0
let callback = async (error) => {
if (error === true) {
// resume batch transaction
if (!transactionResults) {
// get the transaction we will use
continuedWriteTxn = env.beginTxn(true)
transactionResults = new Array(transactions.length)
results.transactionResults = transactionResults
}
let transactionSet = transactions[transactionSetIndex]
let transactionSetResults = transactionResults[transactionSetIndex++] = []
let promises
for (let i = 0, l = transactionSet.length; i < l; i++) {
let userTxn = transactionSet[i]
let asChild = userTxn.asChild
if (asChild) {
if (promises) {
// must complete any outstanding transactions before proceeding
await Promise.all(promises)
promises = null
}
let childTxn = writeTxn = env.beginTxn(null, continuedWriteTxn)
try {
let result = userTxn.callback()
if (result && result.then) {
await result
}
if (result === ABORT)
childTxn.abort()
else
childTxn.commit()
transactionSetResults[(i << 1) + 1] = result
} catch(error) {
childTxn.abort()
if (!txnError(error, i))
return
}
} else {
writeTxn = continuedWriteTxn
try {
let result = userTxn()
if (result && result.then) {
if (!promises)
promises = []
transactionSetResults[(i << 1) + 1] = result
promises.push(result.catch(() => {
txnError(error, i)
}))
} else
transactionSetResults[(i << 1) + 1] = result
} catch(error) {
if (!txnError(error, i))
return
}
}
}
if (promises) { // finish any outstanding commit functions
await Promise.all(promises)
}
writeTxn = null
return env.continueBatch(0)
function txnError(error, i) {
if (error.message.startsWith('MDB_MAP_FULL')) {
env.continueBatch(-30792)
writeTxn = null
return false
}
if (error.message.startsWith('MDB_MAP_RESIZED')) {
env.continueBatch(-30785)
writeTxn = null
return false
}
// user exception
transactionSetResults[i << 1] = error
return true
}
}
let duration = Date.now() - start
this.averageTransactionTime = (this.averageTransactionTime * 3 + duration) / 4
//console.log('did batch', (duration) + 'ms', name, operations.length/*map(o => o[1].toString('binary')).join(',')*/)
resetReadTxn()
if (error) {
if (error.message == 'Interrupted batch')
// if the batch was interrupted by a sync transaction request we just have to restart it
return writeBatch()
try {
// see if we can recover from recoverable error (like full map with a resize)
handleError(error, this, null, writeBatch)
} catch(error) {
currentCommit = null
for (const store of stores) {
store.emit('aftercommit', { operations })
}
reject(error)
}
} else {
currentCommit = null
for (const store of stores) {
store.emit('aftercommit', { operations, results })
}
resolve(results)
}
}
try {
if (sync === true) {
env.batchWrite(operations, results)
callback()
} else
env.batchWrite(operations, results, callback)
} catch (error) {
callback(error)
}
}
try {
writeBatch()
} catch(error) {
reject(error)
}
} else {
resolve([])
}
}
let timeout
if (this.commitDelay > 0) {
timeout = setTimeout(() => {
when(currentCommit, () => whenCommitted && runNextBatch(), () => whenCommitted && runNextBatch())
}, this.commitDelay)
} else {
timeout = runNextBatch.immediate = setImmediate(() => {
when(currentCommit, () => whenCommitted && runNextBatch(), () => whenCommitted && runNextBatch())
})
}
})
pendingBatch = {
results: whenCommitted,
unconditionalResults: whenCommitted.then(() => true) // for returning from non-conditional operations
}
}
return pendingBatch
}
batch(operations) {

@@ -1128,3 +399,3 @@ /*if (writeTxn) {

}
close() {
close(callback) {
this.db.close()

@@ -1140,11 +411,11 @@ if (this.isRoot) {

}
this.status = 'closed'
if (callback)
callback()
}
isOperational() {
return this.status == 'open'
}
getStats() {
try {
let stats = this.db.stat(readTxnRenewed ? readTxn : renewReadTxn())
return stats
}
catch(error) {
return handleError(error, this, readTxn, () => this.getStats())
}
return this.db.stat(readTxnRenewed ? readTxn : renewReadTxn())
}

@@ -1159,23 +430,18 @@ sync(callback) {

deleteDB() {
try {
this.transactionSync(() =>
this.db.drop({
justFreePages: false,
txn: writeTxn,
justFreePages: false
})
} catch(error) {
handleError(error, this, null, () => this.deleteDB())
}
, { abortable: false })
}
clear() {
try {
clear(callback) {
this.transactionSync(() =>
this.db.drop({
justFreePages: true,
txn: writeTxn,
justFreePages: true
})
} catch(error) {
handleError(error, this, null, () => this.clear())
}
, { abortable: false })
if (this.encoder && this.encoder.structures)
this.encoder.structures = []
if (typeof callback == 'function')
callback(null)
}

@@ -1193,14 +459,10 @@ readerCheck() {

lastVersion = getLastVersion()
try {
let buffer = this.getBinary(this.sharedStructuresKey)
if (this.useVersions)
setLastVersion(lastVersion)
return buffer ? this.encoder.decode(buffer) : []
} catch(error) {
return handleError(error, this, null, getStructures)
}
let buffer = this.getBinary(this.sharedStructuresKey)
if (this.useVersions)
setLastVersion(lastVersion)
return buffer ? this.encoder.decode(buffer) : []
}
return {
saveStructures: (structures, previousLength) => {
return this.transactionSync(() => {
return this.transactionSyncStart(() => {
let existingStructuresBuffer = this.getBinary(this.sharedStructuresKey)

@@ -1210,7 +472,7 @@ let existingStructures = existingStructuresBuffer ? this.encoder.decode(existingStructuresBuffer) : []

return false // it changed, we need to indicate that we couldn't update
writeTxn.putBinary(this.db, this.sharedStructuresKey, this.encoder.encode(structures))
this.put(this.sharedStructuresKey, structures)
})
},
getStructures,
copyBuffers: true // need to copy any embedded buffers that are found since we use unsafe buffers
copyBuffers: true, // need to copy any embedded buffers that are found since we use unsafe buffers
}

@@ -1222,47 +484,19 @@ }

const removeSync = LMDBStore.prototype.removeSync
addQueryMethods(LMDBStore, { env, getReadTxn() {
return readTxnRenewed ? readTxn : renewReadTxn()
}, saveKey, keyBytes, keyBytesView, getLastVersion })
addWriteMethods(LMDBStore, { env, fixedBuffer: keyBytes, resetReadTxn, ...options })
LMDBStore.prototype.supports = {
permanence: true,
bufferKeys: true,
promises: true,
snapshots: true,
clear: true,
status: true,
deferredOpen: true,
openCallback: true,
}
return options.cache ?
new (CachingStore(LMDBStore))(options.name || null, options) :
new LMDBStore(options.name || null, options)
function handleError(error, store, txn, retry) {
try {
if (writeTxn)
writeTxn.abort()
} catch(error) {}
if (writeTxn)
writeTxn = null
if (error.message.startsWith('MDB_') &&
!(error.message.startsWith('MDB_KEYEXIST') || error.message.startsWith('MDB_NOTFOUND')) ||
error.message == 'The transaction is already closed.') {
resetReadTxn() // separate out cursor-based read txns
try {
if (readTxn) {
readTxn.abort()
readTxn.isAborted = true
}
} catch(error) {}
readTxn = null
}
if (error.message.startsWith('MDB_PROBLEM'))
console.error(error)
//if (error.message == 'The transaction is already closed.')
// return handleError(error, store, null, retry)
if (error.message.startsWith('MDB_MAP_FULL') || error.message.startsWith('MDB_MAP_RESIZED')) {
const oldSize = env.info().mapSize
const newSize = error.message.startsWith('MDB_MAP_FULL') ?
Math.floor(((1.08 + 3000 / Math.sqrt(oldSize)) * oldSize) / 0x100000) * 0x100000 : // increase size, more rapidly at first, and round to nearest 1 MB
oldSize + 0x2000//Math.pow(2, (Math.round(Math.log2(oldSize)) + 1)) // for resized notifications, we try to align to doubling each time
for (const store of stores) {
store.emit('remap')
}
try {
env.resize(newSize)
} catch(error) {
throw new Error(error.message + ' trying to set map size to ' + newSize)
}
return retry()
}
error.message = 'In database ' + name + ': ' + error.message
throw error
}
}

@@ -1284,44 +518,2 @@

function compareKey(a, b) {
// compare with type consistency that matches ordered-binary
if (typeof a == 'object') {
if (!a) {
return b == null ? 0 : -1
}
if (a.compare) {
if (b == null) {
return 1
} else if (b.compare) {
return a.compare(b)
} else {
return -1
}
}
let arrayComparison
if (b instanceof Array) {
let i = 0
while((arrayComparison = compareKey(a[i], b[i])) == 0 && i <= a.length) {
i++
}
return arrayComparison
}
arrayComparison = compareKey(a[0], b)
if (arrayComparison == 0 && a.length > 1)
return 1
return arrayComparison
} else if (typeof a == typeof b) {
if (typeof a === 'symbol') {
a = Symbol.keyFor(a)
b = Symbol.keyFor(b)
}
return a < b ? -1 : a === b ? 0 : 1
}
else if (typeof b == 'object') {
if (b instanceof Array)
return -compareKey(b, a)
return 1
} else {
return typeOrder[typeof a] < typeOrder[typeof b] ? -1 : 1
}
}
class Entry {

@@ -1340,19 +532,11 @@ constructor(value, version, db) {

}
exports.compareKey = compareKey
const typeOrder = {
symbol: 0,
undefined: 1,
boolean: 2,
number: 3,
string: 4
}
exports.getLastEntrySize = function() {
export function getLastEntrySize() {
return lastSize
}
function getLastVersion() {
return keyBufferView.getFloat64(16, true)
export function getLastVersion() {
return keyBytesView.getFloat64(16, true)
}
function setLastVersion(version) {
return keyBufferView.setFloat64(16, version, true)
export function setLastVersion(version) {
return keyBytesView.setFloat64(16, version, true)
}

@@ -1364,4 +548,4 @@ let saveBuffer, saveDataView, saveDataAddress

saveBuffer.dataView = saveDataView = new DataView(saveBuffer.buffer, saveBuffer.byteOffset, saveBuffer.byteLength)
saveDataAddress = getAddress(saveBuffer)
saveBuffer.buffer.address = saveDataAddress - saveBuffer.byteOffset
saveBuffer.buffer.address = getAddress(saveBuffer.buffer)
saveDataAddress = saveBuffer.buffer.address + saveBuffer.byteOffset
savePosition = 0

@@ -1381,4 +565,1 @@

}
exports.getLastVersion = getLastVersion
exports.setLastVersion = setLastVersion

@@ -1,5 +0,5 @@

{
{
"name": "lmdb",
"author": "Kris Zyp",
"version": "1.6.10",
"version": "2.0.0-alpha1",
"description": "Simple, efficient, scalable data store wrapper for LMDB",

@@ -9,3 +9,3 @@ "license": "MIT",

"type": "git",
"url": "http://github.com/DoctorEvidence/lmdb-store"
"url": "git+ssh://git@github.com/DoctorEvidence/lmdb-store.git"
},

@@ -18,7 +18,13 @@ "keywords": [

],
"type": "commonjs",
"main": "./index.js",
"type": "module",
"module": "index.js",
"exports": {
"import": "./index.mjs",
"require": "./index.js"
".": {
"require": "./dist/index.cjs",
"import": "./index.js"
},
"./index.js": {
"require": "./dist/index.cjs",
"import": "./index.js"
}
},

@@ -31,6 +37,9 @@ "types": "./index.d.ts",

"install": "node-gyp-build",
"build": "node-gyp configure && node-gyp build -d && rollup -c",
"build-js": "rollup -c",
"prepare": "rollup -c",
"before-publish": "rollup -c && prebuildify-ci download && prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0",
"prebuild": "prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0",
"prebuild-arm64": "prebuildify --arch=arm64 --target 17.0.1 && prebuildify --arch=arm64 --target 16.9.0 && prebuildify --arch=arm64 --target 14.17.6",
"before-publish": "prebuildify-ci download && prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0 && prebuildify --target electron@13.1.4",
"recompile": "node-gyp clean && node-gyp configure && node-gyp build -d",
"recompile": "node-gyp clean && node-gyp configure && node-gyp build",
"test": "mocha test/**.test.js --recursive && npm run test:types",

@@ -46,4 +55,4 @@ "test2": "mocha tests -u tdd",

"node-gyp-build": "^4.2.3",
"ordered-binary": "^1.0.0",
"weak-lru-cache": "^1.0.0"
"ordered-binary": "^1.1.0",
"weak-lru-cache": "^1.1.0"
},

@@ -63,4 +72,12 @@ "optionalDependencies": {

"rimraf": "^3.0.2",
"rollup": "^1.20.3",
"tsd": "^0.14.0"
},
"bugs": {
"url": "https://github.com/DoctorEvidence/lmdb-store/issues"
},
"homepage": "https://github.com/DoctorEvidence/lmdb-store#readme",
"directories": {
"test": "tests"
}
}

@@ -14,2 +14,3 @@ [![license](https://img.shields.io/badge/license-MIT-brightgreen)](LICENSE)

* Optional native off-main-thread compression using high-performance LZ4
* Minimal dependencies to ensure stability and efficient memory use
* And ridiculously fast and efficient:

@@ -54,3 +55,3 @@

// or
myStore.transactionAsync(() => {
myStore.transaction(() => {
myStore.put('greeting', { someText: 'Hello, World!' });

@@ -347,3 +348,2 @@ myStore.get('greeting').someText // 'Hello, World!'

* `useVersions` - Set this to true if you will be setting version numbers on the entries in the database. Note that you can not change this flag once a database has entries in it (or they won't be read correctly).
* `encryptionKey` - This enables encryption, and the provided value is the key that is used for encryption. This may be a buffer or string, but must be 32 bytes/characters long. This uses the Chacha8 cipher for fast and secure on-disk encryption of data.
* `keyIsBuffer` - This will cause the database to expect and return keys as node buffers.
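For illustration, here is a minimal sketch (not part of this diff) of opening a database with versioning and encryption enabled; the 32-character key below mirrors the example key used in this package's own tests:
```
let db = open('my-encrypted-db', {
  useVersions: true, // entries carry version numbers; can not be changed once data exists
  encryptionKey: 'Use this key to encrypt the data' // must be 32 bytes/characters (Chacha8)
});
db.put('greeting', { someText: 'Hello, World!' }, 1); // the third argument is the version
```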

@@ -353,2 +353,3 @@ * `keyIsUint32` - This will cause the database to expect and return keys as unsigned 32-bit integers.

* `strictAsyncOrder` - Maintain strict ordering of execution of asynchronous transaction callbacks relative to asynchronous single operations.
The following additional option properties are only available when creating the main database environment (`open`):

@@ -358,2 +359,5 @@ * `path` - This is the file path to the database environment file you will use.

* `maxReaders` - The maximum number of concurrent read transactions (readers) to be able to open ([more information](http://www.lmdb.tech/doc/group__mdb.html#gae687966c24b790630be2a41573fe40e2)).
* `overlappingSync` - This enables committing transactions where LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed. This option is discussed in more detail below.
* `eventTurnBatching` - This is enabled by default and will ensure that all asynchronous write operations performed in the same event turn will be batched together into the same transaction. Disabling this allows lmdb-store to commit a transaction at any time, and asynchronous operations will only be guaranteed to be in the same transaction if explicitly batched together (with `transactionAsync`, `batch`, `ifVersion`). If this is disabled (set to `false`), you can control how many writes can occur before starting a transaction with `txnStartThreshold` (although a transaction will still be started at the next event turn if the threshold is not met). Disabling event turn batching (and using lower `txnStartThreshold` values) can facilitate a faster response time to write operations. `txnStartThreshold` defaults to 5.
* `encryptionKey` - This enables encryption, and the provided value is the key that is used for encryption. This may be a buffer or string, but must be 32 bytes/characters long. This uses the Chacha8 cipher for fast and secure on-disk encryption of data.
* `commitDelay` - This is the amount of time to wait (in milliseconds) for batching write operations before committing the writes (in a transaction). This defaults to 0. A delay of 0 means more immediate commits with less latency (uses `setImmediate`), but a longer delay (which uses `setTimeout`) can be more efficient at collecting more writes into a single transaction and reducing I/O load. Note that NodeJS timers only have an effective resolution of about 10ms, so a `commitDelay` of 1ms will generally wait about 10ms.
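As a rough sketch of how these environment-level options fit together (the values here are illustrative, not recommendations):
```
let db = open('my-db', {
  eventTurnBatching: false, // commit without waiting for the end of the event turn
  txnStartThreshold: 2, // start a transaction after 2 queued writes (default is 5)
  commitDelay: 10 // wait up to ~10ms to collect more writes into one transaction
});
```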

@@ -363,3 +367,3 @@

In addition, the following options map to LMDB's env flags, <a href="http://www.lmdb.tech/doc/group__mdb.html">described here</a>. None of these need to be set; the defaults can always be used and are generally recommended, but these are available for various needs and performance optimizations:
* `noSync` - Doesn't sync the data to disk. This can be useful for temporary databases where durability/integrity is not necessary, and can significantly improve write performance that is I/O bound. We discourage this flag for data that needs integrity and durability in storage, since it can result in data loss/corruption if the computer crashes.
* `noSync` - Does not explicitly flush data to disk at all. This can be useful for temporary databases where durability/integrity is not necessary, and can significantly improve write performance that is I/O bound. However, we discourage this flag for data that needs integrity and durability in storage, since it can result in data loss/corruption if the computer crashes.
* `noMemInit` - This provides a small performance boost for writes, by skipping zero'ing out malloc'ed data, but can leave application data in unused portions of the database. If you do not need to worry about unauthorized access to the db files themselves, this is recommended.
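A sketch of a throwaway store that trades durability for write speed with these flags (only appropriate for data you can afford to lose):
```
let scratch = open('/tmp/scratch-db', {
  noSync: true, // never flush to disk; data may be lost if the computer crashes
  noMemInit: true // skip zeroing newly allocated pages for slightly faster writes
});
```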

@@ -376,2 +380,16 @@ * `remapChunks` - This is a flag to specify whether dynamic memory mapping should be used. Enabling this generally makes read operations a little bit slower, but frees up more mapped memory, making it friendlier to other applications. This is enabled by default on 32-bit operating systems (which require this to go beyond 4GB database size) if `mapSize` is not specified; otherwise it is disabled by default.

### Overlapping Sync Options
The `overlappingSync` option enables a new technique for committing transactions where LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed. This means that the expensive/slow disk flushing operations do not occur during the writer lock, and allows disk flushing to occur in parallel with future transactions, providing potentially significant performance benefits. This uses a multi-step process of updating meta pointers to ensure database integrity even if a crash occurs.
When this is enabled, there are two events of potential interest: when the transaction is committed and the data is visible (to all other threads/processes), and when the transaction is flushed and durable. For write operations, the returned promise will resolve when the transaction is committed. The promise will also have a `flushed` property that holds a second promise that is resolved when the OS reports that the transaction writes have been fully flushed to disk and are truly durable (at least as far as the hardware/OS is capable of guaranteeing this). For example:
```
let db = open('my-db', { overlappingSync: true })
let written = db.put(key, value);
await written; // wait for it to be committed
let v = db.get(key) // this value can now be retrieved from the db
await written.flushed // wait for commit to be fully flushed to disk
```
This option is probably not helpful on Windows, as Windows' disk flushing operations tend to have poor performance characteristics (whereas Windows tends to perform well with standard transactions). This option may be enabled by default in the future, for non-Windows platforms.
#### Serialization options

@@ -390,2 +408,14 @@ If you are using the default encoding of `'msgpack'`, the [msgpackr](https://github.com/kriszyp/msgpackr) package is used for serialization and deserialization. You can provide store options that are passed to msgpackr, as well. For example, these options can be potentially useful:

## LevelUp
If you have an existing application built on LevelUp, lmdb-store is designed to make the transition to this package easy, with most of the LevelUp API implemented and supported. This includes the `put`, `del`, `batch`, `status`, `isOperational`, and `getMany` functions. One key difference in APIs is that LevelUp uses asynchronous, callback-based `get`s, but lmdb-store is so fast that it generally returns from a `get` call before an event can even be queued; consequently lmdb-store uses synchronous `get`s. However, there is a `levelup` export that can be used to generate a new store instance with LevelUp's style of API for `get` (although it still runs synchronously):
```
let dbLevel = levelup(db)
dbLevel.get(id, (error, value) => {
})
// or
dbLevel.get(id).then(...)
```
##### Build Options

@@ -399,3 +429,3 @@ A few LMDB options are available at build time, and can be specified with options with `npm install` (which can be specified in your package.json install script):

`npm install --enable_fast_api_calls=true`: This will build `lmdb-store` with V8's new API for fast calls. `lmdb-store` supports the new fast API for several functions, and this can provide significant performance benefits for `get`s and range retrieval. This should be used in conjunction with starting node with the `--turbo-fast-api-calls` option. This is only supported in Node v16.4.0 and higher.
`npm install --enable_fast_api_calls=true`: This will build `lmdb-store` with V8's new API for fast calls. `lmdb-store` supports the new fast API for several functions, and this can provide significant performance benefits for `get`s and range retrieval. This should be used in conjunction with starting node with the `--turbo-fast-api-calls` option. This is only supported in Node v17 and higher.

@@ -402,0 +432,0 @@ ## Credits

@@ -1,16 +0,17 @@

'use strict';
let path = require('path');
let rimraf = require('rimraf');
let chai = require('chai');
import path from 'path';
import rimraf from 'rimraf';
import chai from 'chai';
let should = chai.should();
let expect = chai.expect;
let spawn = require('child_process').spawn;
import { spawn } from 'child_process';
import { unlinkSync } from 'fs'
import { fileURLToPath } from 'url'
import { dirname } from 'path'
let nativeMethods, dirName = dirname(fileURLToPath(import.meta.url))
let { open, getLastVersion, bufferToKeyValue, keyValueToBuffer, ABORT } = require('..');
const { ArrayLikeIterable } = require('../util/ArrayLikeIterable')
//var inspector = require('inspector'); inspector.open(9330, null, true); debugger
import { open, levelup, bufferToKeyValue, keyValueToBuffer, ABORT } from '../index.js';
import { ArrayLikeIterable } from '../util/ArrayLikeIterable.js'
describe('lmdb-store', function() {
let testDirPath = path.resolve(__dirname, './testdata-ls');
let testDirPath = path.resolve(dirName, './testdata-ls');

@@ -36,6 +37,7 @@ // just to make a reasonable sized chunk of data...

});
let testIteration = 1
describe('Basic use', basicTests({ compression: false }));
let testIteration = 0
describe('Basic use', basicTests({ }));
describe('Basic use with overlapping sync', basicTests({ overlappingSync: true }));
describe('Basic use with encryption', basicTests({ compression: false, encryptionKey: 'Use this key to encrypt the data' }));
//describe('Check encrypted data', basicTests({ compression: false, checkLast: true }));
describe('Check encrypted data', basicTests({ compression: false, encryptionKey: 'Use this key to encrypt the data', checkLast: true }));
describe('Basic use with JSON', basicTests({ encoding: 'json' }));

@@ -49,2 +51,4 @@ describe('Basic use with ordered-binary', basicTests({ encoding: 'ordered-binary' }));

before(function() {
if (!options.checkLast)
testIteration++;
db = open(testDirPath + '/test-' + testIteration + '.mdb', Object.assign({

@@ -54,5 +58,7 @@ name: 'mydb3',

useVersions: true,
batchStartThreshold: 10,
//asyncTransactionOrder: 'strict',
//useWritemap: true,
//noSync: true,
//overlappingSync: true,
compression: {

@@ -62,3 +68,2 @@ threshold: 256,

}, options));
testIteration++;
if (!options.checkLast)

@@ -84,5 +89,5 @@ db.clear();

it('encrypted data can not be accessed', function() {
let data = db.get('key1');
let data = db.get('key1');
console.log({data})
data.should.deep.equal({foo: 1, bar: true})
data.should.deep.equal('test')
})

@@ -113,4 +118,5 @@ return

]
for (let key of keys)
for (let key of keys) {
await db.put(key, 3);
}
let returnedKeys = []

@@ -188,2 +194,3 @@ for (let { key, value } of db.getRange({

entry.version.should.equal(53252);
console.log('starting ifVersion');
(await db.ifVersion('key1', 777, () => {

@@ -226,2 +233,13 @@ db.put('newKey', 'test', 6);

});
it('repeated compressions', async function() {
let str = expand('Hello world!')
db.put('key1', str, 53252);
db.put('key1', str, 53253);
db.put('key1', str, 53254);
await db.put('key1', str, 53255);
let entry = db.getEntry('key1');
entry.value.should.equal(str);
entry.version.should.equal(53255);
(await db.remove('key1')).should.equal(true);
});
if (options.encoding == 'ordered-binary')

@@ -236,2 +254,9 @@ return // no more tests need to be applied for this

});
it('writes batch with callback', async function() {
let dataIn = {name: 'for batch 1'}
await db.batch(() => {
db.put('key1', dataIn);
db.put('key2', dataIn);
})
})
it.skip('trigger sync commit', async function() {

@@ -558,8 +583,8 @@ let dataIn = {foo: 4, bar: false}

entry.version.should.equal(33);
db.putSync('zkey7', 'test', { append: true, noOverwrite: true });
db2.putSync('zkey6', 'test1', { appendDup: true });
db2.putSync('zkey6', 'test2', { appendDup: true });
expect(() => db.putSync('zkey5', 'test', { append: true, version: 44 })).to.throw();
expect(() => db.putSync('zkey7', 'test', { noOverwrite: true })).to.throw();
expect(() => db2.putSync('zkey6', 'test1', { noDupData: true })).to.throw();
should.equal(db.putSync('zkey7', 'test', { append: true, noOverwrite: true }), true);
should.equal(db2.putSync('zkey6', 'test1', { appendDup: true }), true);
should.equal(db2.putSync('zkey6', 'test2', { appendDup: true }), true);
should.equal(db.putSync('zkey5', 'test', { append: true, version: 44 }), false);
should.equal(db.putSync('zkey7', 'test', { noOverwrite: true }), false);
should.equal(db2.putSync('zkey6', 'test1', { noDupData: true }), false);
});
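(The revised assertions above encode a 2.0 behavior change: a conditional putSync that cannot be applied — noOverwrite on an existing key, a stale version, a duplicate with noDupData — now reports failure by returning false instead of throwing. A minimal sketch of checking the result:)

// nothing is written if the key already exists; failure is signaled
// by the boolean return value rather than an exception
if (!db.putSync('zkey7', 'test', { noOverwrite: true })) {
	// handle the conflict: the existing value was left untouched
}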

@@ -622,5 +647,8 @@ it('async transactions', async function() {

it('async transaction with interrupting sync transaction default order', async function() {
db.strictAsyncOrder = false
db.strictAsyncOrder = true
let order = []
let ranSyncTxn
db.on('beforecommit', ()=> {
// force eventTurnBatching on
})
db.transactionAsync(() => {

@@ -631,6 +659,8 @@ order.push('a1');

ranSyncTxn = true;
setImmediate(() => db.transactionSync(() => {
order.push('s1');
db.put('inside-sync', 'test');
}));
setImmediate(() => {
db.transactionSync(() => {
order.push('s1');
db.put('inside-sync', 'test');
});
});
}

@@ -643,3 +673,3 @@ });

});
order.should.deep.equal(['a1', 'a2', 's1']);
order.should.deep.equal(['a1', 's1', 'a2']);
should.equal(db.get('async1'), 'test');

@@ -650,2 +680,19 @@ should.equal(db.get('outside-txn'), 'test');

});
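(A compressed, hedged restatement of what the revised assertion encodes: a transactionSync scheduled from inside the first async transaction now commits before the next queued async transaction, giving 'a1', 's1', 'a2' rather than the old 'a1', 'a2', 's1'.)

let order = [];
db.transactionAsync(() => {
	order.push('a1');
	setImmediate(() => db.transactionSync(() => order.push('s1')));
});
await db.transactionAsync(() => order.push('a2'));
// per the new expectation above: order is ['a1', 's1', 'a2']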
it('multiple async mixed', async function() {
let result
for (let i = 0; i < 100; i++) {
if (i%4 < 3) {
db.strictAsyncOrder = i%4 == 2
result = db.transaction(() => {
db.put('foo' + i, i)
})
} else {
result = db.put('foo' + i, i)
}
}
await result
for (let i = 0; i < 100; i++) {
should.equal(db.get('foo' + i), i)
}
})
it('big child transactions', async function() {

@@ -666,2 +713,70 @@ let ranTransaction

});
it('mixed batches', async function() {
let promise
for (let i = 0; i < 20; i++) {
db.put(i, 'test')
promise = db.batch(() => {
for (let j = 0; j < 20; j++) {
db.put('test:' + i + '/' + j, i + j)
}
})
}
await promise
for (let i = 0; i < 20; i++) {
should.equal(db.get(i), 'test');
for (let j = 0; j < 20; j++) {
should.equal(db.get('test:' + i + '/' + j), i + j)
}
}
});
it('levelup style callback', function(done) {
should.equal(db.isOperational(), true)
should.equal(db.status, 'open')
should.equal(db.supports.permanence, true)
db.put('key1', '1', (error, result) => {
should.equal(error, null)
'1'.should.equal(db.get('key1'))
db.del('key1', (error, result) => {
should.equal(error, null)
let leveldb = levelup(db)
leveldb.get('key1', (error, value) => {
should.equal(error.name, 'NotFoundError')
leveldb.put('key1', 'test', (error, value) => {
leveldb.getMany(['key1'], (error, values) => {
should.equal('test', values[0])
done();
})
})
})
})
})
});
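(A minimal sketch of the levelup adapter this test drives — levelup comes from the same import list at the top of this file; the key name is illustrative.)

const ldb = levelup(db);
// levelup-style callbacks: a missing key surfaces as a NotFoundError
ldb.get('no-such-key', (error, value) => {
	if (error) console.log(error.name); // 'NotFoundError'
	else console.log(value);
});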
it('batch operations', async function() {
let batch = db.batch()
batch.put('test:z', 'z')
batch.clear()
batch.put('test:a', 'a')
batch.put('test:b', 'b')
batch.put('test:c', 'c')
batch.del('test:c')
let callbacked
await batch.write(() => { callbacked = true })
should.equal(callbacked, true)
should.equal(db.get('test:a'), 'a')
should.equal(db.get('test:b'), 'b')
should.equal(db.get('test:c'), undefined)
should.equal(db.get('test:d'), undefined)
});
it('batch array', async function() {
await db.batch([
{type: 'put', key: 'test:a', value: 1 },
{type: 'put', key: 'test:b', value: 2 },
{type: 'put', key: 'test:c', value: 3 },
{type: 'del', key: 'test:c' },
])
should.equal(db.get('test:a'), 1)
should.equal(db.get('test:b'), 2)
should.equal(db.get('test:c'), undefined)
});
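(Both levelup-style batch forms exercised above, condensed into one hedged sketch — the chained writer and the operations array; keys are illustrative.)

// chained form: operations accumulate until write() commits them
const b = db.batch();
b.put('a', 1);
b.del('a');
await b.write(() => { /* optional completion callback */ });

// array form: one call, an array of put/del descriptors
await db.batch([
	{ type: 'put', key: 'b', value: 2 },
	{ type: 'del', key: 'b' },
]);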
it('read and write with binary encoding', async function() {

@@ -675,3 +790,5 @@ let dbBinary = db.openDB(Object.assign({

dbBinary.put('empty', Buffer.from([]));
await dbBinary.put('Uint8Array', new Uint8Array([1,2,3]));
let promise = dbBinary.put('Uint8Array', new Uint8Array([1,2,3]));
await promise
await promise.flushed
dbBinary.get('buffer').toString().should.equal('hello');

@@ -700,2 +817,7 @@ dbBinary.get('Uint8Array')[1].should.equal(2);

db.close();
if (options.encryptionKey) {
return done();
}
unlinkSync(testDirPath + '/test-' + testIteration + '.mdb');
console.log('successfully unlinked')
done();

@@ -762,3 +884,5 @@ },10);

after(function() {
console.log('closing')
db.close();
console.log('closed')
});

@@ -765,0 +889,0 @@ });

@@ -1,2 +0,1 @@

const when = require('./when')
const SKIP = {}

@@ -7,3 +6,3 @@ if (!Symbol.asyncIterator) {

class ArrayLikeIterable {
export class ArrayLikeIterable {
constructor(sourceArray) {

@@ -136,3 +135,1 @@ if (sourceArray) {

}
exports.ArrayLikeIterable = ArrayLikeIterable

@@ -1,2 +0,2 @@

module.exports = function when(promise, callback, errback) {
export function when(promise, callback, errback) {
if (promise && promise.then) {

@@ -3,0 +3,0 @@ return errback ?

Sorry, the diff of this file is not supported yet
