Comparing version 2.5.3 to 2.6.0-alpha1
caching.js
 import { WeakLRUCache, clearKeptObjects } from './native.js';
 import { FAILED_CONDITION, ABORT } from './write.js';
+import { UNMODIFIED } from './read.js';
 import { when } from './util/when.js';
-let getLastVersion;
+let getLastVersion, getLastTxnId;
 const mapGet = Map.prototype.get;
@@ -14,3 +15,3 @@ export const CachingStore = (Store, env) => {
 			env.cacheCommitter = true;
-			this.on('aftercommit', ({ next, last }) => {
+			this.on('aftercommit', ({ next, last, txnId }) => {
 				do {
@@ -26,4 +27,6 @@ let meta = next.meta;
 					let entry = mapGet.call(cache, meta.key);
-					if (entry)
+					if (entry) {
+						entry.txnId = txnId;
 						cache.used(entry, expirationPriority + 4); // this will enter it into the LRFU (with a little lower priority than a read)
+					}
 				}
@@ -38,2 +41,4 @@ }
 		this.cache = new WeakLRUCache(options.cache);
+		if (options.cache.validated)
+			this.cache.validated = true;
 	}
@@ -44,6 +49,22 @@ get isCaching() {
 	get(id, cacheMode) {
-		let value = this.cache.getValue(id);
-		if (value !== undefined)
-			return value;
-		value = super.get(id);
+		let value;
+		if (this.cache.validated) {
+			let entry = this.cache.get(id);
+			if (entry) {
+				let cachedValue = entry.value;
+				if (entry.txnId) {
+					value = super.get(id, { ifNotTxnId: entry.txnId });
+					if (value === UNMODIFIED)
+						return cachedValue;
+				} else // with no txn id we do not validate; this is the state of a cached value after a write before it transacts
+					value = cachedValue;
+			} else
+				value = super.get(id);
+		} else {
+			value = this.cache.getValue(id);
+			if (value !== undefined) {
+				return value;
+			}
+			value = super.get(id);
+		}
 		if (value && typeof value === 'object' && !cacheMode && typeof id !== 'object') {
@@ -54,2 +75,4 @@ let entry = this.cache.setValue(id, value, this.lastSize >> 10);
 		}
+		if (this.cache.validated)
+			entry.txnId = getLastTxnId();
 	}
@@ -159,4 +182,5 @@ return value;
 };
-export function setGetLastVersion(get) {
+export function setGetLastVersion(get, getTxnId) {
 	getLastVersion = get;
+	getLastTxnId = getTxnId;
 }
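
These hunks thread a transaction id through the cache: `aftercommit` now carries a `txnId`, each cached entry records the id it was read or written under, and `get` revalidates the entry against it via the new `ifNotTxnId` read option. A minimal sketch of how this could look from the application side, assuming the `validated` flag shown in the hunk above is passed through `open`'s `cache` options (illustrative, not taken from this diff):

```
import { open } from 'lmdb';

// With a validated cache, a later get() asks LMDB for the value only if
// something has committed since the recorded txn id (ifNotTxnId), and
// reuses the cached object when the native layer reports UNMODIFIED.
const db = open('my-data', {
	cache: { validated: true }
});
await db.put('greeting', { hello: 'world' });
const first = db.get('greeting'); // decoded, cached, entry.txnId recorded
const second = db.get('greeting'); // validated against the recorded txn id
console.log(first === second); // expected: true if no write intervened
```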
@@ -6,3 +6,3 @@ /* sample-bdb.txt - BerkeleyDB toy/sample
 /*
- * Copyright 2012-2020 Howard Chu, Symas Corp.
+ * Copyright 2012-2021 Howard Chu, Symas Corp.
  * All rights reserved.
@@ -6,3 +6,3 @@ /* sample-mdb.txt - MDB toy/sample
 /*
- * Copyright 2012-2020 Howard Chu, Symas Corp.
+ * Copyright 2012-2021 Howard Chu, Symas Corp.
  * All rights reserved.
keys.js
@@ -50,3 +50,3 @@ import { getAddress, orderedBinary } from './native.js';
-let saveBuffer, saveDataView = { setFloat64() {}, setUint32() {} }, saveDataAddress;
+let saveBuffer, uint32, saveDataView = { setFloat64() {}, setUint32() {} }, saveDataAddress;
 let savePosition = 8000;
@@ -56,2 +56,3 @@ let DYNAMIC_KEY_BUFFER_SIZE = 8192;
 	saveBuffer = typeof Buffer != 'undefined' ? Buffer.alloc(DYNAMIC_KEY_BUFFER_SIZE) : new Uint8Array(DYNAMIC_KEY_BUFFER_SIZE);
+	uint32 = null;
 	saveBuffer.buffer.address = getAddress(saveBuffer);
@@ -65,6 +66,8 @@ saveDataAddress = saveBuffer.buffer.address;
 }
-export function saveKey(key, writeKey, saveTo, maxKeySize) {
+export function saveKey(key, writeKey, saveTo, maxKeySize, skip) {
 	if (savePosition > 7800) {
 		allocateSaveBuffer();
 	}
+	if (skip > 0)
+		savePosition += skip;
 	let start = savePosition;
@@ -106,1 +109,13 @@ try {
 }
+export function saveRead(txn, dbi, key, writeKey, saveTo, maxKeySize) {
+	let start = savePosition;
+	saveKey(key, writeKey, saveTo, maxKeySize, 12);
+	if (start > savePosition)
+		start = 0;
+	if (!uint32)
+		uint32 = new Uint32Array(saveBuffer.buffer, 0, saveBuffer.buffer.byteLength >> 2);
+	saveDataView.setFloat64(start, txn.address, true);
+	if (Atomics.or(uint32, (start >> 2) + 2, dbi)) {
+		return start + saveDataAddress;
+	}
+}
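
The new `saveRead` packs a transaction address (as a float64) and a dbi bitmask into the shared save buffer, and gates its return on `Atomics.or`, which atomically ORs a value into the typed array and returns the value that was there *before*. A standalone illustration of that primitive (not lmdb-js code):

```
// Atomics.or(arr, index, v) ORs v into arr[index] atomically and returns
// the OLD value. saveRead treats a nonzero previous value as its cue to
// hand the buffer address back to the caller.
const slots = new Uint32Array(new SharedArrayBuffer(16));
slots[2] = 0x1; // pretend one dbi bit is already set
const previous = Atomics.or(slots, 2, 0x4); // atomically set another bit
console.log(previous); // 1 -> truthy, so saveRead would return the address
console.log(slots[2]); // 5 (0x1 | 0x4)
```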
native.js
 import { dirname, join, default as pathModule } from 'path';
 import { fileURLToPath } from 'url';
 import loadNAPI from 'node-gyp-build-optional-packages';
-export let Env, Txn, Dbi, Compression, Cursor, getAddress, createBufferForAddress, clearKeptObjects, setGlobalBuffer, arch, fs, os, onExit, tmpdir, lmdbError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache, setEnvMap, getEnvMap, getByBinary, detachBuffer, write, position, iterate, prefetch, resetTxn, getCurrentValue, getCurrentShared, getStringByBinary, getSharedByBinary, compress;
+export let Env, Txn, Dbi, Compression, Cursor, getAddress, createBufferForAddress, clearKeptObjects, globalBuffer, setGlobalBuffer, arch, fs, os, onExit, tmpdir, lmdbError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache, setEnvMap, getEnvMap, getByBinary, detachBuffer, write, position, iterate, prefetch, resetTxn, getCurrentValue, getCurrentShared, getStringByBinary, getSharedByBinary, getSharedBuffer, compress;
@@ -57,2 +57,4 @@ path = pathModule;
 setGlobalBuffer = externals.setGlobalBuffer;
+globalBuffer = externals.globalBuffer;
+getSharedBuffer = externals.getSharedBuffer;
 prefetch = externals.prefetch;
 import { createRequire } from 'module';
 import { setRequire } from './open.js';
 import { nativeAddon, setNativeFunctions } from './native.js';
+export { nativeAddon } from './native.js'
 import { setFlagsFromString } from 'v8';
open.js
@@ -11,3 +11,3 @@ import { Compression, getAddress, arch, fs, path as pathModule, lmdbError, EventEmitter, MsgpackrEncoder, Env, Dbi, tmpdir, os, nativeAddon } from './native.js';
-setGetLastVersion(getLastVersion);
+setGetLastVersion(getLastVersion, getLastTxnId);
 let keyBytes, keyBytesView;
@@ -24,2 +24,5 @@ const buffers = [];
 const MAX_KEY_SIZE = 4026;
+// this is used as the max key size by default because the default page size is the OS page size, which is usually
+// 4KB (but is 16KB on M-series MacOS), and this keeps a consistent max key size when no page size is specified.
+const DEFAULT_MAX_KEY_SIZE = 1978;
 const DEFAULT_COMMIT_DELAY = 0;
@@ -51,3 +54,3 @@
 let is32Bit = arch().endsWith('32');
-let remapChunks = options.remapChunks || (options.mapSize ?
+let remapChunks = options.remapChunks || options.encryptionKey || (options.mapSize ?
 	(is32Bit && options.mapSize > 0x100000000) : // larger than fits in address space, must use dynamic maps
@@ -62,3 +65,2 @@ is32Bit); // without a known map size, we default to being able to handle large data correctly/well*/
 	keyBytes,
-	pageSize: 4096,
 	overlappingSync: (options.noSync || options.readOnly) ? false : (os != 'win32'),
@@ -118,4 +120,5 @@ // default map size limit of 4 exabytes when using remapChunks, since it is not preallocated and we can
 lmdbError(rc);
+delete options.keyBytes // no longer needed, don't copy to stores
 let maxKeySize = env.getMaxKeySize();
-maxKeySize = Math.min(maxKeySize, MAX_KEY_SIZE);
+maxKeySize = Math.min(maxKeySize, options.pageSize ? MAX_KEY_SIZE : DEFAULT_MAX_KEY_SIZE);
 flags = getEnvFlags(env.address); // re-retrieve them, they are not necessarily the same if we are connecting to an existing env
@@ -175,3 +178,3 @@ if (flags & 0x1000) {
 (!options.readOnly && dbOptions.create !== false ? 0x40000 : 0) |
-(dbOptions.useVersions ? 0x1000 : 0);
+(dbOptions.useVersions ? 0x100 : 0);
 let keyType = (dbOptions.keyIsUint32 || dbOptions.keyEncoding == 'uint32') ? 2 : keyIsBuffer ? 3 : 0;
@@ -183,3 +186,3 @@ if (keyType == 2)
 // in read-only mode we use a read-only txn to open the database
-// TODO: LMDB is actually not entire thread-safe when it comes to opening databases with
+// TODO: LMDB is actually not entirely thread-safe when it comes to opening databases with
 // read-only transactions since there is a race condition on setting the update dbis that
@@ -274,4 +277,5 @@ // occurs outside the lock
 }
-backup(path) {
-	return new Promise((resolve, reject) => env.copy(path, false, (error) => {
+backup(path, compact) {
+	fs.mkdirSync(pathModule.dirname(path), { recursive: true });
+	return new Promise((resolve, reject) => env.copy(path, compact, (error) => {
 		if (error) {
@@ -388,3 +392,2 @@ reject(error);
 }
-export function setLastVersion(version) {
@@ -394,2 +397,6 @@ return keyBytesView.setFloat64(16, version, true);
+export function getLastTxnId() {
+	return keyBytesView.getUint32(32, true);
+}
 const KEY_BUFFER_SIZE = 4096;
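
`backup` gains a `compact` parameter, passed straight through to `env.copy` (presumably LMDB's compacting copy mode), and now creates missing parent directories of the target first. A hedged usage sketch:

```
import { open } from 'lmdb';

const db = open('my-data', {});
await db.put('key', 'value');
// Parent directories of the target are now created automatically
// (fs.mkdirSync with recursive: true); the second argument is the
// new compact flag from this diff.
await db.backup('backups/2022-07/data.mdb', true);
```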
package.json
 {
   "name": "lmdb",
   "author": "Kris Zyp",
-  "version": "2.5.3",
+  "version": "2.6.0-alpha1",
   "description": "Simple, efficient, scalable, high-performance LMDB interface",
@@ -49,2 +49,5 @@ "license": "MIT",
   },
+  "bin": {
+    "download-lmdb-prebuilds": "./bin/download-prebuilds.js download"
+  },
   "scripts": {
@@ -56,9 +59,10 @@ "install": "node-gyp-build-optional-packages",
   "prepare": "rollup -c",
-  "before-publish": "rollup -c && prebuildify-ci download && prebuildify-platform-packages --target 16.14.0 && prebuildify-platform-packages --target 14.19.1 && prebuildify-platform-packages --target 17.9.0 && prebuildify-platform-packages --target 18.0.0 && set ENABLE_V8_FUNCTIONS=false&& prebuildify-platform-packages --napi --platform-packages --target 16.14.0 && node util/set-optional-deps.cjs && npm run test",
-  "prebuild-libc-expanded": "prebuildify-platform-packages --tag-libc --target 16.14.0 && prebuildify-platform-packages --tag-libc --target 14.19.1 && prebuildify-platform-packages --tag-libc --target 17.9.0 && prebuildify-platform-packages --tag-libc --target 18.0.0 && ENABLE_V8_FUNCTIONS=false prebuildify-platform-packages --tag-libc --napi --platform-packages --target 16.14.0",
-  "prebuild-libc": "prebuildify-platform-packages --tag-libc --target 17.9.0 && prebuildify-platform-packages --tag-libc --target 18.0.0 && prebuildify-platform-packages --napi --platform-packages --tag-libc --target 16.14.2",
-  "prebuildify": "prebuildify-platform-packages --target 16.14.2",
-  "full-publish": "cd prebuilds/win32-x64 && npm publish --access public && cd ../darwin-x64 && npm publish --access public && cd ../darwin-arm64 && npm publish --access public && cd ../linux-x64 && npm publish --access public && cd ../linux-arm64 && npm publish --access public && cd ../linux-arm && npm publish --access public && cd ../.. && npm publish",
+  "before-publish": "rollup -c && prebuildify-ci download && prebuildify-platform-packages --target 16.14.0 && prebuildify-platform-packages --target 14.19.1 && prebuildify-platform-packages --target 18.0.0 && set ENABLE_V8_FUNCTIONS=false&& prebuildify-platform-packages --napi --platform-packages --target 16.14.0 && node util/set-optional-deps.cjs && npm run test",
+  "prebuild-libc-musl": "ENABLE_V8_FUNCTIONS=false prebuildify-platform-packages --tag-libc --napi --platform-packages --target 16.14.2",
+  "prebuild-libc": "prebuildify-platform-packages --tag-libc --target 18.0.0 && prebuildify-platform-packages --napi --platform-packages --tag-libc --target 16.14.2",
+  "prebuild-libc-arm7": "prebuildify-platform-packages --napi --platform-packages --tag-libc --target 16.14.2",
+  "prebuildify": "prebuildify-platform-packages --target 18.6.0",
+  "full-publish": "cd prebuilds/win32-x64 && npm publish --tag alpha --access public && cd ../darwin-x64 && npm publish --tag alpha --access public && cd ../darwin-arm64 && npm publish --tag alpha --access public && cd ../linux-x64 && npm publish --tag alpha --access public && cd ../linux-arm64 && npm publish --tag alpha --access public && cd ../linux-arm && npm publish --tag alpha --access public && cd ../.. && npm publish --tag alpha",
   "recompile": "node-gyp clean && node-gyp configure && node-gyp build",
-  "test": "mocha test/**.test.js --recursive && npm run test:types",
+  "test": "mocha test/**.test.js --expose-gc --recursive && npm run test:types",
   "deno-test": "deno run --allow-ffi --allow-write --allow-read --allow-env --allow-net --unstable test/deno.ts",
@@ -97,9 +101,9 @@ "test2": "mocha test/performance.js -u tdd",
   "optionalDependencies": {
-    "@lmdb/lmdb-darwin-arm64": "2.5.3",
-    "@lmdb/lmdb-darwin-x64": "2.5.3",
-    "@lmdb/lmdb-linux-arm": "2.5.3",
-    "@lmdb/lmdb-linux-arm64": "2.5.3",
-    "@lmdb/lmdb-linux-x64": "2.5.3",
-    "@lmdb/lmdb-win32-x64": "2.5.3"
+    "@lmdb/lmdb-darwin-arm64": "2.6.0-alpha1",
+    "@lmdb/lmdb-darwin-x64": "2.6.0-alpha1",
+    "@lmdb/lmdb-linux-arm": "2.6.0-alpha1",
+    "@lmdb/lmdb-linux-arm64": "2.6.0-alpha1",
+    "@lmdb/lmdb-linux-x64": "2.6.0-alpha1",
+    "@lmdb/lmdb-win32-x64": "2.6.0-alpha1"
   }
 }
read.js
 import { RangeIterable } from './util/RangeIterable.js';
-import { getAddress, Cursor, Txn, orderedBinary, lmdbError, getByBinary, detachBuffer, setGlobalBuffer, prefetch, iterate, position as doPosition, resetTxn, getCurrentValue, getCurrentShared, getStringByBinary, getSharedByBinary } from './native.js';
+import { getAddress, Cursor, Txn, orderedBinary, lmdbError, getByBinary, detachBuffer, setGlobalBuffer, prefetch, iterate, position as doPosition, resetTxn, getCurrentValue, getCurrentShared, getStringByBinary, globalBuffer, getSharedBuffer } from './native.js';
 import { saveKey } from './keys.js';
@@ -7,11 +7,17 @@ const ITERATOR_DONE = { done: true, value: undefined };
 const Uint8A = typeof Buffer != 'undefined' ? Buffer.allocUnsafeSlow : Uint8Array
-let getValueBytes = makeReusableBuffer(0);
+let getValueBytes = globalBuffer;
+if (!getValueBytes.maxLength) {
+	getValueBytes.maxLength = getValueBytes.length;
+	Object.defineProperty(getValueBytes, 'length', { value: getValueBytes.length, writable: true, configurable: true });
+}
 const START_ADDRESS_POSITION = 4064;
 const NEW_BUFFER_THRESHOLD = 0x8000;
+export const UNMODIFIED = {};
 export function addReadMethods(LMDBStore, {
-	maxKeySize, env, keyBytes, keyBytesView, getLastVersion
+	maxKeySize, env, keyBytes, keyBytesView, getLastVersion, getLastTxnId
 }) {
 	let readTxn, readTxnRenewed, asSafeBuffer = false;
 	let renewId = 1;
+	let mmaps = [];
 	Object.assign(LMDBStore.prototype, {
@@ -30,8 +36,15 @@ getString(id) {
 	},
-	getBinaryFast(id) {
-		(env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn(this)));
-		let rc = this.lastSize = getByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0));
+	getBinaryFast(id, options) {
+		let rc
+		if (options?.txn?.address)
+			rc = this.lastSize = getByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0), options.ifNotTxnId || 0, options.txn.address);
+		else {
+			(env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn(this)));
+			rc = this.lastSize = getByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0), options?.ifNotTxnId || 0, 0);
+		}
 		if (rc < 0) {
 			if (rc == -30798) // MDB_NOTFOUND
 				return; // undefined
+			if (rc == -30004) // txn id matched
+				return UNMODIFIED;
 			if (rc == -30781 /*MDB_BAD_VALSIZE*/ && this.writeKey(id, keyBytes, 0) == 0)
@@ -43,3 +56,7 @@ throw new Error(id === undefined ?
 			rc = this.lastSize = keyBytesView.getUint32(0, true);
-		else
+		else if (rc == -30001) {// shared buffer
+			this.lastSize = keyBytesView.getUint32(0, true);
+			let bufferId = keyBytesView.getUint32(4, true);
+			return getMMapBuffer(bufferId, this.lastSize);
+		} else
 			throw lmdbError(rc);
@@ -52,4 +69,3 @@ }
 		return this._returnLargeBuffer(
-			() => getByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0)),
-			() => getSharedByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0)));
+			() => getByBinary(this.dbAddress, this.writeKey(id, keyBytes, 0), 0, 0));
 	}
@@ -59,3 +75,17 @@ bytes.length = this.lastSize;
 	},
-	_returnLargeBuffer(getFast, getShared) {
+	getBFAsync(id, callback, options) {
+		saveKey(id, writeKey, {})
+	},
+	retainBinary(buffer) {
+		if (!buffer)
+			return
+		if (!buffer.isGlobal && !env.writeTxn) {
+			buffer.txn = readTxn;
+			readTxn.refCount = (readTxn.refCount || 0) + 1;
+			return buffer;
+		} else {
+			return Uint8ArraySlice.call(buffer, 0, this.lastSize);
+		}
+	},
+	_returnLargeBuffer(getFast) {
 		let bytes;
@@ -91,8 +121,2 @@ let compression = this.compression;
 		}
-		if (this.lastSize > NEW_BUFFER_THRESHOLD && !compression) {
-			// for large binary objects, cheaper to make a buffer that directly points at the shared LMDB memory to avoid copying a large amount of memory, but only for large data since there is significant overhead to instantiating the buffer
-			if (globalThis.__lmdb_last_shared__ && detachBuffer) // we have to detach the last one, or else could crash due to two buffers pointing at same location
-				detachBuffer(globalThis.__lmdb_last_shared__.buffer);
-			return globalThis.__lmdb_last_shared__ = getShared();
-		}
 		// grow our shared/static buffer to accomodate the size of the data
@@ -120,6 +144,7 @@ bytes = this._allocateGetBuffer(this.lastSize);
 		} else {
+			console.log('should not get here', newLength)
 			bytes = makeReusableBuffer(newLength);
 			setGlobalBuffer(getValueBytes = bytes);
 		}
-		bytes.isShared = true;
+		bytes.isGlobal = true;
 		return bytes;
@@ -131,3 +156,3 @@ },
 			let fastBuffer = this.getBinaryFast(id);
-			return fastBuffer && (fastBuffer.isShared ? Uint8ArraySlice.call(fastBuffer, 0, this.lastSize) : fastBuffer);
+			return fastBuffer && (fastBuffer.isGlobal ? Uint8ArraySlice.call(fastBuffer, 0, this.lastSize) : fastBuffer);
 		} finally {
@@ -137,14 +162,24 @@ asSafeBuffer = false;
 	},
-	get(id) {
+	getSharedBinary(id) {
+		let fastBuffer = this.getBinaryFast(id);
+		if (fastBuffer) {
+			if (fastBuffer.isGlobal || writeTxn)
+				return Uint8ArraySlice.call(fastBuffer, 0, this.lastSize)
+			fastBuffer.txn = readTxn;
+			readTxn.refCount = (readTxn.refCount || 0) + 1;
+			return fastBuffer;
+		}
+	},
+	get(id, options) {
 		if (this.decoderCopies) {
 			// the decoder copies any data, so we can use the fast binary retrieval that overwrites the same buffer space
-			let bytes = this.getBinaryFast(id);
-			return bytes && this.decoder.decode(bytes);
+			let bytes = this.getBinaryFast(id, options);
+			return bytes && (bytes == UNMODIFIED ? UNMODIFIED : this.decoder.decode(bytes));
 		}
 		if (this.encoding == 'binary')
-			return this.getBinary(id);
+			return this.getBinary(id, options);
 		if (this.decoder) {
 			// the decoder potentially uses the data from the buffer in the future and needs a stable buffer
 			let bytes = this.getBinary(id);
-			return bytes && this.decoder.decode(bytes);
+			return bytes && (bytes == UNMODIFIED ? UNMODIFIED : this.decoder.decode(bytes));
 		}
@@ -179,4 +214,7 @@
 	_commitReadTxn() {
-		if (readTxn)
+		if (readTxn) {
+			readTxn.isCommitted = true
 			readTxn.commit();
+		}
+		lastReadTxnRef = null;
 		readTxnRenewed = null;
@@ -264,3 +302,5 @@ readTxn = null;
 		let flags = (includeValues ? 0x100 : 0) | (reverse ? 0x400 : 0) |
-			(valuesForKey ? 0x800 : 0) | (options.exactMatch ? 0x4000 : 0);
+			(valuesForKey ? 0x800 : 0) | (options.exactMatch ? 0x4000 : 0) |
+			(options.inclusiveEnd ? 0x8000 : 0) |
+			(options.exclusiveStart ? 0x10000 : 0);
 		let store = this;
@@ -283,6 +323,6 @@ function resetCursor() {
 		cursorAddress = cursor.address;
-		txn.cursorCount = (txn.cursorCount || 0) + 1; // track transaction so we always use the same one
+		txn.refCount = (txn.refCount || 0) + 1; // track transaction so we always use the same one
 		if (snapshot === false) {
 			cursorRenewId = renewId; // use shared read transaction
-			txn.renewingCursorCount = (txn.renewingCursorCount || 0) + 1; // need to know how many are renewing cursors
+			txn.renewingrefCount = (txn.renewingrefCount || 0) + 1; // need to know how many are renewing cursors
 		}
@@ -339,4 +379,4 @@ } catch(error) {
 			if (cursorRenewId)
-				txn.renewingCursorCount--;
-			if (--txn.cursorCount <= 0 && txn.onlyCursor) {
+				txn.renewingrefCount--;
+			if (--txn.refCount <= 0 && txn.onlyCursor) {
 				cursor.close();
@@ -368,3 +408,3 @@ txn.abort(); // this is no longer main read txn, abort it now that we are done
 		if (count < 0)
-			lmdbError(count);
+			lmdbError(count);
 		finishCursor();
@@ -384,20 +424,23 @@ return ITERATOR_DONE;
 					lastSize = keyBytesView.getUint32(0, true);
-					let bytes = compression ? compression.getValueBytes : getValueBytes;
-					if (lastSize > bytes.maxLength) {
-						store.lastSize = lastSize;
-						asSafeBuffer = store.encoding == 'binary';
-						try {
-							bytes = store._returnLargeBuffer(
-								() => getCurrentValue(cursorAddress),
-								() => getCurrentShared(cursorAddress)
-							);
-						} finally {
-							asSafeBuffer = false;
-						}
-					} else
-						bytes.length = lastSize;
+					let bufferId = keyBytesView.getUint32(4, true);
+					let bytes;
+					if (bufferId) {
+						bytes = getMMapBuffer(bufferId, lastSize);
+					} else {
+						bytes = compression ? compression.getValueBytes : getValueBytes;
+						if (lastSize > bytes.maxLength) {
+							store.lastSize = lastSize;
+							asSafeBuffer = store.encoding == 'binary';
+							try {
+								bytes = store._returnLargeBuffer(() => getCurrentValue(cursorAddress));
+							} finally {
+								asSafeBuffer = false;
+							}
+						} else
+							bytes.length = lastSize;
+					}
 					if (store.decoder) {
 						value = store.decoder.decode(bytes, lastSize);
 					} else if (store.encoding == 'binary')
-						value = bytes.isShared ? Uint8ArraySlice.call(bytes, 0, lastSize) : bytes;
+						value = bytes.isGlobal ? Uint8ArraySlice.call(bytes, 0, lastSize) : bytes;
 					else {
@@ -526,2 +569,6 @@ value = bytes.toString('utf8', 0, lastSize);
 		},
+		useReadTxn() {
+			let txn = readTxnRenewed ? readTxn : renewReadTxn(this);
+			txn.refCount = (txn.refCount || 0) + 1;
+		},
 		close(callback) {
@@ -544,6 +591,6 @@ this.status = 'closing';
 			const doClose = () => {
-				this.db.close();
-				if (this.isRoot) {
+				if (this.isRoot)
 					env.close();
-				}
+				else
+					this.db.close();
 				this.status = 'closed';
@@ -561,7 +608,20 @@ if (callback)
 		getStats() {
-			readTxnRenewed ? readTxn : renewReadTxn(this);
-			return this.db.stat();
-		}
+			(env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn(this)));
+			let dbStats = this.db.stat();
+			dbStats.root = env.stat();
+			dbStats.env = env.info();
+			dbStats.free = env.freeStat();
+			return dbStats;
+		},
 	});
 	let get = LMDBStore.prototype.get;
+	let lastReadTxnRef;
+	function getMMapBuffer(bufferId, size) {
+		let buffer = mmaps[bufferId];
+		if (!buffer) {
+			buffer = mmaps[bufferId] = getSharedBuffer(bufferId, env.address);
+		}
+		let offset = keyBytesView.getUint32(8, true);
+		return new Uint8Array(buffer, offset, size);
+	}
 	function renewReadTxn(store) {
@@ -573,3 +633,9 @@ if (!readTxn) {
 		try {
-			readTxn = new Txn(env, 0x20000);
+			let lastReadTxn = lastReadTxnRef && lastReadTxnRef.deref();
+			readTxn = new Txn(env, 0x20000, lastReadTxn && !lastReadTxn.isDone && lastReadTxn);
+			if (readTxn.address == 0) {
+				readTxn = lastReadTxn;
+				if (readTxn.onlyCursor)
+					readTxn.onlyCursor = false;
+			}
 			break;
@@ -596,4 +662,5 @@ } catch (error) {
 	readTxnRenewed = null;
-	if (readTxn.cursorCount - (readTxn.renewingCursorCount || 0) > 0) {
+	if (readTxn.refCount - (readTxn.renewingrefCount || 0) > 0) {
 		readTxn.onlyCursor = true;
+		lastReadTxnRef = new WeakRef(readTxn);
 		readTxn = null;
@@ -611,1 +678,9 @@ } else
 }
+Txn.prototype.done = function() {
+	this.refCount--;
+	if (this.refCount == 0 && this.onlyCursor) {
+		this.abort();
+		this.isDone = true;
+	}
+}
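
Most of the read-path changes are internal plumbing: cursor counting is generalized into a `refCount`, `getSharedBinary`/`retainBinary` pin the current read transaction by bumping that count, and the new `Txn.prototype.done` aborts a cursor-only transaction once the count drains to zero. An illustrative sketch of that lifecycle, assuming the internal (undocumented) methods added above behave as the hunks suggest:

```
import { open } from 'lmdb';

const store = open('my-data', {});
await store.put('key', Buffer.from('a large value'));
// getSharedBinary (added above) can return a buffer viewing LMDB memory,
// pinning the read txn: buffer.txn = readTxn and readTxn.refCount++.
const buffer = store.getSharedBinary('key');
console.log(buffer.length);
// Txn.prototype.done (added above) decrements refCount and aborts the txn
// once nothing else holds it; txn may be absent if a copy was returned.
buffer.txn?.done();
```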
README.md
@@ -28,3 +28,3 @@ [![license](https://img.shields.io/badge/license-MIT-brightgreen)](LICENSE)
 ```
-`lmdb-js` is based on the Node-API for maximum compatility across all supported Node versions and futue Deno versions. It also includes accelerated, high-speed functions for direct V8 interaction that are compiled for, and (automatically) loaded in Node v16. The standard Node-API based functions are used in all other versions and still provide excellent performance, but for absolute maximum performance on older versions of Node, you can use `npm install --build-from-source`.
+`lmdb-js` is based on the Node-API for maximum compatibility across all supported Node versions and future Deno versions. It also includes accelerated, high-speed functions for direct V8 interaction that are compiled for, and (automatically) loaded in Node v16. The standard Node-API based functions are used in all other versions and still provide excellent performance, but for absolute maximum performance on older versions of Node, you can use `npm install --build-from-source`.
@@ -90,2 +90,4 @@ In Deno, this package could be directly used from the [deno.land `lmdb` module](https://deno.land/x/lmdb/mod.ts), but Node-API support is currently in-progress, so probably will require Deno v1.24+ (for older versions of Deno, you can use `lmdb-js` v2.2.x).
+Symbol.for('even symbols')
+false
 true
 -10 // negative supported
@@ -156,3 +158,3 @@ -1.1 // decimals supported
-Also, the callback function can be an async function (or return a promise), but this is not recommended. If the function returns a promise, this will delay/defer the commit until the callback's promise is resolved. However, while waiting for the callback to finish, other code may execute operations that would end up in the current transaction and may result in a surprising order of operations, and long running transactions are generally discouraged since they extend the single write lock.
+Also, the callback function can be an async function (or return a promise), but this is not recommended. If the function returns a promise, this will delay/defer the commit until the callback's promise is resolved. However, while waiting for the callback to finish, other code may execute operations that would end up in the current transaction and may result in a surprising order of operations, and long running transactions are generally discouraged since they extend the single write lock.
@@ -505,7 +507,11 @@ ### `db.childTransaction(callback: Function): Promise`
 A few LMDB options are available at build time, and can be specified with options with `npm install` (which can be specified in your package.json install script):
-`npm install --use_robust=true`: This will enable LMDB's MDB_USE_ROBUST option, which uses robust semaphores/mutexes so that if you are using multiple processes, and one process dies in the middle of transaction, the OS will cleanup the semaphore/mutex, aborting the transaction and allowing other processes to run without hanging. There is a slight performance overhead, but this is recommended if you will be using multiple processes.
+`npm install lmdb --build-from-source --use_robust=false`: This will disable LMDB's MDB_USE_ROBUST option, which uses robust semaphores/mutexes so that if you are using multiple processes, and one process dies in the middle of transaction, the OS will cleanup the semaphore/mutex, aborting the transaction and allowing other processes to run without hanging. There is a slight performance overhead to robust mutexes, but keeping this enabled is recommended if you will be using multiple processes.
-On MacOS, there is a default limit of 10 robust locked semaphores, which imposes a limit on the number of open write transactions (if you have over 10 database environments with a write transaction). If you need more concurrent write transactions, you can increase your maximum undoable semaphore count by setting kern.sysv.semmnu on your local computer. Otherwise don't use the robust mutex option. You can also try to minimize overlapping transactions and/or reduce the number of database environments (and use more databases within each environment).
+On MacOS, there is a default limit of 10 robust locked semaphores, which imposes a limit on the number of open write transactions (if you have over 10 database environments with a write transaction). If you need more concurrent write transactions, you can increase your maximum undoable semaphore count with:
+```
+sudo sysctl kern.sysv.semume=50
+```
+Otherwise you may need to disable the robust mutex option. You can also try to minimize overlapping transactions and/or reduce the number of database environments (and use more databases within each environment).
-`npm install --use_data_v1=true`: This will build from an older version of LMDB that uses the legacy data format version 1 (the latest LMDB uses data format version 2). For portability of the data format, this may be preferable since many libraries still use older versions of LMDB. Since this is an older version of LMDB, some features may not be available, including encryption and remapping.
+`npm install lmdb --build-from-source --use_data_v1=true`: This will build from an older version of LMDB that uses the legacy data format version 1 (the latest LMDB uses data format version 2). For portability of the data format, this may be preferable since many libraries still use older versions of LMDB. Since this is an older version of LMDB, some features may not be available, including encryption and remapping.
util/RangeIterable.js
@@ -121,2 +121,3 @@ const SKIP = {};
 		let iterable = this;
+		Object.defineProperty(array, 'iterable', { value: iterable });
 		function next(result) {
@@ -131,3 +132,2 @@ while (result.done !== true) {
 			}
-			array.iterable = iterable;
 			resolve(iterable._asArray = array);
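
Switching from a plain assignment to `Object.defineProperty` makes the `iterable` back-reference non-enumerable (`enumerable` defaults to `false` in a property descriptor), so it no longer leaks into key enumeration of the resolved array. A quick illustration of that default:

```
const a = [1], b = [1];
a.iterable = 'x'; // plain assignment: property is enumerable
Object.defineProperty(b, 'iterable', { value: 'x' }); // non-enumerable
console.log(Object.keys(a)); // [ '0', 'iterable' ]
console.log(Object.keys(b)); // [ '0' ]
console.log(b.iterable); // 'x' — still readable directly
```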
write.js
@@ -16,2 +16,4 @@ import { getAddress, write, compress } from './native.js';
 const HAS_TXN = 8;
+const CONDITIONAL_VERSION_LESS_THAN = 0x800;
+const CONDITIONAL_ALLOW_NOTFOUND = 0x800;
@@ -79,10 +81,13 @@ const SYNC_PROMISE_SUCCESS = Promise.resolve(true);
 var lastPromisedResolution = uncommittedResolution;
+let lastValue, valueBuffer;
 function writeInstructions(flags, store, key, value, version, ifVersion) {
 	let writeStatus;
 	let targetBytes, position, encoder;
-	let valueBuffer, valueSize, valueBufferStart;
+	let valueSize, valueBufferStart;
 	if (flags & 2) {
 		// encode first in case we have to write a shared structure
 		encoder = store.encoder;
-		if (value && value['\x10binary-data\x02'])
+		if (typeof value !== 'object' && value && value === lastValue && !store.compression) {
+			// reuse last valueBuffer
+		} else if (value && value['\x10binary-data\x02'])
 			valueBuffer = value['\x10binary-data\x02'];
@@ -103,2 +108,3 @@ else if (encoder) {
 			throw new Error('Invalid value to put in database ' + value + ' (' + (typeof value) +'), consider using encoder');
+		lastValue = (writeTxn || store.compression) ? null : value; // can't reuse values from write txns because we reset the buffer
 		valueBufferStart = valueBuffer.start;
@@ -458,3 +464,3 @@ if (valueBufferStart > -1) // if we have buffers with start/end position
 function resolveCommit(async) {
-	afterCommit();
+	afterCommit(txnResolution.uint32[txnResolution.flagPosition + 1]);
 	if (async)
@@ -507,5 +513,5 @@ resetReadTxn();
 }
-function afterCommit() {
+function afterCommit(txnId) {
 	for (let i = 0, l = afterCommitCallbacks.length; i < l; i++) {
-		afterCommitCallbacks[i]({ next: uncommittedResolution, last: unwrittenResolution});
+		afterCommitCallbacks[i]({ next: uncommittedResolution, last: unwrittenResolution, txnId });
 	}
@@ -584,3 +590,3 @@ }
 	// close them.
-	if (writeTxn.cursorCount > 0)
+	if (writeTxn.refCount > 0)
 		writeTxn.isDone = true;
@@ -633,7 +639,6 @@ env.writeTxn = writeTxn = parentTxn || null;
 	},
-	ifVersion(key, version, callback) {
+	ifVersion(key, version, callback, options) {
 		if (!callback) {
 			return new Batch((operations, callback) => {
-				let promise = this.ifVersion(key, version, operations);
+				let promise = this.ifVersion(key, version, operations, options);
 				if (callback)
@@ -651,3 +656,8 @@ promise.then(callback);
 		}
-		let finishStartWrite = writeInstructions(key === undefined || version === undefined ? 1 : 4, this, key, undefined, undefined, version);
+		let flags = key === undefined || version === undefined ? 1 : 4;
+		if (options?.ifLessThan)
+			flags |= CONDITIONAL_VERSION_LESS_THAN;
+		if (options?.allowNotFound)
+			flags |= CONDITIONAL_ALLOW_NOTFOUND;
+		let finishStartWrite = writeInstructions(flags, this, key, undefined, undefined, version);
 		let promise;
@@ -788,6 +798,6 @@ batchDepth += 2;
 		let callbackDone;
-		this.transactions++;
-		env.beginTxn(flags == undefined ? 3 : flags);
-		writeTxn = env.writeTxn = { write: true };
 		try {
+			this.transactions++;
+			env.beginTxn(flags == undefined ? 3 : flags);
+			writeTxn = env.writeTxn = { write: true };
 			this.emit('begin-transaction');
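
`ifVersion` now takes an options object whose `ifLessThan` and `allowNotFound` flags OR the new conditional bits into the write instruction, and `aftercommit` listeners receive the committing `txnId`. A hedged sketch of the call shape (option names come from the hunk above; the exact condition semantics are inferred from the flag names):

```
import { open } from 'lmdb';

const db = open('my-data', { useVersions: true });
// Apply the batch only if the existing version of 'doc' is less than 5;
// allowNotFound keeps a missing key from failing the condition outright.
const ok = await db.ifVersion('doc', 5, () => {
	db.put('doc', { revised: true }, 5);
}, { ifLessThan: true, allowNotFound: true });
console.log(ok); // true if the conditional write was applied
```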