Socket
Socket
Sign inDemoInstall

lmdb

Package Overview
Dependencies
Maintainers
3
Versions
171
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

lmdb - npm Package Compare versions

Comparing version 2.1.7 to 2.2.0-beta1

4

caching.js

@@ -1,2 +0,2 @@

import { WeakLRUCache } from './external.js';
import { WeakLRUCache, clearKeptObjects } from './external.js';
import { FAILED_CONDITION } from './write.js';

@@ -28,2 +28,4 @@ let getLastVersion;

this.db.cachingDb = this;
if (options.cache.clearKeptInterval)
options.cache.clearKeptObjects = clearKeptObjects;
this.cache = new WeakLRUCache(options.cache);

@@ -30,0 +32,0 @@ }

@@ -12,2 +12,5 @@ /*export { toBufferKey as keyValueToBuffer, compareKeys, compareKeys as compareKey, fromBufferKey as bufferToKeyValue } */

import { EventEmitter } from 'https://deno.land/std/node/events.ts'
setExternals({ orderedBinary, MsgpackrEncoder, WeakLRUCache, arch, path, EventEmitter, fs: Deno });
let os: string = Deno.build.os
if (os == 'windows')
os = 'win32'
setExternals({ orderedBinary, MsgpackrEncoder, WeakLRUCache, arch, path, EventEmitter, fs: Deno, os });
export let Env, Compression, Cursor, getAddress, clearKeptObjects, setGlobalBuffer,
require, arch, fs, lmdbError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache;
require, arch, fs, os, tmpdir, lmdbError, path, EventEmitter, orderedBinary, MsgpackrEncoder, WeakLRUCache;
export function setNativeFunctions(externals) {

@@ -7,16 +7,20 @@ Env = externals.Env;

getAddress = externals.getAddress;
clearKeptObjects = externals.clearKeptObjects;
setGlobalBuffer = externals.setGlobalBuffer;
Cursor = externals.Cursor;
lmdbError = externals.lmdbError;
clearKeptObjects = externals.clearKeptObjects;
setGlobalBuffer = externals.setGlobalBuffer;
Cursor = externals.Cursor;
lmdbError = externals.lmdbError;
if (externals.tmpdir)
tmpdir = externals.tmpdir
}
export function setExternals(externals) {
require = externals.require;
arch = externals.arch;
fs = externals.fs;
path = externals.path;
EventEmitter = externals.EventEmitter;
orderedBinary = externals.orderedBinary;
MsgpackrEncoder = externals.MsgpackrEncoder;
WeakLRUCache = externals.WeakLRUCache;
require = externals.require;
arch = externals.arch;
fs = externals.fs;
path = externals.path;
EventEmitter = externals.EventEmitter;
orderedBinary = externals.orderedBinary;
MsgpackrEncoder = externals.MsgpackrEncoder;
WeakLRUCache = externals.WeakLRUCache;
tmpdir = externals.tmpdir;
os = externals.os;
}

@@ -47,3 +47,3 @@ declare namespace lmdb {

**/
getMany(ids: K[], callback?: (error: any, values: V[]) => any): Promise<V[]>
getMany(ids: K[], callback?: (error: any, values: V[]) => any): Promise<(V | undefined)[]>

@@ -239,2 +239,6 @@ /**

clearSync(): void
/** A promise-like object that resolves when previous writes have been committed. */
committed: Promise<boolean>
/** A promise-like object that resolves when previous writes have been committed and fully flushed/synced to disk/storage. */
flushed: Promise<boolean>
/**

@@ -282,3 +286,3 @@ * Check the reader locks and remove any stale reader locks. Returns the number of stale locks that were removed.

type Key = Key[] | string | symbol | number | boolean | Buffer;
type Key = Key[] | string | symbol | number | boolean | Uint8Array;

@@ -285,0 +289,0 @@ interface DatabaseOptions {

@@ -9,78 +9,79 @@ import { fileURLToPath } from './deps.ts';

if (!libPath || !exists(libPath)) {
//console.log({ libPath }, 'does not exist')
libPath = (Deno.env.get('LMDB_LIB_PATH') || (tmpdir() + '/lmdb-js-' + (version || '') + '.lib')) as string;
const ARCH = { x86_64: 'x64', aarch64: 'arm64' }
if (!exists(libPath)) {
let os: string = Deno.build.os;
os = os == 'windows' ? 'win32' : os;
os += '-' + ARCH[Deno.build.arch];
let libraryUrl = 'https://cdn.jsdelivr.net/npm/lmdb@' + (version || 'latest') +
'/prebuilds/' + os + '/node.abi93' + (os == 'win32' ? '' : '.glibc') + '.node';
console.log('Download', libraryUrl);
let response = await fetch(libraryUrl);
if (response.status == 200) {
let binaryLibraryBuffer = await response.arrayBuffer();
Deno.writeFileSync(libPath, new Uint8Array(binaryLibraryBuffer));
} else {
throw new Error('Unable to fetch ' + libraryUrl + ', HTTP response: ' + response.status);
}
}
//console.log({ libPath }, 'does not exist')
libPath = (Deno.env.get('LMDB_LIB_PATH') || (tmpdir() + '/lmdb-js-' + (version || '') + '.lib')) as string;
const ARCH = { x86_64: 'x64', aarch64: 'arm64' }
if (!exists(libPath)) {
let os: string = Deno.build.os;
os = os == 'windows' ? 'win32' : os;
os += '-' + ARCH[Deno.build.arch];
let libraryUrl = 'https://cdn.jsdelivr.net/npm/lmdb@' + (version || 'latest') +
'/prebuilds/' + os + '/node.abi93' + (os == 'win32' ? '' : '.glibc') + '.node';
console.log('Download', libraryUrl);
let response = await fetch(libraryUrl);
if (response.status == 200) {
let binaryLibraryBuffer = await response.arrayBuffer();
Deno.writeFileSync(libPath, new Uint8Array(binaryLibraryBuffer));
} else {
throw new Error('Unable to fetch ' + libraryUrl + ', HTTP response: ' + response.status);
}
}
}
let lmdbLib = Deno.dlopen(libPath, {
// const char* path, char* keyBuffer, Compression* compression, int jsFlags, int flags, int maxDbs,
// int maxReaders, mdb_size_t mapSize, int pageSize, char* encryptionKey
// const char* path, char* keyBuffer, Compression* compression, int jsFlags, int flags, int maxDbs,
// int maxReaders, mdb_size_t mapSize, int pageSize, char* encryptionKey
envOpen: { parameters: ['u32', 'u32', 'pointer', 'pointer', 'f64', 'u32', 'u32', 'f64', 'u32', 'pointer'], result: 'i64'},
closeEnv: { parameters: ['f64'], result: 'void'},
freeData: { parameters: ['f64'], result: 'void'},
getAddress: { parameters: ['pointer'], result: 'usize'},
getMaxKeySize: { parameters: ['f64'], result: 'u32'},
openDbi: { parameters: ['f64', 'u32', 'pointer', 'u32', 'f64'], result: 'i64'},
getDbi: { parameters: ['f64'], result: 'u32'},
readerCheck: { parameters: ['f64'], result: 'i32'},
beginTxn: { parameters: ['f64', 'u32'], result: 'i64'},
resetTxn: { parameters: ['f64'], result: 'void'},
renewTxn: { parameters: ['f64'], result: 'i32'},
abortTxn: { parameters: ['f64'], result: 'void'},
commitTxn: { parameters: ['f64'], result: 'i32'},
commitEnvTxn: { parameters: ['f64'], result: 'i32'},
abortEnvTxn: { parameters: ['f64'], result: 'void'},
getError: { parameters: ['i32', 'pointer'], result: 'void'},
dbiGetByBinary: { parameters: ['f64', 'u32'], result: 'u32'},
openCursor: { parameters: ['f64'], result: 'i64'},
cursorRenew: { parameters: ['f64'], result: 'i32'},
cursorClose: { parameters: ['f64'], result: 'i32'},
cursorIterate: { parameters: ['f64'], result: 'i32'},
cursorPosition: { parameters: ['f64', 'u32', 'u32', 'u32', 'f64'], result: 'i32'},
cursorCurrentValue: { parameters: ['f64'], result: 'i32'},
startWriting: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'},
compress: { parameters: ['f64', 'f64'], nonblocking: true, result: 'void'},
envWrite: { parameters: ['f64', 'f64'], result: 'i32'},
setGlobalBuffer: { parameters: ['pointer', 'usize'], result: 'void'},
setCompressionBuffer: { parameters: ['f64', 'pointer', 'usize', 'u32'], result: 'void'},
newCompression: { parameters: ['pointer', 'usize', 'u32'], result: 'u64'},
prefetch: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'},
closeEnv: { parameters: ['f64'], result: 'void'},
freeData: { parameters: ['f64'], result: 'void'},
getAddress: { parameters: ['pointer'], result: 'usize'},
getMaxKeySize: { parameters: ['f64'], result: 'u32'},
openDbi: { parameters: ['f64', 'u32', 'pointer', 'u32', 'f64'], result: 'i64'},
getDbi: { parameters: ['f64'], result: 'u32'},
readerCheck: { parameters: ['f64'], result: 'i32'},
beginTxn: { parameters: ['f64', 'u32'], result: 'i64'},
resetTxn: { parameters: ['f64'], result: 'void'},
renewTxn: { parameters: ['f64'], result: 'i32'},
abortTxn: { parameters: ['f64'], result: 'void'},
commitTxn: { parameters: ['f64'], result: 'i32'},
commitEnvTxn: { parameters: ['f64'], result: 'i32'},
abortEnvTxn: { parameters: ['f64'], result: 'void'},
getError: { parameters: ['i32', 'pointer'], result: 'void'},
dbiGetByBinary: { parameters: ['f64', 'u32'], result: 'u32'},
openCursor: { parameters: ['f64'], result: 'i64'},
cursorRenew: { parameters: ['f64'], result: 'i32'},
cursorClose: { parameters: ['f64'], result: 'i32'},
cursorIterate: { parameters: ['f64'], result: 'i32'},
cursorPosition: { parameters: ['f64', 'u32', 'u32', 'u32', 'f64'], result: 'i32'},
cursorCurrentValue: { parameters: ['f64'], result: 'i32'},
startWriting: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'},
compress: { parameters: ['f64', 'f64'], nonblocking: true, result: 'void'},
envWrite: { parameters: ['f64', 'f64'], result: 'i32'},
setGlobalBuffer: { parameters: ['pointer', 'usize'], result: 'void'},
setCompressionBuffer: { parameters: ['f64', 'pointer', 'usize', 'u32'], result: 'void'},
newCompression: { parameters: ['pointer', 'usize', 'u32'], result: 'u64'},
prefetch: { parameters: ['f64', 'f64'], nonblocking: true, result: 'i32'},
envSync: { parameters: ['f64'], nonblocking: true, result: 'i32'},
});
let { envOpen, closeEnv, getAddress, freeData, getMaxKeySize, openDbi, getDbi, readerCheck,
commitEnvTxn, abortEnvTxn, beginTxn, resetTxn, renewTxn, abortTxn, commitTxn, dbiGetByBinary, startWriting, compress, envWrite, openCursor, cursorRenew, cursorClose, cursorIterate, cursorPosition, cursorCurrentValue, setGlobalBuffer: setGlobalBuffer2, setCompressionBuffer, getError, newCompression, prefetch } = lmdbLib.symbols;
commitEnvTxn, abortEnvTxn, beginTxn, resetTxn, renewTxn, abortTxn, commitTxn, dbiGetByBinary, startWriting, compress, envWrite, openCursor, cursorRenew, cursorClose, cursorIterate, cursorPosition, cursorCurrentValue, setGlobalBuffer: setGlobalBuffer2, setCompressionBuffer, getError, newCompression, prefetch,envSync } = lmdbLib.symbols;
let registry = new FinalizationRegistry<number>(address => {
// when an object is GC'ed, free it in C.
freeData(address);
// when an object is GC'ed, free it in C.
freeData(address);
});
class CBridge {
address: number;
constructor(address: number) {
this.address = address || 0;
if (address) {
registry.register(this, address);
}
}
address: number;
constructor(address: number) {
this.address = address || 0;
if (address) {
registry.register(this, address);
}
}
/* static addMethods(...methods: ) {
for (let method of methods) {
this.prototype[method] = function() {
return symbols[method](this.address, ...arguments);
};
}
}*/
for (let method of methods) {
this.prototype[method] = function() {
return symbols[method](this.address, ...arguments);
};
}
}*/
}

@@ -91,93 +92,103 @@ const textEncoder = new TextEncoder();

function checkError(rc: number): number {
if (rc && rc < MAX_ERROR) {
// TODO: Look up error and throw
lmdbError(rc);
}
return rc;
if (rc && rc < MAX_ERROR) {
// TODO: Look up error and throw
lmdbError(rc);
}
return rc;
}
function lmdbError(rc: number) {
getError(rc, keyBytes);
let message = textDecoder.decode(keyBytes.subarray(0, keyBytes.indexOf(0))) || ('Error code: ' + rc);
throw new Error(message);
getError(rc, keyBytes);
let message = textDecoder.decode(keyBytes.subarray(0, keyBytes.indexOf(0))) || ('Error code: ' + rc);
throw new Error(message);
}
let keyBytes: Uint8Array;
class Env extends CBridge {
open(options: any, flags: number, jsFlags: number) {
let rc = envOpen(flags, jsFlags, toCString(options.path), keyBytes = options.keyBytes, 0,
options.maxDbs || 12, options.maxReaders || 126, options.mapSize, options.pageSize, new Uint8Array(0)) as number;
this.address = checkError(rc);
registry.register(this, this.address);
return 0;
open(options: any, flags: number, jsFlags: number) {
let rc = envOpen(flags, jsFlags, toCString(options.path), keyBytes = options.keyBytes, 0,
options.maxDbs || 12, options.maxReaders || 126, options.mapSize, options.pageSize, new Uint8Array(0)) as number;
this.address = checkError(rc);
registry.register(this, this.address);
return 0;
}
openDbi(flags: number, name: string, keyType: number, compression: Compression) {
let rc: number = openDbi(this.address, flags, toCString(name), keyType, compression?.address || 0) as number;
if (rc == -30798) { // MDB_NOTFOUND
return;
}
return new Dbi(checkError(rc),
getDbi(rc) as number);
}
close() {
closeEnv(this.address);
}
getMaxKeySize() {
return getMaxKeySize(this.address);
}
readerCheck() {
return readerCheck(this.address);
}
beginTxn(flags: number) {
let rc: number = beginTxn(this.address, flags) as number;
return new Transaction(checkError(rc), flags);
}
commitTxn() {
checkError(commitEnvTxn(this.address) as number);
}
abortTxn() {
abortEnvTxn(this.address);
}
startWriting(instructions: number, callback: (value: number) => number) {
(startWriting(this.address, instructions) as Promise<number>).then(callback);
}
compress(compressionPointer: number, callback: (value: void) => void) {
return (compress(this.address, compressionPointer) as Promise<void>).then(callback);
}
write(instructions: number) {
return checkError(envWrite(this.address, instructions) as number);
}
sync(callback: Function) {
return envSync(this.address).then((result: number) => {
try {
checkError(result);
callback(null);
} catch(error) {
callback(error);
}
});
}
openDbi(flags: number, name: string, keyType: number, compression: Compression) {
let rc: number = openDbi(this.address, flags, toCString(name), keyType, compression?.address || 0) as number;
if (rc == -30798) { // MDB_NOTFOUND
return;
}
return new Dbi(checkError(rc),
getDbi(rc) as number);
}
close() {
closeEnv(this.address);
}
getMaxKeySize() {
return getMaxKeySize(this.address);
}
readerCheck() {
return readerCheck(this.address);
}
beginTxn(flags: number) {
let rc: number = beginTxn(this.address, flags) as number;
return new Transaction(checkError(rc), flags);
}
commitTxn() {
checkError(commitEnvTxn(this.address) as number);
}
abortTxn() {
abortEnvTxn(this.address);
}
startWriting(instructions: number, callback: (value: number) => number) {
(startWriting(this.address, instructions) as Promise<number>).then(callback);
}
compress(compressionPointer: number, callback: (value: void) => void) {
return (compress(this.address, compressionPointer) as Promise<void>).then(callback);
}
write(instructions: number) {
return checkError(envWrite(this.address, instructions) as number);
}
}
//Env.addMethods('startWriting', 'write', 'openDB');
class Dbi extends CBridge {
dbi: number;
constructor(address: number, dbi: number) {
super(address);
this.dbi = dbi;
}
getByBinary(keySize: number): number {
return dbiGetByBinary(this.address, keySize) as number;
}
prefetch(keys: number, callback: () => void): void {
(prefetch(this.address, keys) as Promise<number>).then(() => callback());
}
dbi: number;
constructor(address: number, dbi: number) {
super(address);
this.dbi = dbi;
}
getByBinary(keySize: number): number {
return dbiGetByBinary(this.address, keySize) as number;
}
prefetch(keys: number, callback: () => void): void {
(prefetch(this.address, keys) as Promise<number>).then(() => callback());
}
}
class Transaction extends CBridge {
flags: number;
constructor(address: number, flags: number) {
super(address);
this.flags = flags;
}
reset() {
resetTxn(this.address);
}
renew() {
let rc = renewTxn(this.address) as number;
if (rc)
lmdbError(rc);
}
abort() {
abortTxn(this.address);
}
commit() {
commitTxn(this.address);
}
flags: number;
constructor(address: number, flags: number) {
super(address);
this.flags = flags;
}
reset() {
resetTxn(this.address);
}
renew() {
let rc = renewTxn(this.address) as number;
if (rc)
lmdbError(rc);
}
abort() {
abortTxn(this.address);
}
commit() {
commitTxn(this.address);
}
}

@@ -187,38 +198,38 @@

class Compression extends CBridge {
constructor(options: { dictionary: Uint8Array, threshold: number }) {
let dictionary = options.dictionary || new Uint8Array(0);
super(newCompression(dictionary, dictionary.length, options.threshold || 1000) as number);
}
setBuffer(bytes: Uint8Array, dictLength: number) {
setCompressionBuffer(this.address, bytes, bytes.length, dictLength);
}
constructor(options: { dictionary: Uint8Array, threshold: number }) {
let dictionary = options.dictionary || new Uint8Array(0);
super(newCompression(dictionary, dictionary.length, options.threshold || 1000) as number);
}
setBuffer(bytes: Uint8Array, dictLength: number) {
setCompressionBuffer(this.address, bytes, bytes.length, dictLength);
}
}
class Cursor extends CBridge {
constructor(dbi: Dbi) {
super(openCursor(dbi.address) as number);
}
renew() {
cursorRenew(this.address);
}
position(flags: number, offset: number, keySize: number, endKeyAddress: number) {
return cursorPosition(this.address, flags, offset, keySize, endKeyAddress);
}
iterate() {
return cursorIterate(this.address);
}
getCurrentValue() {
return cursorCurrentValue(this.address);
}
close() {
return cursorClose(this.address);
}
constructor(dbi: Dbi) {
super(openCursor(dbi.address) as number);
}
renew() {
cursorRenew(this.address);
}
position(flags: number, offset: number, keySize: number, endKeyAddress: number) {
return cursorPosition(this.address, flags, offset, keySize, endKeyAddress);
}
iterate() {
return cursorIterate(this.address);
}
getCurrentValue() {
return cursorCurrentValue(this.address);
}
close() {
return cursorClose(this.address);
}
}
function toCString(str: string): Uint8Array {
return str == null ? new Uint8Array(0) : textEncoder.encode(str + '\x00');
return str == null ? new Uint8Array(0) : textEncoder.encode(str + '\x00');
}
function setGlobalBuffer(buffer: Uint8Array) {
setGlobalBuffer2(buffer, buffer.length);
setGlobalBuffer2(buffer, buffer.length);
}
setNativeFunctions({ Env, Compression, Cursor, getAddress, lmdbError, setGlobalBuffer });
setNativeFunctions({ Env, Compression, Cursor, getAddress, tmpdir, lmdbError, setGlobalBuffer });
export const { toBufferKey: keyValueToBuffer, compareKeys, compareKeys: compareKey, fromBufferKey: bufferToKeyValue } = orderedBinary;

@@ -231,34 +242,34 @@ export { ABORT, asBinary, IF_EXISTS } from './write.js';

function tmpdir(): string | null {
/* This follows the node js implementation, but has a few
differences:
* On windows, if none of the environment variables are defined,
we return null.
* On unix we use a plain Deno.env.get, instead of safeGetenv,
which special cases setuid binaries.
* Node removes a single trailing / or \, we remove all.
*/
if (Deno.build.os == 'windows') {
const temp = Deno.env.get("TEMP") || Deno.env.get("TMP");
if (temp) {
return temp.replace(/(?<!:)[/\\]*$/, "");
}
const base = Deno.env.get("SYSTEMROOT") || Deno.env.get("WINDIR");
if (base) {
return base + "\\temp";
}
return null;
} else { // !isWindows
const temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") ||
Deno.env.get("TEMP") || "/tmp";
return temp.replace(/(?<!^)\/*$/, "");
}
/* This follows the node js implementation, but has a few
differences:
* On windows, if none of the environment variables are defined,
we return null.
* On unix we use a plain Deno.env.get, instead of safeGetenv,
which special cases setuid binaries.
* Node removes a single trailing / or \, we remove all.
*/
if (Deno.build.os == 'windows') {
const temp = Deno.env.get("TEMP") || Deno.env.get("TMP");
if (temp) {
return temp.replace(/(?<!:)[/\\]*$/, "");
}
const base = Deno.env.get("SYSTEMROOT") || Deno.env.get("WINDIR");
if (base) {
return base + "\\temp";
}
return null;
} else { // !isWindows
const temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") ||
Deno.env.get("TEMP") || "/tmp";
return temp.replace(/(?<!^)\/*$/, "");
}
}
function exists(path: string): boolean {
try {
return Boolean(Deno.statSync(path));
} catch (error) {
if (error.name == 'NotFound')
try {
return Boolean(Deno.statSync(path));
} catch (error) {
if (error.name == 'NotFound')
return false
throw error
}
throw error
}
}

@@ -7,3 +7,3 @@ import { createRequire } from 'module';

import { setExternals, setNativeFunctions } from './external.js';
import { arch } from 'os';
import { arch, tmpdir, platform } from 'os';
import fs from 'fs';

@@ -19,3 +19,3 @@ import { Encoder as MsgpackrEncoder } from 'msgpackr';

setExternals({
require, arch, fs, path, MsgpackrEncoder, WeakLRUCache, orderedBinary, EventEmitter
require, arch, fs, tmpdir, path, MsgpackrEncoder, WeakLRUCache, orderedBinary, EventEmitter, os: platform(),
});

@@ -22,0 +22,0 @@ export { toBufferKey as keyValueToBuffer, compareKeys, compareKeys as compareKey, fromBufferKey as bufferToKeyValue } from 'ordered-binary';

@@ -1,2 +0,2 @@

import { Compression, getAddress, require, arch, fs, path as pathModule, lmdbError, EventEmitter, MsgpackrEncoder, Env } from './external.js';
import { Compression, getAddress, require, arch, fs, path as pathModule, lmdbError, EventEmitter, MsgpackrEncoder, Env, tmpdir, os } from './external.js';
import { CachingStore, setGetLastVersion } from './caching.js';

@@ -31,3 +31,9 @@ import { addReadMethods, makeReusableBuffer } from './read.js';

}
path = path || '.'
if (!path) {
if (!options)
options = {};
options.deleteOnClose = true;
options.noSync = true;
path = tmpdir() + '/' + Math.floor(Math.random() * 2821109907455).toString(36) + '.mdb'
}
let extension = pathModule.extname(path);

@@ -47,3 +53,3 @@ let name = pathModule.basename(path, extension);

pageSize: 4096,
//overlappingSync: true,
overlappingSync: (options && options.noSync) ? false : os != 'win32',
// default map size limit of 4 exabytes when using remapChunks, since it is not preallocated and we can

@@ -61,4 +67,2 @@ // make it super huge.

}
if (options.separateFlushed === undefined)
options.separateFlushed = options.overlappingSync;

@@ -106,3 +110,5 @@ if (!exists(options.noSubdir ? pathModule.dirname(path) : path))

let env = new Env();
let rc = env.open(options, flags, options.separateFlushed ? 1 : 0);
let jsFlags = (options.separateFlushed ? 1 : 0) |
(options.deleteOnClose ? 2 : 0)
let rc = env.open(options, flags, jsFlags);
if (rc)

@@ -156,3 +162,4 @@ lmdbError(rc);

flags |= 0x08; // integer key
this.db = env.openDbi(flags, dbName, keyType, dbOptions.compression);
if (!((flags & 0xff) && !dbName)) // if there are any dupsort options on the main db, skip as we have to use a write txn below
this.db = env.openDbi(flags, dbName, keyType, dbOptions.compression);
this._commitReadTxn(); // current read transaction becomes invalid after opening another db

@@ -215,2 +222,4 @@ if (!this.db) {// not found

openDB(dbName, dbOptions) {
if (this.dupSort && this.name == null)
throw new Error('Can not open named databases if the main database is dupSort')
if (typeof dbName == 'object' && !dbOptions) {

@@ -217,0 +226,0 @@ dbOptions = dbName;

{
"name": "lmdb",
"author": "Kris Zyp",
"version": "2.1.7",
"version": "2.2.0-beta1",
"description": "Simple, efficient, scalable, high-performance LMDB interface",

@@ -68,7 +68,7 @@ "license": "MIT",

"dependencies": {
"msgpackr": "^1.5.2",
"msgpackr": "^1.5.3",
"nan": "^2.14.2",
"node-gyp-build": "^4.2.3",
"ordered-binary": "^1.2.3",
"weak-lru-cache": "^1.2.1"
"ordered-binary": "^1.2.4",
"weak-lru-cache": "^1.2.2"
},

@@ -75,0 +75,0 @@ "devDependencies": {

@@ -7,3 +7,2 @@ import { RangeIterable } from './util/RangeIterable.js';

let getValueBytes = makeReusableBuffer(0);
let lastSize;
const START_ADDRESS_POSITION = 4064;

@@ -14,3 +13,3 @@

}) {
let readTxn, readTxnRenewed;
let readTxn, readTxnRenewed, returnNullWhenBig = false;
let renewId = 1;

@@ -27,3 +26,3 @@ Object.assign(LMDBStore.prototype, {

if (string)
lastSize = string.length;
this.lastSize = string.length;
return string;

@@ -34,3 +33,3 @@ },

try {
lastSize = this.db.getByBinary(this.writeKey(id, keyBytes, 0));
this.lastSize = this.db.getByBinary(this.writeKey(id, keyBytes, 0));
} catch (error) {

@@ -43,13 +42,20 @@ if (error.message.startsWith('MDB_BAD_VALSIZE') && this.writeKey(id, keyBytes, 0) == 0)

let bytes = compression ? compression.getValueBytes : getValueBytes;
if (lastSize > bytes.maxLength) {
if (lastSize === 0xffffffff)
if (this.lastSize > bytes.maxLength) {
if (this.lastSize === 0xffffffff)
return;
bytes = this._allocateGetBuffer(lastSize);
lastSize = this.db.getByBinary(this.writeKey(id, keyBytes, 0));
if (returnNullWhenBig && this.lastSize >= 0x10000)
return null;
if (this.lastSize >= 0x10000 && !compression && this.db.getSharedByBinary) {
if (this.lastShared)
env.detachBuffer(this.lastShared.buffer)
return this.lastShared = this.db.getSharedByBinary(this.writeKey(id, keyBytes, 0));
}
bytes = this._allocateGetBuffer(this.lastSize);
this.lastSize = this.db.getByBinary(this.writeKey(id, keyBytes, 0));
}
bytes.length = lastSize;
bytes.length = this.lastSize;
return bytes;
},
_allocateGetBuffer(lastSize) {
let newLength = Math.min(Math.max(lastSize * 2, 0x1000), 0xfffffff8);
_allocateGetBuffer(lastSize, exactSize) {
let newLength = exactSize ? lastSize : Math.min(Math.max(lastSize * 2, 0x1000), 0xfffffff8);
let bytes;

@@ -62,2 +68,3 @@ if (this.compression) {

this.compression.setBuffer(bytes, dictLength);
this.compression.fullBytes = bytes;
// the section after the dictionary is the target area for get values

@@ -76,4 +83,32 @@ bytes = bytes.subarray(dictLength);

getBinary(id) {
let fastBuffer = this.getBinaryFast(id);
return fastBuffer && Uint8ArraySlice.call(fastBuffer, 0, lastSize);
let bytesToRestore, compressionBytesToRestore;
try {
returnNullWhenBig = true;
let fastBuffer = this.getBinaryFast(id);
if (fastBuffer === null) {
if (this.compression) {
bytesToRestore = this.compression.getValueBytes;
compressionBytesToRestore = this.compression.fullBytes;
} else
bytesToRestore = getValueBytes;
// allocate buffer specifically for this get
this._allocateGetBuffer(this.lastSize, true);
return this.getBinaryFast(id);
}
return fastBuffer && Uint8ArraySlice.call(fastBuffer, 0, this.lastSize);
} finally {
returnNullWhenBig = false;
if (bytesToRestore) {
if (compressionBytesToRestore) {
let compression = this.compression;
let dictLength = (compression.dictionary.length >> 3) << 3;
compression.setBuffer(compressionBytesToRestore, dictLength);
compression.fullBytes = compressionBytesToRestore;
compression.getValueBytes = bytesToRestore;
} else {
setGlobalBuffer(bytesToRestore);
getValueBytes = bytesToRestore;
}
}
}
},

@@ -102,3 +137,3 @@ get(id) {

version: getLastVersion(),
//size: lastSize
//size: this.lastSize
};

@@ -108,3 +143,3 @@ else

value,
//size: lastSize
//size: this.lastSize
};

@@ -131,7 +166,7 @@ }

this.getBinaryFast(key);
return lastSize !== 0xffffffff;
return this.lastSize !== 0xffffffff;
}
else if (this.useVersions) {
this.getBinaryFast(key);
return lastSize !== 0xffffffff && getLastVersion() === versionOrValue;
return this.lastSize !== 0xffffffff && getLastVersion() === versionOrValue;
}

@@ -395,8 +430,8 @@ else {

let txn = (env.writeTxn || (readTxnRenewed ? readTxn : renewReadTxn()));
lastSize = this.keyIsCompatibility ? txn.getBinaryShared(id) : this.db.get(this.writeKey(id, keyBytes, 0));
if (lastSize === 0xffffffff) { // not found code
this.lastSize = this.keyIsCompatibility ? txn.getBinaryShared(id) : this.db.get(this.writeKey(id, keyBytes, 0));
if (this.lastSize === 0xffffffff) { // not found code
return; //undefined
}
return lastSize;
lastSize = keyBytesView.getUint32(0, true);
return this.lastSize;
this.lastSize = keyBytesView.getUint32(0, true);
let bufferIndex = keyBytesView.getUint32(12, true);

@@ -406,3 +441,3 @@ lastOffset = keyBytesView.getUint32(8, true);

let startOffset;
if (!buffer || lastOffset < (startOffset = buffer.startOffset) || (lastOffset + lastSize > startOffset + 0x100000000)) {
if (!buffer || lastOffset < (startOffset = buffer.startOffset) || (lastOffset + this.lastSize > startOffset + 0x100000000)) {
if (buffer)

@@ -417,3 +452,3 @@ env.detachBuffer(buffer.buffer);

return buffer;
return buffer.slice(lastOffset, lastOffset + lastSize);/*Uint8ArraySlice.call(buffer, lastOffset, lastOffset + lastSize)*/
return buffer.slice(lastOffset, lastOffset + this.lastSize);/*Uint8ArraySlice.call(buffer, lastOffset, lastOffset + this.lastSize)*/
},

@@ -420,0 +455,0 @@ prefetch(keys, callback) {

@@ -154,2 +154,9 @@ [![license](https://img.shields.io/badge/license-MIT-brightgreen)](LICENSE)

### `db.committed: Promise`
This is a promise-like object that resolves when all previous writes have been committed.
### `db.flushed: Promise`
This is a promise-like object that resolves when all previous writes have been committed and fully flushed/synced to disk/storage.
### `db.putSync(key, value, versionOrOptions?: number | PutOptions): boolean`

@@ -364,3 +371,3 @@ This will set the provided value at the specified key, but will do so synchronously. If this is called inside of a transaction, the put will be performed in the current transaction. If not, a transaction will be started, the put will be executed, the transaction will be committed, and then the function will return. We do not recommend this be used for any high-frequency operations as it can be vastly slower (often blocking the main JS thread for multiple milliseconds) than the `put` operation (typically consumes a few _microseconds_ on a worker thread). The third argument may be a version number or an options object that supports `append`, `appendDup`, `noOverwrite`, `noDupData`, and `version` for corresponding LMDB put flags.

`db.openDB(name, options)` or `db.openDB(options)`
If the `path` has an `.` in it, it is treated as a file name, otherwise it is treated as a directory name, where the data will be stored. The `options` argument to either of the functions should be an object, and supports the following properties, all of which are optional (except `name` if not otherwise specified):
If the `path` has an `.` in it, it is treated as a file name, otherwise it is treated as a directory name, where the data will be stored. The path can be omitted to create a temporary database, which will be created in the system temp directory and deleted on close. The `options` argument to either of the functions should be an object, and supports the following properties, all of which are optional (except `name` if not otherwise specified):
* `name` - This is the name of the database. This defaults to null (which is the root database) when opening the database environment (`open`). When an opening a database within an environment (`openDB`), this is required, if not specified in first parameter.

@@ -375,7 +382,7 @@ * `encoding` - Sets the encoding for the database values, which can be `'msgpack'`, `'json'`, `'cbor'`, `'string'`, `'ordered-binary'`or `'binary'`.

* `compression` - This enables compression. This can be set a truthy value to enable compression with default settings, or it can be an object with compression settings.
* `cache` - Setting this to true enables caching. This can also be set to an object specifying the settings/options for the cache (see [settings for weak-lru-cache](https://github.com/kriszyp/weak-lru-cache#weaklrucacheoptions-constructor)).
* `cache` - Setting this to true enables caching. This can also be set to an object specifying the settings/options for the cache (see [settings for weak-lru-cache](https://github.com/kriszyp/weak-lru-cache#weaklrucacheoptions-constructor)). For long-running synchronous operations, it is recommended that you set the `clearKeptInterval` (a value of 100 is a good choice).
* `useVersions` - Set this to true if you will be setting version numbers on the entries in the database. Note that you can not change this flag once a database has entries in it (or they won't be read correctly).
* `keyEncoding` - This indicates the encoding to use for the database keys, and can be `'uint32'` for unsigned 32-bit integers, `'binary'` for raw buffers/Uint8Arrays, and the default `'ordered-binary'` allows any JS primitive as a keys.
* `keyEncoder` - Provide a custom key encoder.
* `dupSort` - Enables duplicate entries for keys. You will usually want to retrieve the values for a key with `getValues`.
* `dupSort` - Enables duplicate entries for keys. You will usually want to retrieve the values for a key with `getValues`. Note that you can not set this flag on the unnamed/main database and also have named databases.
* `strictAsyncOrder` - Maintain strict ordering of execution of asynchronous transaction callbacks relative to asynchronous single operations.

@@ -387,4 +394,4 @@

* `maxReaders` - The maximum number of concurrent read transactions (readers) to be able to open ([more information](http://www.lmdb.tech/doc/group__mdb.html#gae687966c24b790630be2a41573fe40e2)).
* `overlappingSync` - This enables committing transactions where LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed. This option is discussed in more detail below.
* `separateFlushed` - Resolve asynchronous operations when commits are finished and visible and include a separate promise for when a commit is flushed to disk, as a `flushed` property on the commit promise.
* `overlappingSync` - This enables committing transactions where LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed and defaults to being enabled. This option is discussed in more detail below.
* `separateFlushed` - Resolve asynchronous operations when commits are finished and visible and include a separate promise for when a commit is flushed to disk, as a `flushed` property on the commit promise. Note that you can alternately use the `flushed` property on the database.
* `pageSize` - This defines the page size of the database. This is 4,096 by default. You may want to consider setting this to 8,192 for databases larger than available memory (and more so if you have range queries) or 4,096 for databases that can mostly cache in memory. Note that this only affects the page size of new databases (does not affect existing databases).

@@ -409,21 +416,15 @@ * `eventTurnBatching` - This is enabled by default and will ensure that all asynchronous write operations performed in the same event turn will be batched together into the same transaction. Disabling this allows lmdb-js to commit a transaction at any time, and asynchronous operations will only be guaranteed to be in the same transaction if explicitly batched together (with `transaction`, `batch`, `ifVersion`). If this is disabled (set to `false`), you can control how many writes can occur before starting a transaction with `txnStartThreshold` (although a transaction will still be started at the next event turn if the threshold is not met). Disabling event turn batching (and using lower `txnStartThreshold` values) can facilitate a faster response time to write operations. `txnStartThreshold` defaults to 5.

### Overlapping Sync Options
The `overlappingSync` option enables a new technique for committing transactions where LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed. This means that the expensive/slow disk flushing operations do not occur during the writer lock, and allows disk flushing to occur in parallel with future transactions, providing potentially significant performance benefits. This uses a multi-step process of updating meta pointers to ensure database integrity even if a crash occurs.
The `overlappingSync` option enables transactions to be committed such that LMDB waits for a transaction to be fully flushed to disk _after_ the transaction has been committed. This option is enabled by default on non-Windows operating systems. This means that the expensive/slow disk flushing operations do not occur during the writer lock, and allows disk flushing to occur in parallel with future transactions, providing potentially significant performance benefits. This uses a multi-step process of updating meta pointers to ensure database integrity even if a crash occurs.
When this is enabled, there are two events of potential interest: when the transaction is committed and the data is visible (to all other threads/processes), and when the transaction is flushed and durable. When enabled, the `separateFlushed` is also enabled by default and for write operations, the returned promise will resolve when the transaction is committed. The promise will also have a `flushed` property that holds a second promise that is resolved when the OS reports that the transaction writes have been fully flushed to disk and are truly durable (at least as far as the hardware/OS is capable of guaranteeing this). For example:
When this is enabled, there are two events of potential interest: when the transaction is committed and the data is visible (to all other threads/processes), and when the transaction is flushed and durable. The write actions return a promise for when they are committed. The database includes a `flushed` property with a promise-like object that resolves when the last commit is fully flushed/synced to disk and is durable. Alternately, the `separateFlushed` option can be enabled and for write operations, the returned promise will still resolve when the transaction is committed and the promise will also have a `flushed` property that holds a second promise that is resolved when the OS reports that the transaction writes have been fully flushed to disk and are truly durable (at least as far as the hardware/OS is capable of guaranteeing this). For example:
```
let db = open('my-db', { overlappingSync: true })
let db = open('my-db', { overlappingSync: true });
let written = db.put(key, value);
await written; // wait for it to be committed
let v = db.get(key) // this value can now be retrieved from the db
await written.flushed // wait for commit to be fully flushed to disk
await db.flushed // wait for last commit to be fully flushed to disk
```
The `separateFlushed` option defaults to whatever `overlappingSync` was set to. However, you can explicitly set it. If you want to use `overlappingSync`, but have all write operations resolve when the transaction is fully flushed and durable, you can set `separateFlushed` to `false`. Alternately, if you want to use differing `overlappingSync` settings, but also have a `flushed` promise, you can set `separateFlushed` to `true`.
Enabling `overlappingSync` option is generally not recommended on Windows, as Window's disk flushing operation tends to have very poor performance characteristics on larger databases (whereas Windows tends to perform well with standard transactions). This option may be enabled by default in the future, for non-Windows platforms.
Enabling the `overlappingSync` option is generally not recommended on Windows, as Windows' disk flushing operation tends to have very poor performance characteristics on larger databases (whereas Windows tends to perform well with standard transactions). This option may be enabled by default in the future, for non-Windows platforms. This is probably a good setting:
```
overlappingSync: os.platform() != 'win32',
separateFlushed: true,
```
#### Serialization options

@@ -430,0 +431,0 @@ If you are using the default encoding of `'msgpack'`, the [msgpackr](https://github.com/kriszyp/msgpackr) package is used for serialization and deserialization. You can provide encoder options that are passed to msgpackr or cbor, as well, by including them in the `encoder` property object. For example, these options can be potentially useful:

@@ -30,3 +30,3 @@ import { getAddress } from './external.js';

export function addWriteMethods(LMDBStore, { env, fixedBuffer, resetReadTxn, useWritemap, maxKeySize,
eventTurnBatching, txnStartThreshold, batchStartThreshold, overlappingSync, commitDelay, separateFlushed }) {
eventTurnBatching, txnStartThreshold, batchStartThreshold, overlappingSync, commitDelay, separateFlushed, maxFlushDelay }) {
// stands for write instructions

@@ -67,2 +67,3 @@ var dynamicBytes;

batchStartThreshold = batchStartThreshold || 1000;
maxFlushDelay = maxFlushDelay || 250;

@@ -276,3 +277,3 @@ allocateInstructionBuffer();

}
if (!flushPromise && overlappingSync && separateFlushed)
if (!flushPromise && overlappingSync)
flushPromise = new Promise(resolve => flushResolvers.push(resolve));

@@ -339,2 +340,3 @@ if (writeStatus & WAITING_OPERATION) { // write thread is waiting

}
let committedFlushResolvers, lastSync = Promise.resolve()
function startWriting() {

@@ -347,2 +349,3 @@ if (enqueuedCommit) {

flushResolvers = [];
let start = Date.now();
env.startWriting(startAddress, (status) => {

@@ -355,4 +358,21 @@ if (dynamicBytes.uint32[dynamicBytes.position << 1] & TXN_DELIMITER)

case 0:
for (let i = 0; i < resolvers.length; i++)
resolvers[i]();
if (resolvers.length > 0) {
if (committedFlushResolvers)
committedFlushResolvers.push(...resolvers)
else {
committedFlushResolvers = resolvers
let delay = Math.min(Date.now() - start, maxFlushDelay)
setTimeout(() => lastSync.then(() => {
let resolvers = committedFlushResolvers
committedFlushResolvers = null
lastSync = new Promise((resolve) => {
env.sync(() => {
for (let i = 0; i < resolvers.length; i++)
resolvers[i]();
resolve();
});
});
}), delay);
}
}
case 1:

@@ -359,0 +379,0 @@ break;

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc