Comparing version 2.0.0-beta1 to 2.0.0-beta2
@@ -12,3 +12,3 @@ 'use strict';
-import { open } from '../index.js';
+import { open } from '../node-index.js';
var env;
@@ -15,0 +15,0 @@ var dbi;
deno.js
@@ -0,11 +1,20 @@
+import { setNativeFunctions } from './native.js'
+// probably use Deno.build.os
+import { arch } from 'https://deno.land/std/node/os.ts'
let lmdbLib = Deno.dlopen('./build/Release/lmdb-store.node', {
-	envOpen: { parameters: ['u32', 'buffer', 'usize'], result: 'usize'}
+	envOpen: { parameters: ['u32', 'buffer', 'usize'], result: 'usize'},/*
+	free: { parameters: ['buffer', 'usize'], result: 'void'},
+	getAddress: { parameters: ['buffer', 'usize'], result: 'usize'},
+	startWriting: { parameters: ['buffer', 'usize'], nonblocking: true, result: 'u32'},
+	write: { parameters: ['buffer', 'usize'], result: 'u32'},
+	getBinary: { parameters: ['buffer', 'usize'], result: 'u32'},
+	*/
})
let b = new Uint8Array([1,2])
console.log(symbols.envOpen(0, b, 2))
-let { Env, open } = lmdbLib.symbols
+let { envOpen, getAddress, free } = lmdbLib.symbols
let registry = new FinalizationRegistry(address => {
	// when an object is GC'ed, free it in C.
-	free(address)
+	free(address, 1)
})
@@ -31,5 +40,18 @@
	open(flags, path) {
-		return open(this.address, flags, path)
+		return envOpen(this.address, flags, path)
	}
}
+Env.addMethods('startWriting', 'write', 'openDB')
+class Dbi extends CBridge {
+}
+class Compression extends CBridge {
+}
+class Cursor extends CBridge {
+}
+setNativeFunctions({ Env, Compression, Cursor, fs: Deno, arch, getAddress, getAddressShared: getAddress })
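
`CBridge` is referenced above but defined outside this hunk, so here is a hedged sketch of the finalizer pattern the Deno port appears to use (`freeNative` is a stand-in for the dlopen'ed `free` above):

```js
// Sketch: tie native memory lifetime to JS wrapper lifetime.
const registry = new FinalizationRegistry((address) => {
  // Runs after the wrapper is GC'ed; release the C-side allocation.
  freeNative(address, 1)
})
class CBridge {
  constructor(address) {
    this.address = address
    // Pass only the address as the held value so the finalizer
    // does not keep the wrapper itself alive.
    registry.register(this, address)
  }
}
```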
index.d.ts
@@ -139,3 +139,6 @@ import { EventEmitter } from 'events'
/**
+* @deprecated Since v1.3.0, this will be replaced with the functionality of transactionAsync in a future release
+* Execute a transaction asynchronously, running all the actions within the action callback in the transaction,
+* and committing the transaction after the action callback completes.
+* existing version
* @param action The function to execute within the transaction
**/
@@ -148,3 +151,2 @@ transaction<T>(action: () => T): T
* @param action The function to execute within the transaction
-* @param abort If true will abort the transaction when completed
**/
@@ -158,11 +160,8 @@ transactionSync<T>(action: () => T): T
**/
-transactionAsync<T>(action: () => T): Promise<T>
+childTransaction<T>(action: () => T): Promise<T>
/**
-* Execute a transaction asynchronously, running all the actions within the action callback in the transaction,
-* and committing the transaction after the action callback completes.
-* existing version
-* @param action The function to execute within the transaction
-* @param abort If true will abort the transaction when completed
+* Execute a set of write operations that will all be batched together in the next queued asynchronous transaction.
+* @param action The function to execute with a set of write operations.
**/
-childTransaction<T>(action: () => T): Promise<T>
+batch<T>(action: () => T): Promise<T>
/**
@@ -201,10 +200,18 @@ * Execute write actions that are all conditionally dependent on the entry with the provided key having the provided
/**
-* Delete this database/store.
+* Delete this database/store (asynchronously).
**/
-deleteDB(): void
+drop(): Promise<void>
/**
-* Clear all the entries from this database/store.
+* Synchronously delete this database/store.
**/
-clear(): void
+dropSync(): void
+/**
+* Asynchronously clear all the entries from this database/store.
+**/
+clearAsync(): Promise<void>
+/**
+* Synchronously clear all the entries from this database/store.
+**/
+clearSync(): void
/**
* Check the reader locks and remove any stale reader locks. Returns the number of stale locks that were removed.
@@ -325,4 +332,8 @@ **/
export function getLastVersion(): number
export function compareKey(a: Key, b: Key): number
+export function compareKeys(a: Key, b: Key): number
+class Binary {}
+/* Wrap a Buffer/Uint8Array for direct assignment as a value bypassing any encoding, for put (and doesExist) operations.
+*/
+export function asBinary(buffer: Uint8Array): Binary
}
export = lmdb
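
Putting the declaration changes above together, a hedged usage sketch (the store name and values are placeholders, not from the diff):

```js
import { open, asBinary } from 'lmdb';
const db = open('my-db');

// batch(): queue several writes into the next asynchronous transaction
await db.batch(() => {
  db.put('a', 1);
  db.put('b', 2);
});

// clear/drop now have explicit sync and async variants
await db.clearAsync(); // clear entries asynchronously
db.clearSync();        // or synchronously
await db.drop();       // delete the whole store asynchronously
```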
index.js
@@ -1,25 +0,13 @@
-import fs from 'fs' // TODO: or Deno
import { extname, basename, dirname} from 'path'
import EventEmitter from 'events'
-import { Env, Cursor, Compression, getBufferForAddress, getAddress } from './native.js'
+import { Env, Compression, getAddress, require, arch, fs } from './native.js'
import { CachingStore, setGetLastVersion } from './caching.js'
import { addQueryMethods } from './query.js'
-import { addWriteMethods, ABORT } from './writer.js'
+export { ABORT } from './writer.js'
+import { addWriteMethods } from './writer.js'
import { applyKeyHandling } from './keys.js'
export { toBufferKey as keyValueToBuffer, compareKeys, compareKeys as compareKey, fromBufferKey as bufferToKeyValue } from 'ordered-binary/index.js'
-import { createRequire } from 'module'
import { Encoder as MsgpackrEncoder } from 'msgpackr'
export { levelup } from './level.js'
-const require = createRequire(import.meta.url)
-import os from 'os'
+const binaryBuffer = Symbol('binaryBuffer')
setGetLastVersion(getLastVersion)
const Uint8ArraySlice = Uint8Array.prototype.slice
-const keyBytes = Buffer.allocUnsafeSlow(2048)
-const keyBuffer = keyBytes.buffer
-const keyBytesView = keyBytes.dataView = new DataView(keyBytes.buffer, 0, 2048) // max key size is actually 1978
-keyBytes.uint32 = new Uint32Array(keyBuffer, 0, 512)
-keyBytes.float64 = new Float64Array(keyBuffer, 0, 256)
-keyBytes.uint32.address = keyBytes.address = keyBuffer.address = getAddress(keyBuffer)
+let keyBytes, keyBytesView
const buffers = []
@@ -35,7 +23,2 @@
export const allDbs = new Map()
-const SYNC_PROMISE_RESULT = Promise.resolve(true)
-const SYNC_PROMISE_FAIL = Promise.resolve(false)
-SYNC_PROMISE_RESULT.isSync = true
-SYNC_PROMISE_FAIL.isSync = true
let env
@@ -47,2 +30,4 @@ let defaultCompression
export function open(path, options) {
+	if (!keyBytes)
+		allocateFixedBuffer()
	let env = new Env()
@@ -61,3 +46,3 @@ let committingWrites
	let name = basename(path, extension)
-	let is32Bit = os.arch().endsWith('32')
+	let is32Bit = arch().endsWith('32')
	let remapChunks = (options && options.remapChunks) || ((options && options.mapSize) ?
@@ -349,5 +334,6 @@ (is32Bit && options.mapSize > 0x100000000) : // larger than fits in address space, must use dynamic maps
	else {
-		if (this.encoder) {
+		if (versionOrValue && versionOrValue[binaryBuffer])
+			versionOrValue = versionOrValue[binaryBuffer]
+		else if (this.encoder)
			versionOrValue = this.encoder.encode(versionOrValue)
-		}
		if (typeof versionOrValue == 'string')
@@ -358,15 +344,2 @@ versionOrValue = Buffer.from(versionOrValue)
	}
-	batch(operations) {
-		/*if (writeTxn) {
-			this.commitBatchNow(operations.map(operation => [this.db, operation.key, operation.value]))
-			return Promise.resolve(true)
-		}*/
-		let scheduledOperations = this.getScheduledOperations()
-		for (let operation of operations) {
-			let value = operation.value
-			scheduledOperations.push([operation.key, value])
-			scheduledOperations.bytes += operation.key.length + (value && value.length || 0) + 200
-		}
-		return this.scheduleCommit().unconditionalResults
-	}
	backup(path) {
@@ -410,18 +383,26 @@ return new Promise((resolve, reject) => env.copy(path, false, (error) => {
	deleteDB() {
+		console.warn('deleteDB() is deprecated, use drop or dropSync instead')
+		return this.dropSync()
	}
+	dropSync() {
		this.transactionSync(() =>
			this.db.drop({
				justFreePages: false
			})
-		, { abortable: false })
+		),
+		{ abortable: false })
	}
	clear(callback) {
+		if (typeof callback == 'function')
+			return this.clearAsync(callback)
+		console.warn('clear() is deprecated, use clearAsync or clearSync instead')
+		this.clearSync()
	}
+	clearSync() {
+		if (this.encoder && this.encoder.structures)
+			this.encoder.structures = []
		this.transactionSync(() =>
			this.db.drop({
				justFreePages: true
			})
-		, { abortable: false })
-		if (this.encoder && this.encoder.structures)
-			this.encoder.structures = []
-		if (typeof callback == 'function')
-			callback(null)
+		),
+		{ abortable: false })
	}
@@ -449,2 +430,3 @@ readerCheck() {
	let existingStructures = existingStructuresBuffer ? this.encoder.decode(existingStructuresBuffer) : []
+	console.log('Upgrading structures for ' + this.name + ' from ' + previousLength + ' (is ' + existingStructures.length + ') to ' + structures.length + ' on ', process.pid)
	if (existingStructures.length != previousLength)
@@ -466,3 +448,3 @@ return false // it changed, we need to indicate that we couldn't update
	}, saveKey, keyBytes, keyBytesView, getLastVersion })
-	addWriteMethods(LMDBStore, { env, fixedBuffer: keyBytes, resetReadTxn, ...options })
+	addWriteMethods(LMDBStore, { env, fixedBuffer: keyBytes, resetReadTxn, binaryBuffer, ...options })
	LMDBStore.prototype.supports = {
@@ -520,2 +502,7 @@ permanence: true,
}
+export function asBinary(buffer) {
+	return {
+		[binaryBuffer]: buffer
+	}
+}
let saveBuffer, saveDataView, saveDataAddress
@@ -531,2 +518,10 @@ let savePosition = 8000
}
+function allocateFixedBuffer() {
+	keyBytes = Buffer.allocUnsafeSlow(2048)
+	const keyBuffer = keyBytes.buffer
+	keyBytesView = keyBytes.dataView = new DataView(keyBytes.buffer, 0, 2048) // max key size is actually 1978
+	keyBytes.uint32 = new Uint32Array(keyBuffer, 0, 512)
+	keyBytes.float64 = new Float64Array(keyBuffer, 0, 256)
+	keyBytes.uint32.address = keyBytes.address = keyBuffer.address = getAddress(keyBuffer)
+}
function saveKey(key, writeKey, saveTo) {
@@ -533,0 +528,0 @@ if (savePosition > 6200) {
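
The new `asBinary` export above relies on a module-private symbol as a marker. A hedged sketch of the pattern, with names simplified from the diff (`toValueBuffer` is hypothetical; the real check lives in writer.js):

```js
// Sketch of the symbol-marker pattern behind asBinary.
const binaryBuffer = Symbol('binaryBuffer')

function asBinary(buffer) {
  // The symbol is module-private, so ordinary user values can never
  // accidentally carry the marker.
  return { [binaryBuffer]: buffer }
}

function toValueBuffer(value, encoder) {
  if (value && value[binaryBuffer])
    return value[binaryBuffer] // raw bytes: skip the encoder entirely
  return encoder.encode(value)
}
```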
native.js
@@ -1,25 +0,11 @@
-import { createRequire } from 'module';
-const require = createRequire(import.meta.url)
-import { fileURLToPath } from 'url'
-import { dirname } from 'path'
-let nativeMethods, dirName = dirname(fileURLToPath(import.meta.url)).replace(/dist$/, '')
-try {
-	console.log(dirName)
-	nativeMethods = require('node-gyp-build')(dirName)
-	if (process.versions.modules == 93)
-		require('v8').setFlagsFromString('--turbo-fast-api-calls')
-} catch(error) {
-	if (process.versions.modules == 93) {
-		// use this abi version as the backup version without turbo-fast-api-calls enabled
-		Object.defineProperty(process.versions, 'modules', { value: '92' })
-		try {
-			nativeMethods = require('node-gyp-build')(dirName)
-		} catch(secondError) {
-			throw error
-		} finally {
-			Object.defineProperty(process.versions, 'modules', { value: '93' })
-		}
-	} else
-		throw error
+export let Env, Compression, Cursor, getAddress, getAddressShared, require, arch, fs
+export function setNativeFunctions(nativeInterface) {
+	Env = nativeInterface.Env
+	Compression = nativeInterface.Compression
+	getAddress = nativeInterface.getAddress
+	getAddressShared = nativeInterface.getAddressShared
+	Cursor = nativeInterface.Cursor
+	require = nativeInterface.require
+	arch = nativeInterface.arch
+	fs = nativeInterface.fs
+}
-export const { Env, Cursor, Compression, getBufferForAddress, getAddress, getAddressShared } = nativeMethods
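
With the loader logic removed from native.js, the Node entry point presumably performs the node-gyp-build loading shown above and injects the result. A hypothetical sketch of that wiring (the real code lives in node-index.js, which is not shown in this diff):

```js
import os from 'os'
import fs from 'fs'
import { dirname } from 'path'
import { fileURLToPath } from 'url'
import { createRequire } from 'module'
import { setNativeFunctions } from './native.js'

const require = createRequire(import.meta.url)
const dirName = dirname(fileURLToPath(import.meta.url)).replace(/dist$/, '')
// Load the prebuilt addon as before, then hand everything to native.js
const nativeMethods = require('node-gyp-build')(dirName)
setNativeFunctions({ ...nativeMethods, require, arch: os.arch, fs })
```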
package.json
{
	"name": "lmdb",
	"author": "Kris Zyp",
-	"version": "2.0.0-beta1",
+	"version": "2.0.0-beta2",
	"description": "Simple, efficient, scalable data store wrapper for LMDB",
@@ -21,8 +21,7 @@ "license": "MIT",
	".": {
-		"require": "./dist/index.cjs",
-		"import": "./index.js"
	},
-	"./index.js": {
-		"require": "./dist/index.cjs",
-		"import": "./index.js"
+		"node": {
+			"require": "./dist/index.cjs",
+			"import": "./node-index.js"
		},
+		"default": "./index.js"
	}
@@ -39,4 +38,4 @@ },
	"prepare": "rollup -c",
-	"before-publish": "rollup -c && prebuildify-ci download && prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0 && prebuildify --target electron@15.2.0",
-	"prebuild": "prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0 && prebuildify --target electron@15.2.0",
+	"before-publish": "rollup -c && prebuildify-ci download && prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 15.5.0 && prebuildify --target 15.4.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0 && prebuildify --target electron@15.2.0",
+	"prebuild": "prebuildify --target 17.0.1 && prebuildify --target 16.9.0 && prebuildify --target 15.5.0 && prebuildify --target 14.17.6 && prebuildify --target 12.18.0 && prebuildify --target electron@15.2.0",
	"prebuild-arm64": "prebuildify --arch=arm64 --target 17.0.1 && prebuildify --arch=arm64 --target 16.9.0 && prebuildify --arch=arm64 --target 14.17.6 && prebuildify --arch=arm64 --target electron@15.2.0",
@@ -43,0 +42,0 @@ "recompile": "node-gyp clean && node-gyp configure && node-gyp build",
README.md
@@ -21,3 +21,3 @@ [![license](https://img.shields.io/badge/license-MIT-brightgreen)](LICENSE)
-This library, `lmdb-store`, is published to the NPM packages `lmdb-store` and `lmdb`, and can be installed with:
+This library is published to the NPM package `lmdb` (the 1.x versions were also published to `lmdb-store`), and can be installed with:
```npm install lmdb```
@@ -33,3 +33,3 @@
`lmdb-store` is designed for synchronous reads and asynchronous writes. In idiomatic NodeJS code, I/O operations are performed asynchronously. LMDB is a memory-mapped database; reading and writing within a transaction does not use any I/O (other than the slight possibility of a page fault), can usually be performed faster than Node's event queue callbacks can even execute, and it is easier to write code for instant synchronous values from reads. On the other hand, committing transactions does involve I/O, and vastly higher throughput can be achieved by batching operations and executing on a separate thread. Consequently, `lmdb-store` is designed for transactions to go through this asynchronous batching process and return a simple promise that resolves once data is written and flushed to disk.
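
A small sketch of what that looks like in practice (assuming the API described in this README; the store name and values are placeholders):

```js
import { open } from 'lmdb';
const db = open('my-data');
// get() is synchronous: the value is read straight out of the memory map
let value = db.get('greeting');
// put() is asynchronous: it is queued into a batched transaction and
// returns a promise that resolves once the commit is flushed to disk
await db.put('greeting', { text: 'hello' });
```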
@@ -75,2 +75,4 @@ With the default sync'ing configuration, LMDB has a crash-proof design; a machine can be turned off at any point, and data cannot be corrupted unless the written data is actually changed or tampered with. Writing data and waiting for confirmation that it has been written to the physical medium is critical for data integrity, but is well known to have latency (although not necessarily less efficient). However, by batching writes, when a database is under load, slower transactions enable more writes per transaction, and this library is able to drive LMDB to achieve the maximum levels of throughput with fully sync'ed operations, preserving both the durability/safety of the transactions and legendary performance.
+In addition, you can use `asBinary` to directly store a buffer or Uint8Array as a value, bypassing any encoding.
### Keys
@@ -233,3 +235,3 @@ When using the various APIs, keys can be any JS primitive (string, number, boolean, symbol), an array of primitives, or a Buffer. Using the default `ordered-binary` conversion, primitives are translated to binary keys used by LMDB in such a way that consistent ordering is preserved: numbers are ordered naturally and come before strings, which are ordered lexically. The keys are stored with type information preserved. The `getRange` operations that return a set of entries will return entries with the original JS primitive values for the keys. If arrays are used as keys, they are ordered by the first value in the array, with each subsequent element being a tie-breaker. Numbers are stored as doubles, with reversal of the sign bit for proper ordering plus type information, so any JS number can be used as a key. For example, here is the ordering of some different keys:
```
-const { open } = require('lmdb');
+import { open } from 'lmdb';
let rootStore = open('all-my-data');
@@ -257,2 +259,9 @@ let usersStore = myStore.openDB('users');
+### `asBinary(buffer): Binary`
+This can be used to directly store a buffer or Uint8Array as a value, bypassing any encoding. If you are using a store with an encoding that isn't `binary`, setting a value with a Uint8Array will typically be encoded with that encoding (for example, MessagePack wraps it in a header, preserving its type for `get`). However, if you want to bypass encoding, for example if you have already encoded a value, you can use `asBinary`:
+```
+let buffer = encode(myValue) // if we have already serialized a value, perhaps to compare it or check its size
+db.put(key, asBinary(buffer)) // we can directly store the encoded value
+```
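
A slightly fuller sketch, combining this with the `getBinary` method exercised in this release's tests (using the store's own encoder as the pre-serialization step; names are placeholders):

```js
import { open, asBinary } from 'lmdb';
const db = open('my-data');

const buffer = db.encoder.encode({ foo: 4 }); // pre-encode once
await db.put('key1', asBinary(buffer));       // stored verbatim, no re-encoding
db.get('key1');       // decodes back to { foo: 4 }
db.getBinary('key1'); // the raw stored bytes, as written
```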
### `close(): void`
@@ -259,0 +268,0 @@ This will close the current store. This closes the underlying LMDB database, and if this is the root database (opened with `open` as opposed to `store.openDB`), it will close the environment (and child stores will no longer be able to interact with the database).
rollup.config.js
export default [
	{
-		input: "index.js",
+		input: "node-index.js",
		output: [
@@ -5,0 +5,0 @@ {
@@ -13,3 +13,3 @@ import path from 'path';
-import { open, levelup, bufferToKeyValue, keyValueToBuffer, ABORT } from '../index.js';
+import { open, levelup, bufferToKeyValue, keyValueToBuffer, asBinary, ABORT } from '../node-index.js';
import { ArrayLikeIterable } from '../util/ArrayLikeIterable.js'
@@ -63,2 +63,3 @@
	//overlappingSync: true,
+	eventTurnBatching: false,
	keyEncoder: orderedBinaryEncoder,
@@ -70,3 +71,3 @@ compression: {
	if (!options.checkLast)
-		db.clear();
+		db.clearSync();
	db2 = db.openDB(Object.assign({
@@ -78,3 +79,3 @@ name: 'mydb4',
	if (!options.checkLast)
-		db2.clear();
+		db2.clearSync();
	db3 = db.openDB({
@@ -87,3 +88,3 @@ name: 'mydb5',
	if (!options.checkLast)
-		db3.clear();
+		db3.clearSync();
});
@@ -252,2 +253,10 @@ if (options.checkLast) {
});
+it('forced compression due to starting with 255', async function() {
+	await db.put('key1', asBinary(Buffer.from([255])));
+	let entry = db.getBinary('key1');
+	entry.length.should.equal(1);
+	entry[0].should.equal(255);
+	(await db.remove('key1')).should.equal(true);
+});
if (options.encoding == 'ordered-binary')
@@ -262,2 +271,11 @@ return // no more tests need to be applied for this
});
+it('store binary', async function() {
+	let dataIn = {foo: 4, bar: true}
+	let buffer = db.encoder.encode(dataIn);
+	if (typeof buffer == 'string')
+		return
+	await db.put('key1', asBinary(buffer));
+	let dataOut = db.get('key1');
+	dataOut.should.deep.equal(dataIn);
+});
it('writes batch with callback', async function() {
@@ -602,3 +620,3 @@ let dataIn = {name: 'for batch 1'}
	let ranTransaction
	db.put('key1', 'async initial value'); // should be queued for async write, but should put before queued transaction
	let errorHandled
@@ -657,31 +675,30 @@ if (!db.cache) {
it('async transaction with interrupting sync transaction default order', async function() {
	db.strictAsyncOrder = true
	let order = []
	let ranSyncTxn
	db.on('beforecommit', ()=> {
		// force eventTurnBatching on
	})
	db.transactionAsync(() => {
		order.push('a1');
		db.put('async1', 'test');
		if (!ranSyncTxn) {
			ranSyncTxn = true;
			setImmediate(() => {
				db.transactionSync(() => {
					order.push('s1');
					db.put('inside-sync', 'test');
	for (let i =0; i< 10;i++) {
		db.strictAsyncOrder = true
		let order = []
		let ranSyncTxn
		db.transactionAsync(() => {
			order.push('a1');
			db.put('async1', 'test');
			if (!ranSyncTxn) {
				ranSyncTxn = true;
				setImmediate(() => {
					db.transactionSync(() => {
						order.push('s1');
						db.put('inside-sync', 'test');
					});
				});
			}
		});
		db.put('outside-txn', 'test');
		await db.transactionAsync(() => {
			order.push('a2');
			db.put('async2', 'test');
		});
		order.should.deep.equal(['a1', 's1', 'a2']);
		should.equal(db.get('async1'), 'test');
		should.equal(db.get('outside-txn'), 'test');
		should.equal(db.get('inside-sync'), 'test');
		should.equal(db.get('async2'), 'test');
	}
	});
	db.put('outside-txn', 'test');
	await db.transactionAsync(() => {
		order.push('a2');
		db.put('async2', 'test');
	});
	order.should.deep.equal(['a1', 's1', 'a2']);
	should.equal(db.get('async1'), 'test');
	should.equal(db.get('outside-txn'), 'test');
	should.equal(db.get('inside-sync'), 'test');
	should.equal(db.get('async2'), 'test');
}
});
@@ -692,2 +709,12 @@ it('multiple async mixed', async function() {
	if (i%4 < 3) {
+		if (i%8 == 1) {
+			let sync = () => db.transactionSync(() => {
+				db.put('foo' + i, i)
+			})
+			if (i%16 == 1)
+				setImmediate(sync)
+			else
+				sync()
+			continue
+		}
	db.strictAsyncOrder = i%4 == 2
@@ -694,0 +721,0 @@ result = db.transaction(() => {
writer.js
@@ -25,3 +25,3 @@ import { getAddressShared as getAddress } from './native.js'
var log = []
-export function addWriteMethods(LMDBStore, { env, fixedBuffer, resetReadTxn, useWritemap,
+export function addWriteMethods(LMDBStore, { env, fixedBuffer, resetReadTxn, useWritemap, binaryBuffer,
	eventTurnBatching, txnStartThreshold, batchStartThreshold, overlappingSync, commitDelay, separateFlushed }) {
@@ -72,4 +72,4 @@ // stands for write instructions
	let encoder = store.encoder
-	if (value instanceof Uint8Array)
-		valueBuffer = value
+	if (value && value[binaryBuffer])
+		valueBuffer = value[binaryBuffer]
	else if (encoder) {
@@ -85,3 +85,5 @@ if (encoder.copyBuffers) // use this as indicator for support buffer reuse for now
		valueBuffer = Buffer.from(value) // TODO: Would be nice to write strings inline in the instructions
-	} else
+	} else if (value instanceof Uint8Array)
+		valueBuffer = value
+	else
		throw new Error('Invalid value to put in database ' + value + ' (' + (typeof value) +'), consider using encoder')
@@ -151,2 +153,3 @@ valueBufferStart = valueBuffer.start
	if (flags & 2) {
+		let mustCompress
		if (valueBufferStart > -1) { // if we have buffers with start/end position
@@ -156,2 +159,3 @@ // record pointer to value buffer
			(valueBuffer.address = getAddress(valueBuffer.buffer) + valueBuffer.byteOffset)) + valueBufferStart
+			mustCompress = valueBuffer[valueBufferStart] >= 254 // this is the compression indicator, so we must compress
		} else {
@@ -162,5 +166,6 @@ let valueArrayBuffer = valueBuffer.buffer
			(valueArrayBuffer.address = getAddress(valueArrayBuffer))) + valueBuffer.byteOffset
+			mustCompress = valueBuffer[0] >= 254 // this is the compression indicator, so we must compress
		}
		uint32[(position++ << 1) - 1] = valueSize
-		if (store.compression && valueSize >= store.compression.threshold) {
+		if (store.compression && (valueSize >= store.compression.threshold || mustCompress)) {
			flags |= 0x100000;
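
The `mustCompress` flag above exists because the first byte of a stored value doubles as a compression indicator: raw values whose first byte is 254 or 255 would be ambiguous, so they are compressed even when below the size threshold (this is what the new "forced compression due to starting with 255" test exercises). A simplified sketch of the decision (`shouldCompress` is a hypothetical name, not from the diff):

```js
// Values starting with the indicator bytes must be compressed,
// regardless of size, so reads can tell compressed data apart.
const COMPRESSION_INDICATOR = 254 // 0xFE and 0xFF are reserved

function shouldCompress(valueBuffer, compression) {
  if (!compression) return false
  return valueBuffer.length >= compression.threshold ||
    valueBuffer[0] >= COMPRESSION_INDICATOR
}
```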
@@ -211,3 +216,5 @@ float64[position] = store.compression.address
	nextUint32 = uint32
-	let newResolution = store.cache ?
+	let resolution = nextResolution
+	// create the placeholder next resolution
+	nextResolution = resolution.next = store.cache ?
	{
@@ -230,5 +237,2 @@ uint32: nextUint32,
	}
-	let resolution = nextResolution
-	resolution.next = newResolution
-	nextResolution = newResolution
	let writtenBatchDepth = batchDepth
@@ -455,3 +459,2 @@
	async function executeTxnCallbacks() {
-		env.beginTxn(0)
		env.writeTxn = writeTxn = {}
@@ -507,3 +510,2 @@ let promises
		env.writeTxn = writeTxn = false
-		return env.commitTxn()
	function txnError(error, i) {
@@ -622,2 +624,4 @@ (txnCallbacks.errors || (txnCallbacks.errors = []))[i] = error
	clearAsync(callback) {
+		if (this.encoder && this.encoder.structures)
+			this.encoder.structures = []
		return writeInstructions(12, this, undefined, undefined, undefined, undefined)(callback)
@@ -624,0 +628,0 @@ },
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package

Native code
Supply chain risk: Contains native code (e.g., compiled binaries or shared libraries). Including native code can obscure malicious behavior.
Found 3 instances in 1 package

Network access
Supply chain risk: This module accesses the network.
Found 1 instance in 1 package

Dynamic require
Supply chain risk: Dynamic require can indicate the package is performing dangerous or unsafe dynamic code execution.
Found 1 instance in 1 package