@alcalzone/jsonl-db
Comparing version 0.5.1 to 1.0.0
@@ -9,9 +9,36 @@ import * as fs from "fs-extra";
    /**
     * An optional reviver functionn (similar to JSON.parse) to transform parsed values before they are accessible in the database.
     * An optional reviver function (similar to JSON.parse) to transform parsed values before they are accessible in the database.
     * If this function is defined, it must always return a value.
     */
    reviver?: (key: string, value: any) => any;
    reviver?: (key: string, value: any) => V;
    /**
     * Configure when the DB should be automatically compressed.
     * If multiple conditions are configured, the DB is compressed when any of them are fulfilled
     */
    autoCompress?: Partial<{
        /**
         * Compress when uncompressedSize >= size * sizeFactor. Default: +Infinity
         */
        sizeFactor: number;
        /**
         * Configure the minimum size necessary for auto-compression based on size. Default: 0
         */
        sizeFactorMinimumSize: number;
        /**
         * Compress after a certain time has passed. Default: never
         */
        intervalMs: number;
        /**
         * Configure the minimum count of changes for auto-compression based on time. Default: 1
         */
        intervalMinChanges: number;
        /** Compress when closing the DB. Default: false */
        onClose: boolean;
        /** Compress after opening the DB. Default: false */
        onOpen: boolean;
    }>;
}
export declare class JsonlDB<V extends unknown = unknown> {
    constructor(filename: string, options?: JsonlDBOptions<V>);
    private validateOptions;
    readonly filename: string;
@@ -29,2 +56,6 @@ readonly dumpFilename: string;
    get size(): number;
    private _uncompressedSize;
    /** Returns the line count of the appendonly file, excluding empty lines */
    get uncompressedSize(): number;
    private _changesSinceLastCompress;
    private _isOpen;
@@ -37,2 +68,3 @@ get isOpen(): boolean;
    private _dumpBacklog;
    private compressInterval;
    private _openPromise;
@@ -49,2 +81,8 @@ open(): Promise<void>;
    exportJson(filename: string, options?: fs.WriteOptions): Promise<void>;
    private updateStatistics;
    private needToCompress;
    /**
     * Writes a line into the correct backlog
     * @param noAutoCompress Whether auto-compression should be disabled
     */
    private write;
@@ -56,2 +94,3 @@ private entryToLine;
    private writeThread;
    private compressPromise;
    /** Compresses the db by dumping it and overwriting the aof file. */
@@ -58,0 +97,0 @@ compress(): Promise<void>;
@@ -17,4 +17,8 @@ "use strict";
    constructor(filename, options = {}) {
        var _a;
        this._db = new Map();
        this._uncompressedSize = Number.NaN;
        this._changesSinceLastCompress = 0;
        this._isOpen = false;
        this.validateOptions(options);
        this.filename = filename;
@@ -31,6 +35,40 @@ this.dumpFilename = this.filename + ".dump";
        this[Symbol.iterator] = this._db[Symbol.iterator].bind(this._db);
        // Start regular auto-compression
        const { intervalMs, intervalMinChanges = 1 } = (_a = options.autoCompress) !== null && _a !== void 0 ? _a : {};
        if (intervalMs) {
            this.compressInterval = setInterval(() => {
                if (this._changesSinceLastCompress >= intervalMinChanges) {
                    void this.compress();
                }
            }, intervalMs);
        }
    }
    validateOptions(options) {
        if (options.autoCompress) {
            const { sizeFactor, sizeFactorMinimumSize, intervalMs, intervalMinChanges, } = options.autoCompress;
            if (sizeFactor != undefined && sizeFactor <= 1) {
                throw new Error("sizeFactor must be > 1");
            }
            if (sizeFactorMinimumSize != undefined &&
                sizeFactorMinimumSize < 0) {
                throw new Error("sizeFactorMinimumSize must be >= 0");
            }
            if (intervalMs != undefined && intervalMs < 10) {
                throw new Error("intervalMs must be >= 10");
            }
            if (intervalMinChanges != undefined && intervalMinChanges < 1) {
                throw new Error("intervalMinChanges must be >= 1");
            }
        }
    }
    get size() {
        return this._db.size;
    }
    /** Returns the line count of the appendonly file, excluding empty lines */
    get uncompressedSize() {
        if (!this._isOpen) {
            throw new Error("The database is not open!");
        }
        return this._uncompressedSize;
    }
    get isOpen() {
@@ -41,2 +79,3 @@ return this._isOpen;
    async open() {
        var _a;
        // Open the file for appending and reading
@@ -51,2 +90,3 @@ this._fd = await fs.open(this.filename, "a+");
        let lineNo = 0;
        this._uncompressedSize = 0;
        try {
@@ -61,2 +101,3 @@ await new Promise((resolve, reject) => {
                try {
                    this._uncompressedSize++;
                    this.parseLine(line);
@@ -87,2 +128,5 @@ }
        this._isOpen = true;
        if ((_a = this.options.autoCompress) === null || _a === void 0 ? void 0 : _a.onOpen) {
            await this.compress();
        }
    }
@@ -146,3 +190,3 @@ /** Parses a line and updates the internal DB correspondingly */
        this._db.set(key, value);
        this.write(this.entryToLine(key, value));
        this.write(this.entryToLine(key, value), true);
    }
@@ -157,9 +201,43 @@ }
    // TODO: use cork() and uncork() to throttle filesystem accesses
    write(line) {
    updateStatistics(command) {
        if (command === "") {
            this._uncompressedSize = 0;
        }
        else {
            this._uncompressedSize++;
        }
        this._changesSinceLastCompress++;
    }
    needToCompress() {
        var _a;
        // compression is busy?
        if (this.compressPromise)
            return false;
        const { sizeFactor = Number.POSITIVE_INFINITY, sizeFactorMinimumSize = 0, } = (_a = this.options.autoCompress) !== null && _a !== void 0 ? _a : {};
        if (this.uncompressedSize >= sizeFactorMinimumSize &&
            this.uncompressedSize >= sizeFactor * this.size) {
            return true;
        }
        return false;
    }
    /**
     * Writes a line into the correct backlog
     * @param noAutoCompress Whether auto-compression should be disabled
     */
    write(line, noAutoCompress = false) {
        /* istanbul ignore else */
        if (this._compressBacklog && !this._compressBacklog.destroyed) {
            // The compress backlog handling also handles the file statistics
            this._compressBacklog.write(line);
        }
        else if (this._writeBacklog && !this._writeBacklog.destroyed) {
            this._writeBacklog.write(line);
            // Update line statistics
            this.updateStatistics(line);
            // Either compress or write to the main file, never both
            if (!noAutoCompress && this.needToCompress()) {
                this.compress();
            }
            else {
                this._writeBacklog.write(line);
            }
        }
@@ -246,4 +324,9 @@ else {
    async compress() {
        if (!this._writeBacklog)
        if (!this._writeBacklog || this.compressPromise)
            return;
        this.compressPromise = deferred_promise_1.createDeferredPromise();
        // Immediately remember the database size or writes while compressing
        // will be incorrectly reflected
        this._uncompressedSize = this.size;
        this._changesSinceLastCompress = 0;
        await this.dump();
@@ -269,2 +352,3 @@ // After dumping, restart the write thread so no duplicate entries get written
        while (null !== (line = this._compressBacklog.read())) {
            this.updateStatistics(line);
            this._writeBacklog.write(line);
@@ -274,7 +358,20 @@ }
        this._compressBacklog = undefined;
        // If any method is waiting for the compress process, signal it that we're done
        this.compressPromise.resolve();
        this.compressPromise = undefined;
    }
    /** Closes the DB and waits for all data to be written */
    async close() {
        var _a;
        var _a, _b;
        this._isOpen = false;
        if (this.compressInterval)
            clearInterval(this.compressInterval);
        if (this.compressPromise) {
            // Wait until any pending compress processes are complete
            await this.compressPromise;
        }
        else if ((_a = this.options.autoCompress) === null || _a === void 0 ? void 0 : _a.onClose) {
            // Compress if required
            await this.compress();
        }
        if (this._writeBacklog) {
@@ -286,3 +383,3 @@ this._closeDBPromise = deferred_promise_1.createDeferredPromise();
            // Disable writing into the dump backlog stream
            (_a = this._dumpBacklog) === null || _a === void 0 ? void 0 : _a.end();
            (_b = this._dumpBacklog) === null || _b === void 0 ? void 0 : _b.end();
            this._dumpBacklog = undefined;
@@ -289,0 +386,0 @@ await this._closeDBPromise;
{
  "name": "@alcalzone/jsonl-db",
  "version": "0.5.1",
  "version": "1.0.0",
  "description": "Simple JSONL-based key-value store",
@@ -5,0 +5,0 @@ "main": "./build/index.js",
@@ -32,3 +32,3 @@ # jsonl-db
if (db.has("key")) {
    result = db.get("key");
    result = db.get("key");
}
@@ -47,3 +47,3 @@ // ...forEach, keys(), entries(), values(), ...
function reviver(key: string, value: any) {
    // MUST return a value. If you don't want to transform `value`, return it.
    // MUST return a value. If you don't want to transform `value`, return it.
}
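Since the reviver must always return a value, here is a minimal sketch of what such a function could look like when passed through the constructor options. This assumes the class is imported as `JsonlDB` from the package's main export; the date-parsing logic is purely an illustrative assumption, not part of the package:

```ts
import { JsonlDB } from "@alcalzone/jsonl-db";

// Revive ISO date strings into Date objects while entries are parsed.
// If the value should not be transformed, it is returned unchanged -
// the reviver must always return something.
const db = new JsonlDB("/path/to/file", {
    reviver: (key, value) => {
        if (typeof value === "string" && /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}/.test(value)) {
            return new Date(value);
        }
        return value;
    },
});
```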
@@ -68,3 +68,3 @@
After a while, the main db file may contain unnecessary entries. To remove them, use the `compress()` method.
After a while, the main db file may contain unnecessary entries. The raw number of entries can be read using the `uncompressedSize` property. To remove unnecessary entries, use the `compress()` method.
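For context, a small sketch of how `uncompressedSize` and `compress()` could be combined in application code; the thresholds below are arbitrary assumptions, not recommendations from the package:

```ts
import { JsonlDB } from "@alcalzone/jsonl-db";

const db = new JsonlDB("/path/to/file");
await db.open();

// ... writes accumulate additional lines in the append-only file ...

// uncompressedSize counts the non-empty lines in the file,
// while size counts the keys currently stored in the database.
if (db.uncompressedSize >= 100 && db.uncompressedSize >= 2 * db.size) {
    await db.compress();
}
```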
@@ -77,2 +77,16 @@ ```ts
The database can automatically compress the database file under some conditions. To do so, use the `autoCompress` parameter of the constructor options:
```ts
const db = new DB("/path/to/file", { autoCompress: { /* auto compress options */ }});
```
The following options exist (all optional) and can be combined:
| Option                | Default   | Description |
| --------------------- | --------- | ----------- |
| sizeFactor            | +Infinity | Compress when `uncompressedSize >= size * sizeFactor` |
| sizeFactorMinimumSize | 0         | Configure the minimum size necessary for auto-compression based on size |
| intervalMs            | +Infinity | Compress after a certain time has passed |
| intervalMinChanges    | 1         | Configure the minimum count of changes for auto-compression based on time |
| onClose               | false     | Compress when closing the DB |
| onOpen                | false     | Compress after opening the DB |
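As a sketch of how several of these conditions could be combined in one configuration — the concrete numbers are assumptions chosen for illustration, not recommended defaults:

```ts
const db = new DB("/path/to/file", {
    autoCompress: {
        // Compress once the file holds at least 4x as many lines as there are keys...
        sizeFactor: 4,
        // ...but only after it has grown to 100 lines
        sizeFactorMinimumSize: 100,
        // Additionally compress every 60 seconds...
        intervalMs: 60000,
        // ...but only if at least 50 changes were written since the last compression
        intervalMinChanges: 50,
        // Always leave a compact file behind when the DB is closed
        onClose: true,
    },
});
```

Note that the validation in the compiled source shown above rejects `sizeFactor <= 1`, negative `sizeFactorMinimumSize`, `intervalMs < 10`, and `intervalMinChanges < 1`.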
Importing JSON files can be done this way:
@@ -100,2 +114,5 @@ ```ts
### 1.0.0 (2020-04-29)
Added auto-compress functionality
### 0.5.1 (2020-04-28)
@@ -102,0 +119,0 @@ Fix: The main export no longer exports `JsonlDB` as `DB`.