@alcalzone/jsonl-db
@@ -133,2 +133,5 @@ export interface JsonlDBOptions<V> {
     private entryToLine;
+    private makeLazyClear;
+    private makeLazyDelete;
+    private makeLazyWrite;
     /**
@@ -135,0 +138,0 @@ * Saves a compressed copy of the DB into the given path.
@@ -30,2 +30,8 @@ "use strict";
 const stream = __importStar(require("stream"));
+var Operation;
+(function (Operation) {
+    Operation[Operation["Clear"] = 0] = "Clear";
+    Operation[Operation["Write"] = 1] = "Write";
+    Operation[Operation["Delete"] = 2] = "Delete";
+})(Operation || (Operation = {}));
 /**
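This is the compiled output of a TypeScript enum that tags every queued action. Together with the memoizing `serialize` callbacks added further down, it forms the lazy entries that now flow through the write path. A sketch of the corresponding source-level shapes (the `LazyEntry` name is an assumption; the diff only shows compiled JS):

```ts
// Sketch of the source-level types behind the compiled enum above.
// "LazyEntry" is an assumed name; only the compiled JS appears in this diff.
enum Operation {
    Clear,
    Write,
    Delete,
}

interface LazyEntry<V = unknown> {
    op: Operation;
    /** Unset for Clear entries */
    key?: string;
    /** Only set for Write entries */
    value?: V;
    /** Turns the entry into a JSONL line, caching the result */
    serialize: () => string;
}
```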
@@ -327,3 +333,3 @@ * fsync on a directory ensures there are no rename operations etc. which haven't been persisted to disk.
         this._db.clear();
-        this.write("");
+        this.write(this.makeLazyClear());
     }
@@ -337,3 +343,3 @@ delete(key) {
             // Something was deleted
-            this.write(this.entryToLine(key));
+            this.write(this.makeLazyDelete(key));
         }
@@ -347,3 +353,3 @@ return ret;
         this._db.set(key, value);
-        this.write(this.entryToLine(key, value));
+        this.write(this.makeLazyWrite(key, value));
         return this;
@@ -369,3 +375,3 @@ }
             this._db.set(key, value);
-            this.write(this.entryToLine(key, value), true);
+            this.write(this.makeLazyWrite(key, value), true);
         }
@@ -379,4 +385,4 @@ }
     }
-    updateStatistics(command) {
-        if (command === "") {
+    updateStatistics(entry) {
+        if (entry.op === Operation.Clear) {
             this._uncompressedSize = 0;
@@ -443,3 +449,3 @@ }
      */
-    write(line, noAutoCompress = false) {
+    write(lazy, noAutoCompress = false) {
         var _a;
@@ -449,7 +455,7 @@ /* istanbul ignore else */
             // The compress backlog handling also handles the file statistics
-            this._compressBacklog.write(line);
+            this._compressBacklog.write(lazy);
         }
         else if (this._writeBacklog && !this._writeBacklog.destroyed) {
             // Update line statistics
-            this.updateStatistics(line);
+            this.updateStatistics(lazy);
             // Either compress or write to the main file, never both
@@ -460,3 +466,3 @@ if (!noAutoCompress && this.needToCompress()) {
             else {
-                this._writeBacklog.write(line);
+                this._writeBacklog.write(lazy);
                 // If this is a throttled stream, uncork it as soon as the write
@@ -476,3 +482,3 @@ // buffer is larger than configured
         if (this._dumpBacklog && !this._dumpBacklog.destroyed) {
-            this._dumpBacklog.write(line);
+            this._dumpBacklog.write(lazy);
         }
@@ -492,2 +498,36 @@ }
     }
+    makeLazyClear() {
+        return {
+            op: Operation.Clear,
+            serialize:
+            /* istanbul ignore next - this is impossible to test since it requires exact timing */ () => "",
+        };
+    }
+    makeLazyDelete(key) {
+        let serialized;
+        return {
+            op: Operation.Delete,
+            key,
+            serialize: () => {
+                if (serialized == undefined) {
+                    serialized = this.entryToLine(key);
+                }
+                return serialized;
+            },
+        };
+    }
+    makeLazyWrite(key, value) {
+        let serialized;
+        return {
+            op: Operation.Write,
+            key,
+            value,
+            serialize: () => {
+                if (serialized == undefined) {
+                    serialized = this.entryToLine(key, value);
+                }
+                return serialized;
+            },
+        };
+    }
     /**
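These `makeLazy*` factories are where the speedup comes from: stringification is deferred until a line is actually flushed, and the result is memoized, so an entry drained by several consumers (the write backlog and a dump backlog, for instance) is serialized at most once, and entries superseded before a flush are never serialized at all. A standalone sketch of the pattern, with `entryToLine` standing in for the database's private serializer:

```ts
// Minimal sketch of the memoized lazy-serialization pattern used above;
// entryToLine stands in for the database's private serializer.
function makeLazyWrite<V>(
    key: string,
    value: V,
    entryToLine: (key: string, value: V) => string,
): { key: string; value: V; serialize: () => string } {
    let serialized: string | undefined;
    return {
        key,
        value,
        serialize: () => {
            // Serialize on first use; later calls return the cached line
            if (serialized == undefined) {
                serialized = entryToLine(key, value);
            }
            return serialized;
        },
    };
}

// Repeated calls reuse the cached line instead of stringifying again:
const entry = makeLazyWrite("foo", { bar: 1 }, (k, v) => JSON.stringify({ k, v }));
console.log(entry.serialize() === entry.serialize()); // true, cached
```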
@@ -508,8 +548,10 @@ * Saves a compressed copy of the DB into the given path.
         for (const [key, value] of entries) {
-            await fs.appendFile(this._dumpFd, this.entryToLine(key, value) + "\n");
+            await fs.appendFile(this._dumpFd,
+            // No need to serialize lazily here
+            this.entryToLine(key, value) + "\n");
         }
         // In case there is any data in the backlog stream, persist that too
-        let line;
-        while (null !== (line = this._dumpBacklog.read())) {
-            await fs.appendFile(this._dumpFd, line + "\n");
+        let lazy;
+        while (null !== (lazy = this._dumpBacklog.read())) {
+            await fs.appendFile(this._dumpFd, lazy.serialize() + "\n");
         }
@@ -535,13 +577,31 @@ this._dumpBacklog.destroy();
         (_a = this._openPromise) === null || _a === void 0 ? void 0 : _a.resolve();
+        // The chunk map is used to buffer all entries that are currently waiting in line
+        // so we avoid serializing redundant entries. When the write backlog is throttled,
+        // the chunk map will only be used for a short time.
+        const chunk = new Map();
         for await (const action of this
             ._writeBacklog) {
-            if (action === "") {
-                // Since we opened the file in append mode, we cannot truncate
-                // therefore close and open in write mode again
-                await fs.close(this._fd);
-                this._fd = await fs.open(this.filename, "w+");
+            if (action.op === Operation.Clear) {
+                chunk.clear();
+                chunk.set("", action);
             }
             else {
-                await fs.appendFile(this._fd, action + "\n");
+                // Only remember the last entry for each key
+                chunk.set(action.key, action);
             }
+            // When the backlog has been drained, perform the necessary write actions
+            if (this._writeBacklog.readableLength === 0) {
+                for (const entry of chunk.values()) {
+                    if (entry.op === Operation.Clear) {
+                        // Since we opened the file in append mode, we cannot truncate
+                        // therefore close and open in write mode again
+                        await fs.close(this._fd);
+                        this._fd = await fs.open(this.filename, "w+");
+                    }
+                    else {
+                        await fs.appendFile(this._fd, entry.serialize() + "\n");
+                    }
+                }
+                chunk.clear();
+            }
             // When this is a throttled stream, auto-cork it when it was drained
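The new `chunk` map coalesces everything queued in the backlog before touching the disk: a `Clear` wipes all previously queued actions (and is stored under the empty-string key so it stays first in insertion order), while repeated writes or deletes to the same key keep only the last action. Only once the backlog drains is each surviving entry serialized, exactly once. A reduced sketch of that coalescing step, reusing the assumed types from above:

```ts
// Reduced sketch of the coalescing step: keep only the last action per key,
// and let a Clear drop everything queued before it.
function coalesce(actions: LazyEntry[]): LazyEntry[] {
    const chunk = new Map<string, LazyEntry>();
    for (const action of actions) {
        if (action.op === Operation.Clear) {
            chunk.clear();
            // Empty-string key: the Clear stays first in insertion order
            chunk.set("", action);
        } else {
            // Only remember the last entry for each key
            chunk.set(action.key!, action);
        }
    }
    // Map preserves insertion order, so a Clear is replayed before
    // any writes that were queued after it
    return [...chunk.values()];
}
```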
@@ -600,6 +660,6 @@ if (this._writeBacklog.readableLength === 0 && this._isOpen) {
             // In case there is any data in the backlog stream, persist that too
-            let line;
-            while (null !== (line = this._compressBacklog.read())) {
-                this.updateStatistics(line);
-                this._writeBacklog.write(line);
+            let lazy;
+            while (null !== (lazy = this._compressBacklog.read())) {
+                this.updateStatistics(lazy);
+                this._writeBacklog.write(lazy);
             }
@@ -606,0 +666,0 @@ this._compressBacklog.destroy();
 {
     "name": "@alcalzone/jsonl-db",
-    "version": "2.3.0",
+    "version": "2.4.0",
     "description": "Simple JSONL-based key-value store",
@@ -48,3 +48,3 @@ "main": "./build/index.js",
         "@types/mock-fs": "^4.13.1",
-        "@types/node": "^16.11.11",
+        "@types/node": "^12.20.39",
         "@types/proper-lockfile": "^4.1.2",
@@ -86,3 +86,4 @@ "@typescript-eslint/eslint-plugin": "^4.33.0",
         "release": "release-script",
-        "prepare": "husky install"
+        "prepare": "husky install",
+        "perf": "ts-node test/perf.ts"
     },
@@ -89,0 +90,0 @@ "config": {
@@ -145,2 +145,5 @@ # jsonl-db
 -->
+### 2.4.0 (2021-12-27)
+* Stringifying the individual lines now happens lazily and only when actually necessary, increasing the throughput by 30...50x.
+
 ### 2.3.0 (2021-12-19)
@@ -147,0 +150,0 @@ * Add the ability to dump the database to a different location
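The new `perf` script runs `test/perf.ts`, which is not part of this diff. A hypothetical micro-benchmark in its spirit, using the library's public API, shows the kind of workload that benefits: many writes to a small key space, most of which are coalesced and never stringified:

```ts
// Hypothetical micro-benchmark; the real test/perf.ts is not shown in this diff.
import { JsonlDB } from "@alcalzone/jsonl-db";

async function main(): Promise<void> {
    const db = new JsonlDB("perf-test.jsonl");
    await db.open();

    const start = Date.now();
    // set() only queues a lazy entry; serialization happens when the
    // write backlog drains, and superseded entries are skipped entirely.
    for (let i = 0; i < 100_000; i++) {
        db.set(`key-${i % 1000}`, { counter: i });
    }
    await db.close(); // flushes the remaining backlog
    console.log(`100k writes took ${Date.now() - start} ms`);
}

void main();
```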