New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

@alcalzone/jsonl-db

Package Overview
Dependencies
Maintainers
1
Versions
35
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@alcalzone/jsonl-db - npm Package Compare versions

Comparing version 1.2.5 to 1.3.0

0

build/index.d.ts
export { FsWriteOptions, JsonlDB, JsonlDBOptions } from "./lib/db";
//# sourceMappingURL=index.d.ts.map

@@ -0,0 +0,0 @@ "use strict";

6

build/lib/db.d.ts

@@ -73,2 +73,3 @@ export interface JsonlDBOptions<V> {

readonly dumpFilename: string;
readonly backupFilename: string;
private options;

@@ -100,2 +101,7 @@ private _db;

open(): Promise<void>;
/**
* Makes sure that there are no remains of a previous broken compress attempt and restores
* a DB backup if it exists.
*/
private tryRecoverDBFiles;
/** Reads a line and extracts the key without doing a full-blown JSON.parse() */

@@ -102,0 +108,0 @@ private parseKey;

192

build/lib/db.js
"use strict";
var __asyncValues = (this && this.__asyncValues) || function (o) {
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
var m = o[Symbol.asyncIterator], i;
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
// tsc-emitted helper: re-exports property `k` of module `m` on object `o`
// under the name `k2` (defaults to `k`). When Object.create is available it
// installs a getter so the binding stays "live"; otherwise it copies the value.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// tsc-emitted helper: attaches a CommonJS module `v` as the `default` export
// of the namespace object `o` (non-writable via defineProperty when possible).
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// tsc-emitted helper for `import * as ns` interop: returns an ES module as-is,
// otherwise builds a namespace object that re-exports every own property of the
// CommonJS module and exposes the module itself as `default`.
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
    __setModuleDefault(result, mod);
    return result;
};

@@ -13,7 +25,22 @@ Object.defineProperty(exports, "__esModule", { value: true });

const objects_1 = require("alcalzone-shared/objects");
const fs = require("fs-extra");
const path = require("path");
const lockfile = require("proper-lockfile");
const readline = require("readline");
const stream = require("stream");
const fs = __importStar(require("fs-extra"));
const path = __importStar(require("path"));
const lockfile = __importStar(require("proper-lockfile"));
const readline = __importStar(require("readline"));
const stream = __importStar(require("stream"));
/**
 * fsync on a directory ensures there are no rename operations etc. which haven't been persisted to disk.
 * No-op on Windows, where fsync-ing a directory is not permitted.
 */
async function fsyncDir(dirname) {
    // Windows will cause `EPERM: operation not permitted, fsync`
    // for directories, so don't do this
    /* istanbul ignore else */
    if (process.platform === "win32")
        return;
    const fd = await fs.open(dirname, "r");
    try {
        await fs.fsync(fd);
    }
    finally {
        // FIX: always release the directory descriptor, even if fsync throws.
        // The previous version leaked `fd` on an fsync failure.
        await fs.close(fd);
    }
}
class JsonlDB {

@@ -29,2 +56,3 @@ constructor(filename, options = {}) {

this.dumpFilename = this.filename + ".dump";
this.backupFilename = this.filename + ".bak";
this.options = options;

@@ -102,2 +130,4 @@ // Bind all map properties we can use directly

}
// If the application crashed previously, try to recover from it
await this.tryRecoverDBFiles();
this._fd = await fs.open(this.filename, "a+");

@@ -180,2 +210,83 @@ const readStream = fs.createReadStream(this.filename, {

}
/**
* Makes sure that there are no remains of a previous broken compress attempt and restores
* a DB backup if it exists.
*/
async tryRecoverDBFiles() {
// During the compression, the following sequence of events happens:
// 1. A .jsonl.dump file gets written with a compressed copy of the data
// 2. Files get renamed: .jsonl -> .jsonl.bak, .jsonl.dump -> .jsonl
// 3. .bak file gets removed
// 4. Buffered data gets written to the .jsonl file
// This means if the .jsonl file is absent or truncated, we should be able to pick either the .dump or the .bak file
// and restore the .jsonl file from it
let dbFileIsOK = false;
try {
const dbFileStats = await fs.stat(this.filename);
dbFileIsOK = dbFileStats.isFile() && dbFileStats.size > 0;
}
catch {
// ignore
}
// Prefer the DB file if it exists, remove the others in case they exist
if (dbFileIsOK) {
try {
await fs.remove(this.backupFilename);
}
catch {
// ignore
}
try {
await fs.remove(this.dumpFilename);
}
catch {
// ignore
}
return;
}
// The backup file should have complete data - the dump file could be subject to an incomplete write
let bakFileIsOK = false;
try {
const bakFileStats = await fs.stat(this.backupFilename);
bakFileIsOK = bakFileStats.isFile() && bakFileStats.size > 0;
}
catch {
// ignore
}
if (bakFileIsOK) {
// Overwrite the broken db file with it and delete the dump file
await fs.move(this.backupFilename, this.filename, {
overwrite: true,
});
try {
await fs.remove(this.dumpFilename);
}
catch {
// ignore
}
return;
}
// Try the dump file as a last attempt
let dumpFileIsOK = false;
try {
const dumpFileStats = await fs.stat(this.dumpFilename);
dumpFileIsOK = dumpFileStats.isFile() && dumpFileStats.size > 0;
}
catch {
// ignore
}
if (dumpFileIsOK) {
// Overwrite the broken db file with the dump file and delete the backup file
await fs.move(this.dumpFilename, this.filename, {
overwrite: true,
});
try {
await fs.remove(this.backupFilename);
}
catch {
// ignore
}
return;
}
}
/** Reads a line and extracts the key without doing a full-blown JSON.parse() */

@@ -396,2 +507,3 @@ parseKey(line) {

// Close the file and resolve the close promise
await fs.fsync(this._dumpFd); // The dump should be on disk ASAP, so we fsync
await fs.close(this._dumpFd);

@@ -403,4 +515,3 @@ this._dumpFd = undefined;

async writeThread() {
var e_1, _a;
var _b;
var _a;
// This must be called before any awaits

@@ -412,32 +523,23 @@ this._writeBacklog = new stream.PassThrough({ objectMode: true });

this._fd = await fs.open(this.filename, "a+");
(_b = this._openPromise) === null || _b === void 0 ? void 0 : _b.resolve();
try {
for (var _c = __asyncValues(this
._writeBacklog), _d; _d = await _c.next(), !_d.done;) {
const action = _d.value;
if (action === "") {
// Since we opened the file in append mode, we cannot truncate
// therefore close and open in write mode again
await fs.close(this._fd);
this._fd = await fs.open(this.filename, "w+");
}
else {
await fs.appendFile(this._fd, action + "\n");
}
// When this is a throttled stream, auto-cork it when it was drained
if (this._writeBacklog.readableLength === 0 && this._isOpen) {
this.autoCork();
}
(_a = this._openPromise) === null || _a === void 0 ? void 0 : _a.resolve();
for await (const action of this
._writeBacklog) {
if (action === "") {
// Since we opened the file in append mode, we cannot truncate
// therefore close and open in write mode again
await fs.close(this._fd);
this._fd = await fs.open(this.filename, "w+");
}
}
catch (e_1_1) { e_1 = { error: e_1_1 }; }
finally {
try {
if (_d && !_d.done && (_a = _c.return)) await _a.call(_c);
else {
await fs.appendFile(this._fd, action + "\n");
}
finally { if (e_1) throw e_1.error; }
// When this is a throttled stream, auto-cork it when it was drained
if (this._writeBacklog.readableLength === 0 && this._isOpen) {
this.autoCork();
}
}
this._writeBacklog.destroy();
// The write backlog was closed, this means that the DB is being closed
// close the file and resolve the close promise
// Flush the file contents to disk, close the file and resolve the close promise
await fs.fsync(this._fd);
await fs.close(this._fd);

@@ -460,2 +562,6 @@ this._writePromise.resolve();

this.uncork();
// Replace the aof file. To make sure that the data fully reaches the storage, we employ the following strategy:
// 1. Ensure there are no pending rename operations or file creations
await fsyncDir(path.dirname(this.filename));
// 2. Ensure the db file is fully written to disk. The write thread will fsync before closing
if (this._writeBacklog) {

@@ -466,8 +572,10 @@ this._writeBacklog.end();

}
// Replace the aof file
await fs.move(this.filename, this.filename + ".bak", {
// 3. Create backup, rename the dump file, then ensure the directory entries are written to disk
await fs.move(this.filename, this.backupFilename, {
overwrite: true,
});
await fs.move(this.dumpFilename, this.filename, { overwrite: true });
await fs.unlink(this.filename + ".bak");
await fsyncDir(path.dirname(this.filename));
// 4. Delete backup
await fs.unlink(this.backupFilename);
if (this._isOpen) {

@@ -540,3 +648,3 @@ // Start the write thread again

}
catch (_b) {
catch {
// whatever - just don't crash

@@ -543,0 +651,0 @@ }

{
"name": "@alcalzone/jsonl-db",
"version": "1.2.5",
"version": "1.3.0",
"description": "Simple JSONL-based key-value store",

@@ -5,0 +5,0 @@ "main": "./build/index.js",

@@ -136,2 +136,6 @@ # jsonl-db

-->
### 1.3.0 (2021-06-22)
* When opening the DB, recover from crashes that happened while compressing the DB
* Ensure that the DB files are flushed to disk when closing or renaming files
### 1.2.5 (2021-05-29)

@@ -138,0 +142,0 @@ Prevent opening one DB file in multiple instances of the DB using lockfiles

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc