@naturalcycles/db-lib - npm Package Compare versions

Comparing version 9.12.0 to 9.12.1
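Every change in this diff is part of a single rename: the streaming options `batchSize` / `batchConcurrency` become `chunkSize` / `chunkConcurrency`, and the `transformBuffer` transform from `@naturalcycles/nodejs-lib` is replaced by `transformChunk`. Defaults and behavior stay the same. A minimal before/after sketch from a caller's point of view, assuming an already-constructed CommonDao instance `dao` and an illustrative `accountIds` array (both hypothetical):

  import { DBQuery } from '@naturalcycles/db-lib'

  const q = DBQuery.create('users').filterIn('accountId', accountIds)

  // 9.12.0 and earlier: chunked streaming delete used the "batch" naming
  // const deleted = await dao.deleteByQuery(q, { batchSize: 500, batchConcurrency: 16 })

  // 9.12.1: same behavior, "chunk" naming
  const deleted = await dao.deleteByQuery(q, { chunkSize: 500, chunkConcurrency: 16 })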


dist/commondao/common.dao.d.ts

@@ -115,5 +115,5 @@ /// <reference types="node" />

* "Streaming" is implemented by buffering incoming rows into **batches**
* (of size opt.batchSize, which defaults to 500),
* and then executing db.saveBatch(batch) with the concurrency
* of opt.batchConcurrency (which defaults to 16).
* (of size opt.chunkSize, which defaults to 500),
* and then executing db.saveBatch(chunk) with the concurrency
* of opt.chunkConcurrency (which defaults to 16).
*/
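The comment above describes the chunked save pipeline. A minimal sketch of driving it from caller code, assuming an existing CommonDao instance `dao` and a readable `inputStream` of entities (both hypothetical); `streamSaveTransform` returns an array of Transforms that can be spread into `_pipeline` from `@naturalcycles/nodejs-lib`:

  import { _pipeline } from '@naturalcycles/nodejs-lib'

  // Buffer incoming rows into chunks of 1000 and save up to 8 chunks concurrently
  await _pipeline([
    inputStream, // Readable stream of entities to save (assumed to exist)
    ...dao.streamSaveTransform({ chunkSize: 1000, chunkConcurrency: 8 }),
  ])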

@@ -127,5 +127,5 @@ streamSaveTransform(opt?: CommonDaoStreamSaveOptions<DBM>): Transform[];

 /**
-* Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
-* `deleteByIds` for each batch concurrently (infinite concurrency).
-* This is expected to be more memory-efficient way of deleting big numbers of rows.
+* Pass `chunkSize: number` (e.g 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
+* `deleteByIds` for each chunk concurrently (infinite concurrency).
+* This is expected to be more memory-efficient way of deleting large number of rows.
 */

@@ -132,0 +132,0 @@ deleteByQuery(q: DBQuery<DBM>, opt?: CommonDaoStreamDeleteOptions<DBM>): Promise<number>;

@@ -700,5 +700,5 @@ "use strict";

* "Streaming" is implemented by buffering incoming rows into **batches**
* (of size opt.batchSize, which defaults to 500),
* and then executing db.saveBatch(batch) with the concurrency
* of opt.batchConcurrency (which defaults to 16).
* (of size opt.chunkSize, which defaults to 500),
* and then executing db.saveBatch(chunk) with the concurrency
* of opt.chunkConcurrency (which defaults to 16).
*/

@@ -715,3 +715,3 @@ streamSaveTransform(opt = {}) {

 const { beforeSave } = this.cfg.hooks;
-const { batchSize = 500, batchConcurrency = 16, errorMode } = opt;
+const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt;
 return [

@@ -730,3 +730,3 @@ (0, nodejs_lib_1.transformMap)(async (bm) => {

 }),
-(0, nodejs_lib_1.transformBuffer)({ batchSize }),
+(0, nodejs_lib_1.transformChunk)({ chunkSize }),
 (0, nodejs_lib_1.transformMap)(async (batch) => {

@@ -739,3 +739,3 @@ await this.cfg.db.saveBatch(table, batch, {

 }, {
-concurrency: batchConcurrency,
+concurrency: chunkConcurrency,
 errorMode,

@@ -775,5 +775,5 @@ flattenArrayOutput: true,

 /**
-* Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
-* `deleteByIds` for each batch concurrently (infinite concurrency).
-* This is expected to be more memory-efficient way of deleting big numbers of rows.
+* Pass `chunkSize: number` (e.g 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
+* `deleteByIds` for each chunk concurrently (infinite concurrency).
+* This is expected to be more memory-efficient way of deleting large number of rows.
 */

@@ -787,4 +787,4 @@ async deleteByQuery(q, opt = {}) {

 let deleted = 0;
-if (opt.batchSize) {
-const { batchSize, batchConcurrency = 16 } = opt;
+if (opt.chunkSize) {
+const { chunkSize, chunkConcurrency = 16 } = opt;
 await (0, nodejs_lib_1._pipeline)([

@@ -795,3 +795,3 @@ this.cfg.db.streamQuery(q.select(['id']), opt),

 }),
-(0, nodejs_lib_1.transformBuffer)({ batchSize }),
+(0, nodejs_lib_1.transformChunk)({ chunkSize }),
 (0, nodejs_lib_1.transformMap)(async (ids) => {

@@ -801,3 +801,3 @@ deleted += await this.cfg.db.deleteByQuery(dbQuery_1.DBQuery.create(q.table).filterIn('id', ids), opt);

 predicate: js_lib_1._passthroughPredicate,
-concurrency: batchConcurrency,
+concurrency: chunkConcurrency,
 }),

@@ -808,3 +808,3 @@ // LogProgress should be AFTER the mapper, to be able to report correct stats

 logEvery: 2, // 500 * 2 === 1000
-batchSize,
+chunkSize,
 ...opt,

@@ -811,0 +811,0 @@ }),

@@ -253,13 +253,13 @@ import { BaseDBEntity, CommonLogger, ErrorMode, Promisable, ZodError, ZodSchema } from '@naturalcycles/js-lib';

 * Applicable to some of stream operations, e.g deleteByQuery.
-* If set - `deleteByQuery` won't execute it "all at once", but in batches.
+* If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
 *
 * Defaults to undefined, so the operation is executed "all at once".
 */
-batchSize?: number;
+chunkSize?: number;
 /**
-* When batchSize is set - this option controls how many batches to run concurrently.
+* When chunkSize is set - this option controls how many chunks to run concurrently.
 * Defaults to 16, "the magic number of JavaScript concurrency".
 */
-batchConcurrency?: number;
+chunkConcurrency?: number;
 }
 export type CommonDaoCreateOptions = CommonDBCreateOptions;

@@ -24,3 +24,3 @@ import { AsyncMapper, ErrorMode } from '@naturalcycles/js-lib';

 */
-batchSize?: number;
+chunkSize?: number;
 /**

@@ -27,0 +27,0 @@ * @default ErrorMode.SUPPRESS

@@ -14,3 +14,3 @@ "use strict";

 async function dbPipelineCopy(opt) {
-const { batchSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
+const { chunkSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
 let { tables } = opt;

@@ -46,3 +46,3 @@ const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';

 (0, nodejs_lib_1.transformTap)(() => rows++),
-(0, nodejs_lib_1.transformBuffer)({ batchSize }),
+(0, nodejs_lib_1.transformChunk)({ chunkSize }),
 (0, nodejs_lib_1.writableForEach)(async (dbms) => {

@@ -49,0 +49,0 @@ await dbOutput.saveBatch(table, dbms, saveOptions);
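For reference, a hedged sketch of calling `dbPipelineCopy` with the renamed option, assuming `dbPipelineCopy` is exported from the package root and that `sourceDb` / `targetDb` are existing CommonDB instances (all placeholder names); unspecified options keep their defaults (chunkSize 100, concurrency 16):

  import { dbPipelineCopy } from '@naturalcycles/db-lib'

  await dbPipelineCopy({
    dbInput: sourceDb,  // CommonDB to read rows from (placeholder)
    dbOutput: targetDb, // CommonDB to save rows into (placeholder)
    tables: ['users'],  // restrict the copy to selected tables
    chunkSize: 200,     // rows per saveBatch call (named batchSize before 9.12.1)
  })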

@@ -17,3 +17,3 @@ "use strict";

 async function dbPipelineRestore(opt) {
-const { db, concurrency = 16, batchSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
+const { db, concurrency = 16, chunkSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
 const strict = errorMode !== js_lib_1.ErrorMode.SUPPRESS;

@@ -91,3 +91,3 @@ const onlyTables = opt.tables && new Set(opt.tables);

 }),
-(0, nodejs_lib_1.transformBuffer)({ batchSize }),
+(0, nodejs_lib_1.transformChunk)({ chunkSize }),
 (0, nodejs_lib_1.writableForEach)(async (dbms) => {

@@ -94,0 +94,0 @@ await db.saveBatch(table, dbms, saveOptions);
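Similarly, a hedged sketch of `dbPipelineRestore`, which streams a previously created dump from `inputDirPath` back into a database in chunks (the `db` instance and directory path are placeholders, and the export from the package root is assumed):

  import { dbPipelineRestore } from '@naturalcycles/db-lib'

  await dbPipelineRestore({
    db,                       // target CommonDB instance (placeholder)
    inputDirPath: './backup', // placeholder path to an existing dump
    recreateTables: true,     // re-create tables before restoring
    chunkSize: 100,           // rows per saveBatch call (named batchSize before 9.12.1)
  })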

@@ -43,3 +43,3 @@ {

 },
-"version": "9.12.0",
+"version": "9.12.1",
 "description": "Lowest Common Denominator API to supported Databases",

@@ -46,0 +46,0 @@ "keywords": [

@@ -316,15 +316,15 @@ import {

 * Applicable to some of stream operations, e.g deleteByQuery.
-* If set - `deleteByQuery` won't execute it "all at once", but in batches.
+* If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
 *
 * Defaults to undefined, so the operation is executed "all at once".
 */
-batchSize?: number
+chunkSize?: number
 /**
-* When batchSize is set - this option controls how many batches to run concurrently.
+* When chunkSize is set - this option controls how many chunks to run concurrently.
 * Defaults to 16, "the magic number of JavaScript concurrency".
 */
-batchConcurrency?: number
+chunkConcurrency?: number
 }
 export type CommonDaoCreateOptions = CommonDBCreateOptions

@@ -40,3 +40,3 @@ import { Transform } from 'node:stream'

 stringId,
-transformBuffer,
+transformChunk,
 transformLogProgress,
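Outside of this package, the replacement transform can also be used directly. A small standalone sketch of `transformChunk` grouping a stream of ids into arrays of up to 100 items (the id values and the console.log handler are illustrative):

  import { Readable } from 'node:stream'
  import { _pipeline, transformChunk, writableForEach } from '@naturalcycles/nodejs-lib'

  await _pipeline([
    Readable.from(['id1', 'id2', 'id3']),
    transformChunk<string>({ chunkSize: 100 }), // emits string[] chunks instead of single strings
    writableForEach(async (chunk: string[]) => {
      console.log(`processing ${chunk.length} ids`)
    }),
  ])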

@@ -922,5 +922,5 @@ transformMap,

* "Streaming" is implemented by buffering incoming rows into **batches**
* (of size opt.batchSize, which defaults to 500),
* and then executing db.saveBatch(batch) with the concurrency
* of opt.batchConcurrency (which defaults to 16).
* (of size opt.chunkSize, which defaults to 500),
* and then executing db.saveBatch(chunk) with the concurrency
* of opt.chunkConcurrency (which defaults to 16).
*/

@@ -941,3 +941,3 @@ streamSaveTransform(opt: CommonDaoStreamSaveOptions<DBM> = {}): Transform[] {

-const { batchSize = 500, batchConcurrency = 16, errorMode } = opt
+const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt

@@ -962,3 +962,3 @@ return [

 ),
-transformBuffer<DBM>({ batchSize }),
+transformChunk<DBM>({ chunkSize }),
 transformMap<DBM[], DBM[]>(

@@ -973,3 +973,3 @@ async batch => {

 {
-concurrency: batchConcurrency,
+concurrency: chunkConcurrency,
 errorMode,

@@ -1011,5 +1011,5 @@ flattenArrayOutput: true,

 /**
-* Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
-* `deleteByIds` for each batch concurrently (infinite concurrency).
-* This is expected to be more memory-efficient way of deleting big numbers of rows.
+* Pass `chunkSize: number` (e.g 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
+* `deleteByIds` for each chunk concurrently (infinite concurrency).
+* This is expected to be more memory-efficient way of deleting large number of rows.
 */

@@ -1027,4 +1027,4 @@ async deleteByQuery(

-if (opt.batchSize) {
-const { batchSize, batchConcurrency = 16 } = opt
+if (opt.chunkSize) {
+const { chunkSize, chunkConcurrency = 16 } = opt

@@ -1036,3 +1036,3 @@ await _pipeline([

 }),
-transformBuffer<string>({ batchSize }),
+transformChunk<string>({ chunkSize }),
 transformMap<string[], void>(

@@ -1047,3 +1047,3 @@ async ids => {

 predicate: _passthroughPredicate,
-concurrency: batchConcurrency,
+concurrency: chunkConcurrency,
 },

@@ -1055,3 +1055,3 @@ ),

 logEvery: 2, // 500 * 2 === 1000
-batchSize,
+chunkSize,
 ...opt,

@@ -1058,0 +1058,0 @@ }),

@@ -11,3 +11,3 @@ import {

 NDJsonStats,
-transformBuffer,
+transformChunk,
 transformLogProgress,

@@ -51,3 +51,3 @@ TransformLogProgressOptions,

 */
-batchSize?: number
+chunkSize?: number

@@ -103,3 +103,3 @@ /**

 const {
-batchSize = 100,
+chunkSize = 100,
 dbInput,

@@ -159,3 +159,3 @@ dbOutput,

 transformTap(() => rows++),
-transformBuffer({ batchSize }),
+transformChunk({ chunkSize }),
 writableForEach(async dbms => {

@@ -162,0 +162,0 @@ await dbOutput.saveBatch(table, dbms, saveOptions)

@@ -16,3 +16,3 @@ import fs from 'node:fs'

 NDJsonStats,
-transformBuffer,
+transformChunk,
 transformFilterSync,

@@ -129,3 +129,3 @@ transformJsonParse,

 concurrency = 16,
-batchSize = 100,
+chunkSize = 100,
 limit,

@@ -229,3 +229,3 @@ sinceUpdated,

 }),
-transformBuffer({ batchSize }),
+transformChunk({ chunkSize }),
 writableForEach(async dbms => {

@@ -232,0 +232,0 @@ await db.saveBatch(table, dbms, saveOptions)
