mongodb - npm Package Compare versions

Comparing version 6.9.0-dev.20241001.sha.85f7dcf9 to 6.9.0-dev.20241002.sha.d56e235c
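
At a glance, this nightly-to-nightly diff reworks the in-progress client bulk write support: DocumentSequence now builds its OP_MSG header incrementally and exposes push/toBin, ClientBulkWriteCommandBuilder emits size-limited batches on demand instead of building every command up front, the cursor and operation now take the command builder rather than a prebuilt command, MongoBulkWriteCursorError is renamed to MongoClientBulkWriteCursorError alongside a new MongoClientBulkWriteExecutionError, and ServerDescription records maxMessageSizeBytes, maxWriteBatchSize, and maxBsonObjectSize from the hello response.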

lib/cmap/commands.js

@@ -271,5 +271,49 @@ "use strict";

class DocumentSequence {
constructor(documents) {
this.documents = documents;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field, documents) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
// Document sequences start with type 1 at the first byte.
// Field strings must always be UTF-8.
const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document to the document sequence. Will serialize the document
* as well and return the current serialized length of all documents.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document, buffer) {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin() {
return Buffer.concat(this.chunks);
}
}
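
As an aside, the layout this class now produces matches the OP_MSG kind-1 (document sequence) section: one type byte set to 1, an int32 length covering itself plus the null-terminated field name and the documents, the UTF-8 field name with its trailing null byte, then the raw BSON documents back to back. A minimal standalone sketch of the same layout, assuming ASCII field names and the bson package's serialize (the helper name documentSequenceBytes is illustrative, not driver API):

import { serialize, type Document } from 'bson';

// Illustrative helper (not driver API): produces the same bytes DocumentSequence
// accumulates across its constructor and push() calls.
function documentSequenceBytes(field: string, documents: Document[]): Buffer {
  const docs = documents.map(doc => Buffer.from(serialize(doc)));
  const docsLength = docs.reduce((sum, d) => sum + d.length, 0);
  // 1 type byte + int32 length + field name + trailing null byte (ASCII assumed).
  const header = Buffer.allocUnsafe(1 + 4 + field.length + 1);
  header[0] = 1; // OP_MSG section kind 1: document sequence.
  // The length covers itself, the cstring field name, and all the documents.
  header.writeInt32LE(4 + field.length + 1 + docsLength, 1);
  header.write(`${field}\0`, 5, 'utf8');
  return Buffer.concat([header, ...docs]);
}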

@@ -356,17 +400,3 @@ exports.DocumentSequence = DocumentSequence;

if (value instanceof DocumentSequence) {
// Document sequences start with type 1 at the first byte.
const buffer = Buffer.allocUnsafe(1 + 4 + key.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${key}\0`, 5);
chunks.push(buffer);
// Fourth part are the documents' bytes.
let docsLength = 0;
for (const doc of value.documents) {
const docBson = this.serializeBson(doc);
docsLength += docBson.length;
chunks.push(docBson);
}
// Second part of the sequence is the length at offset 1;
buffer.writeInt32LE(4 + key.length + 1 + docsLength, 1);
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be

@@ -373,0 +403,0 @@ // removed in the OP_MSG request first section, and DocumentSequence is not a

@@ -16,5 +16,5 @@ "use strict";

/** @internal */
constructor(client, command, options = {}) {
constructor(client, commandBuilder, options = {}) {
super(client, new utils_1.MongoDBNamespace('admin', '$cmd'), options);
this.command = command;
this.commandBuilder = commandBuilder;
this.clientBulkWriteOptions = options;

@@ -29,8 +29,14 @@ }

return this.cursorResponse;
throw new error_1.MongoBulkWriteCursorError('No client bulk write cursor response returned from the server.');
throw new error_1.MongoClientBulkWriteCursorError('No client bulk write cursor response returned from the server.');
}
/**
* Get the last set of operations the cursor executed.
*/
get operations() {
return this.commandBuilder.lastOperations;
}
clone() {
const clonedOptions = (0, utils_1.mergeOptions)({}, this.clientBulkWriteOptions);
delete clonedOptions.session;
return new ClientBulkWriteCursor(this.client, this.command, {
return new ClientBulkWriteCursor(this.client, this.commandBuilder, {
...clonedOptions

@@ -41,3 +47,3 @@ });

async _initialize(session) {
const clientBulkWriteOperation = new client_bulk_write_1.ClientBulkWriteOperation(this.command, {
const clientBulkWriteOperation = new client_bulk_write_1.ClientBulkWriteOperation(this.commandBuilder, {
...this.clientBulkWriteOptions,

@@ -44,0 +50,0 @@ ...this.cursorOptions,

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoWriteConcernError = exports.MongoServerSelectionError = exports.MongoSystemError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoCompatibilityError = exports.MongoInvalidArgumentError = exports.MongoParseError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoTopologyClosedError = exports.MongoCursorExhaustedError = exports.MongoServerClosedError = exports.MongoCursorInUseError = exports.MongoUnexpectedServerResponseError = exports.MongoGridFSChunkError = exports.MongoGridFSStreamError = exports.MongoTailableCursorError = exports.MongoChangeStreamError = exports.MongoBulkWriteCursorError = exports.MongoGCPError = exports.MongoAzureError = exports.MongoOIDCError = exports.MongoAWSError = exports.MongoKerberosError = exports.MongoExpiredSessionError = exports.MongoTransactionError = exports.MongoNotConnectedError = exports.MongoDecompressionError = exports.MongoBatchReExecutionError = exports.MongoRuntimeError = exports.MongoAPIError = exports.MongoDriverError = exports.MongoServerError = exports.MongoError = exports.MongoErrorLabel = exports.GET_MORE_RESUMABLE_CODES = exports.MONGODB_ERROR_CODES = exports.NODE_IS_RECOVERING_ERROR_MESSAGE = exports.LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = exports.LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = void 0;
exports.MongoWriteConcernError = exports.MongoServerSelectionError = exports.MongoSystemError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoCompatibilityError = exports.MongoInvalidArgumentError = exports.MongoParseError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoTopologyClosedError = exports.MongoCursorExhaustedError = exports.MongoServerClosedError = exports.MongoCursorInUseError = exports.MongoUnexpectedServerResponseError = exports.MongoGridFSChunkError = exports.MongoGridFSStreamError = exports.MongoTailableCursorError = exports.MongoChangeStreamError = exports.MongoClientBulkWriteExecutionError = exports.MongoClientBulkWriteCursorError = exports.MongoGCPError = exports.MongoAzureError = exports.MongoOIDCError = exports.MongoAWSError = exports.MongoKerberosError = exports.MongoExpiredSessionError = exports.MongoTransactionError = exports.MongoNotConnectedError = exports.MongoDecompressionError = exports.MongoBatchReExecutionError = exports.MongoRuntimeError = exports.MongoAPIError = exports.MongoDriverError = exports.MongoServerError = exports.MongoError = exports.MongoErrorLabel = exports.GET_MORE_RESUMABLE_CODES = exports.MONGODB_ERROR_CODES = exports.NODE_IS_RECOVERING_ERROR_MESSAGE = exports.LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = exports.LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = void 0;
exports.isNetworkErrorBeforeHandshake = isNetworkErrorBeforeHandshake;

@@ -559,3 +559,3 @@ exports.needsRetryableWriteLabel = needsRetryableWriteLabel;

*/
class MongoBulkWriteCursorError extends MongoRuntimeError {
class MongoClientBulkWriteCursorError extends MongoRuntimeError {
/**

@@ -576,7 +576,33 @@ * **Do not use this constructor!**

get name() {
return 'MongoBulkWriteCursorError';
return 'MongoClientBulkWriteCursorError';
}
}
exports.MongoBulkWriteCursorError = MongoBulkWriteCursorError;
exports.MongoClientBulkWriteCursorError = MongoClientBulkWriteCursorError;
/**
* An error indicating that an error occurred on the client when executing a client bulk write.
*
* @public
* @category Error
*/
class MongoClientBulkWriteExecutionError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message) {
super(message);
}
get name() {
return 'MongoClientBulkWriteExecutionError';
}
}
exports.MongoClientBulkWriteExecutionError = MongoClientBulkWriteExecutionError;
/**
* An error generated when a ChangeStream operation fails to execute.

@@ -583,0 +609,0 @@ *

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoTailableCursorError = exports.MongoSystemError = exports.MongoServerSelectionError = exports.MongoServerError = exports.MongoServerClosedError = exports.MongoRuntimeError = exports.MongoParseError = exports.MongoOIDCError = exports.MongoNotConnectedError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoKerberosError = exports.MongoInvalidArgumentError = exports.MongoGridFSStreamError = exports.MongoGridFSChunkError = exports.MongoGCPError = exports.MongoExpiredSessionError = exports.MongoError = exports.MongoDriverError = exports.MongoDecompressionError = exports.MongoCursorInUseError = exports.MongoCursorExhaustedError = exports.MongoCompatibilityError = exports.MongoChangeStreamError = exports.MongoBulkWriteCursorError = exports.MongoBatchReExecutionError = exports.MongoAzureError = exports.MongoAWSError = exports.MongoAPIError = exports.ChangeStreamCursor = exports.ClientEncryption = exports.MongoBulkWriteError = exports.UUID = exports.Timestamp = exports.ObjectId = exports.MinKey = exports.MaxKey = exports.Long = exports.Int32 = exports.Double = exports.Decimal128 = exports.DBRef = exports.Code = exports.BSONType = exports.BSONSymbol = exports.BSONRegExp = exports.Binary = exports.BSON = void 0;
exports.ConnectionPoolClearedEvent = exports.ConnectionCreatedEvent = exports.ConnectionClosedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckedInEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = exports.CommandFailedEvent = exports.WriteConcern = exports.ReadPreference = exports.ReadConcern = exports.TopologyType = exports.ServerType = exports.ReadPreferenceMode = exports.ReadConcernLevel = exports.ProfilingLevel = exports.ReturnDocument = exports.ServerApiVersion = exports.ExplainVerbosity = exports.MongoErrorLabel = exports.CURSOR_FLAGS = exports.Compressor = exports.AuthMechanism = exports.GSSAPICanonicalizationValue = exports.AutoEncryptionLoggerLevel = exports.BatchType = exports.UnorderedBulkOperation = exports.OrderedBulkOperation = exports.MongoClient = exports.ListIndexesCursor = exports.ListCollectionsCursor = exports.GridFSBucketWriteStream = exports.GridFSBucketReadStream = exports.GridFSBucket = exports.FindCursor = exports.Db = exports.Collection = exports.ClientSession = exports.ChangeStream = exports.CancellationToken = exports.AggregationCursor = exports.Admin = exports.AbstractCursor = exports.configureExplicitResourceManagement = exports.MongoWriteConcernError = exports.MongoUnexpectedServerResponseError = exports.MongoTransactionError = exports.MongoTopologyClosedError = void 0;
exports.MongoClientAuthProviders = exports.MongoCryptKMSRequestNetworkTimeoutError = exports.MongoCryptInvalidArgumentError = exports.MongoCryptError = exports.MongoCryptCreateEncryptedCollectionError = exports.MongoCryptCreateDataKeyError = exports.MongoCryptAzureKMSRequestError = exports.SrvPollingEvent = exports.WaitingForSuitableServerEvent = exports.ServerSelectionSucceededEvent = exports.ServerSelectionStartedEvent = exports.ServerSelectionFailedEvent = exports.ServerSelectionEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.TopologyClosedEvent = exports.ServerOpeningEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.ServerHeartbeatFailedEvent = exports.ServerDescriptionChangedEvent = exports.ServerClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolMonitoringEvent = exports.ConnectionPoolCreatedEvent = exports.ConnectionPoolClosedEvent = void 0;
exports.MongoSystemError = exports.MongoServerSelectionError = exports.MongoServerError = exports.MongoServerClosedError = exports.MongoRuntimeError = exports.MongoParseError = exports.MongoOIDCError = exports.MongoNotConnectedError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoKerberosError = exports.MongoInvalidArgumentError = exports.MongoGridFSStreamError = exports.MongoGridFSChunkError = exports.MongoGCPError = exports.MongoExpiredSessionError = exports.MongoError = exports.MongoDriverError = exports.MongoDecompressionError = exports.MongoCursorInUseError = exports.MongoCursorExhaustedError = exports.MongoCompatibilityError = exports.MongoClientBulkWriteExecutionError = exports.MongoClientBulkWriteCursorError = exports.MongoChangeStreamError = exports.MongoBatchReExecutionError = exports.MongoAzureError = exports.MongoAWSError = exports.MongoAPIError = exports.ChangeStreamCursor = exports.ClientEncryption = exports.MongoBulkWriteError = exports.UUID = exports.Timestamp = exports.ObjectId = exports.MinKey = exports.MaxKey = exports.Long = exports.Int32 = exports.Double = exports.Decimal128 = exports.DBRef = exports.Code = exports.BSONType = exports.BSONSymbol = exports.BSONRegExp = exports.Binary = exports.BSON = void 0;
exports.ConnectionCreatedEvent = exports.ConnectionClosedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckedInEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = exports.CommandFailedEvent = exports.WriteConcern = exports.ReadPreference = exports.ReadConcern = exports.TopologyType = exports.ServerType = exports.ReadPreferenceMode = exports.ReadConcernLevel = exports.ProfilingLevel = exports.ReturnDocument = exports.ServerApiVersion = exports.ExplainVerbosity = exports.MongoErrorLabel = exports.CURSOR_FLAGS = exports.Compressor = exports.AuthMechanism = exports.GSSAPICanonicalizationValue = exports.AutoEncryptionLoggerLevel = exports.BatchType = exports.UnorderedBulkOperation = exports.OrderedBulkOperation = exports.MongoClient = exports.ListIndexesCursor = exports.ListCollectionsCursor = exports.GridFSBucketWriteStream = exports.GridFSBucketReadStream = exports.GridFSBucket = exports.FindCursor = exports.Db = exports.Collection = exports.ClientSession = exports.ChangeStream = exports.CancellationToken = exports.AggregationCursor = exports.Admin = exports.AbstractCursor = exports.configureExplicitResourceManagement = exports.MongoWriteConcernError = exports.MongoUnexpectedServerResponseError = exports.MongoTransactionError = exports.MongoTopologyClosedError = exports.MongoTailableCursorError = void 0;
exports.MongoClientAuthProviders = exports.MongoCryptKMSRequestNetworkTimeoutError = exports.MongoCryptInvalidArgumentError = exports.MongoCryptError = exports.MongoCryptCreateEncryptedCollectionError = exports.MongoCryptCreateDataKeyError = exports.MongoCryptAzureKMSRequestError = exports.SrvPollingEvent = exports.WaitingForSuitableServerEvent = exports.ServerSelectionSucceededEvent = exports.ServerSelectionStartedEvent = exports.ServerSelectionFailedEvent = exports.ServerSelectionEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.TopologyClosedEvent = exports.ServerOpeningEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.ServerHeartbeatFailedEvent = exports.ServerDescriptionChangedEvent = exports.ServerClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolMonitoringEvent = exports.ConnectionPoolCreatedEvent = exports.ConnectionPoolClosedEvent = exports.ConnectionPoolClearedEvent = void 0;
const admin_1 = require("./admin");

@@ -70,4 +70,5 @@ Object.defineProperty(exports, "Admin", { enumerable: true, get: function () { return admin_1.Admin; } });

Object.defineProperty(exports, "MongoBatchReExecutionError", { enumerable: true, get: function () { return error_1.MongoBatchReExecutionError; } });
Object.defineProperty(exports, "MongoBulkWriteCursorError", { enumerable: true, get: function () { return error_1.MongoBulkWriteCursorError; } });
Object.defineProperty(exports, "MongoChangeStreamError", { enumerable: true, get: function () { return error_1.MongoChangeStreamError; } });
Object.defineProperty(exports, "MongoClientBulkWriteCursorError", { enumerable: true, get: function () { return error_1.MongoClientBulkWriteCursorError; } });
Object.defineProperty(exports, "MongoClientBulkWriteExecutionError", { enumerable: true, get: function () { return error_1.MongoClientBulkWriteExecutionError; } });
Object.defineProperty(exports, "MongoCompatibilityError", { enumerable: true, get: function () { return error_1.MongoCompatibilityError; } });

@@ -74,0 +75,0 @@ Object.defineProperty(exports, "MongoCursorExhaustedError", { enumerable: true, get: function () { return error_1.MongoCursorExhaustedError; } });

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClientBulkWriteOperation = void 0;
const beta_1 = require("../../beta");
const responses_1 = require("../../cmap/wire_protocol/responses");

@@ -16,5 +17,5 @@ const utils_1 = require("../../utils");

}
constructor(command, options) {
constructor(commandBuilder, options) {
super(undefined, options);
this.command = command;
this.commandBuilder = commandBuilder;
this.options = options;

@@ -30,3 +31,26 @@ this.ns = new utils_1.MongoDBNamespace('admin', '$cmd');

async execute(server, session) {
return await super.executeCommand(server, session, this.command, responses_1.ClientBulkWriteCursorResponse);
let command;
if (server.description.type === beta_1.ServerType.LoadBalancer) {
if (session) {
// Checkout a connection to build the command.
const connection = await server.pool.checkOut();
// Pin the connection to the session so it gets used to execute the command and we do not
// perform a double check-in/check-out.
session.pin(connection);
command = this.commandBuilder.buildBatch(connection.hello?.maxMessageSizeBytes, connection.hello?.maxWriteBatchSize);
}
else {
throw new beta_1.MongoClientBulkWriteExecutionError('Session provided to the client bulk write operation must be present.');
}
}
else {
// At this point we have a server and the auto connect code has already
// run in executeOperation, so the server description will be populated.
// We can use that to build the command.
if (!server.description.maxWriteBatchSize || !server.description.maxMessageSizeBytes) {
throw new beta_1.MongoClientBulkWriteExecutionError('In order to execute a client bulk write, both maxWriteBatchSize and maxMessageSizeBytes must be provided by the servers hello response.');
}
command = this.commandBuilder.buildBatch(server.description.maxMessageSizeBytes, server.description.maxWriteBatchSize);
}
return await super.executeCommand(server, session, command, responses_1.ClientBulkWriteCursorResponse);
}

@@ -36,3 +60,7 @@ }

// Skipping the collation as it goes on the individual ops.
(0, operation_1.defineAspects)(ClientBulkWriteOperation, [operation_1.Aspect.WRITE_OPERATION, operation_1.Aspect.SKIP_COLLATION]);
(0, operation_1.defineAspects)(ClientBulkWriteOperation, [
operation_1.Aspect.WRITE_OPERATION,
operation_1.Aspect.SKIP_COLLATION,
operation_1.Aspect.CURSOR_CREATING
]);
//# sourceMappingURL=client_bulk_write.js.map
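
Note how the rewritten execute() sources its batch limits: on load-balanced topologies it checks out and pins a connection and reads maxMessageSizeBytes and maxWriteBatchSize from that connection's hello, while elsewhere it reads them from the already-populated server description, erroring if either is missing. A hedged sketch of that selection, with resolveBatchLimits and BatchLimits as illustrative names rather than driver API:

// Illustrative only: mirrors the limit-selection branch in execute() above.
interface BatchLimits {
  maxMessageSizeBytes: number;
  maxWriteBatchSize: number;
}

function resolveBatchLimits(
  isLoadBalanced: boolean,
  connectionHello: Partial<BatchLimits> | undefined,
  description: Partial<BatchLimits>
): BatchLimits {
  const source = isLoadBalanced ? connectionHello : description;
  const { maxMessageSizeBytes, maxWriteBatchSize } = source ?? {};
  if (maxMessageSizeBytes == null || maxWriteBatchSize == null) {
    throw new Error('hello response must provide maxMessageSizeBytes and maxWriteBatchSize');
  }
  return { maxMessageSizeBytes, maxWriteBatchSize };
}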

@@ -5,4 +5,9 @@ "use strict";

exports.buildOperation = buildOperation;
const bson_1 = require("../../bson");
const commands_1 = require("../../cmap/commands");
const utils_1 = require("../../utils");
/**
* The bytes overhead for the extra fields added post command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */

@@ -18,2 +23,4 @@ class ClientBulkWriteCommandBuilder {

this.pkFactory = pkFactory ?? utils_1.DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.lastOperations = [];
}

@@ -31,23 +38,79 @@ /**

/**
* Build the bulk write commands from the models.
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
buildCommands() {
// Iterate the models to build the ops and nsInfo fields.
const operations = [];
hasNextBatch() {
return this.currentModelIndex < this.models.length;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
* @param maxWriteBatchSize - The max write batch size.
* @returns The client bulk write command.
*/
buildBatch(maxMessageSizeBytes, maxWriteBatchSize) {
let commandLength = 0;
let currentNamespaceIndex = 0;
const command = this.baseCommand();
const namespaces = new Map();
for (const model of this.models) {
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const index = namespaces.get(ns);
if (index != null) {
operations.push(buildOperation(model, index, this.pkFactory));
const nsIndex = namespaces.get(ns);
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory);
const operationBuffer = bson_1.BSON.serialize(operation);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
operations.push(buildOperation(model, currentNamespaceIndex, this.pkFactory));
currentNamespaceIndex++;
const nsInfo = { ns: ns };
const nsInfoBuffer = bson_1.BSON.serialize(nsInfo);
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
const operationBuffer = bson_1.BSON.serialize(operation);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
const nsInfo = Array.from(namespaces.keys(), ns => ({ ns }));
// The base command.
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
baseCommand() {
const command = {

@@ -57,4 +120,4 @@ bulkWrite: 1,

ordered: this.options.ordered ?? true,
ops: new commands_1.DocumentSequence(operations),
nsInfo: new commands_1.DocumentSequence(nsInfo)
ops: new commands_1.DocumentSequence('ops'),
nsInfo: new commands_1.DocumentSequence('nsInfo')
};

@@ -74,3 +137,3 @@ // Add bypassDocumentValidation if it was present in the options.

}
return [command];
return command;
}

@@ -77,0 +140,0 @@ }
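
The practical consequence of this rewrite is that callers now drain the builder one batch at a time: each call to buildBatch packs models until the serialized size would exceed maxMessageSizeBytes (less the fixed MESSAGE_OVERHEAD_BYTES allowance) or the op count would reach maxWriteBatchSize, and hasNextBatch reports whether models remain. A consumption sketch under those assumptions (builder construction and command execution elided):

// Sketch: drain the builder batch by batch (construction of the builder elided).
declare const builder: {
  hasNextBatch(): boolean;
  buildBatch(maxMessageSizeBytes: number, maxWriteBatchSize: number): unknown;
};

const maxMessageSizeBytes = 48_000_000; // typical hello value, for illustration
const maxWriteBatchSize = 100_000;      // typical hello value, for illustration

while (builder.hasNextBatch()) {
  const command = builder.buildBatch(maxMessageSizeBytes, maxWriteBatchSize);
  // Each command is executed before the next batch is built, so the builder's
  // currentModelIndex only advances past models that actually fit.
}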

@@ -40,33 +40,27 @@ "use strict";

const commandBuilder = new command_builder_1.ClientBulkWriteCommandBuilder(this.operations, this.options, pkFactory);
const commands = commandBuilder.buildCommands();
// Unacknowledged writes need to execute all batches and return { ok: 1 }
if (this.options.writeConcern?.w === 0) {
return await executeUnacknowledged(this.client, this.options, commands);
while (commandBuilder.hasNextBatch()) {
const operation = new client_bulk_write_1.ClientBulkWriteOperation(commandBuilder, this.options);
await (0, execute_operation_1.executeOperation)(this.client, operation);
}
return { ok: 1 };
}
return await executeAcknowledged(this.client, this.options, commands);
else {
const resultsMerger = new results_merger_1.ClientBulkWriteResultsMerger(this.options);
// For each command we will create and exhaust a cursor for the results.
let currentBatchOffset = 0;
while (commandBuilder.hasNextBatch()) {
const cursor = new client_bulk_write_cursor_1.ClientBulkWriteCursor(this.client, commandBuilder, this.options);
const docs = await cursor.toArray();
const operations = cursor.operations;
resultsMerger.merge(currentBatchOffset, operations, cursor.response, docs);
// Set the new batch offset so we can map back to the index in the original models.
currentBatchOffset += operations.length;
}
return resultsMerger.result;
}
}
}
exports.ClientBulkWriteExecutor = ClientBulkWriteExecutor;
/**
* Execute an acknowledged bulk write.
*/
async function executeAcknowledged(client, options, commands) {
const resultsMerger = new results_merger_1.ClientBulkWriteResultsMerger(options);
// For each command we will create and exhaust a cursor for the results.
for (const command of commands) {
const cursor = new client_bulk_write_cursor_1.ClientBulkWriteCursor(client, command, options);
const docs = await cursor.toArray();
resultsMerger.merge(command.ops.documents, cursor.response, docs);
}
return resultsMerger.result;
}
/**
* Execute an unacknowledged bulk write.
*/
async function executeUnacknowledged(client, options, commands) {
for (const command of commands) {
const operation = new client_bulk_write_1.ClientBulkWriteOperation(command, options);
await (0, execute_operation_1.executeOperation)(client, operation);
}
return { ok: 1 };
}
//# sourceMappingURL=executor.js.map
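
For context, the executor above is what ultimately backs the public client bulk write call: with a { w: 0 } write concern it executes every batch and resolves { ok: 1 } without reading results, otherwise it exhausts a cursor per batch and merges the results. A hedged usage sketch, assuming the API is exposed as MongoClient.bulkWrite with namespace-qualified models, as in later driver releases:

import { MongoClient } from 'mongodb';

// Usage sketch only: assumes the client bulk write API surfaces as
// MongoClient.bulkWrite with namespace-qualified models.
const client = new MongoClient('mongodb://localhost:27017');
const result = await client.bulkWrite(
  [
    { namespace: 'test.coll1', name: 'insertOne', document: { name: 'a' } },
    { namespace: 'test.coll2', name: 'deleteOne', filter: { name: 'b' } }
  ],
  { ordered: true }
);
console.log(result); // merged counts and per-index results across all batches
await client.close();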

@@ -33,2 +33,3 @@ "use strict";

* Merge the results in the cursor to the existing result.
* @param currentBatchOffset - The offset index to the original models.
* @param response - The cursor response.

@@ -38,3 +39,3 @@ * @param documents - The documents in the cursor.

*/
merge(operations, response, documents) {
merge(currentBatchOffset, operations, response, documents) {
// Update the counts from the cursor response.

@@ -55,3 +56,5 @@ this.result.insertedCount += response.insertedCount;

if ('insert' in operation) {
this.result.insertResults?.set(document.idx, { insertedId: operation.document._id });
this.result.insertResults?.set(document.idx + currentBatchOffset, {
insertedId: operation.document._id
});
}

@@ -69,7 +72,9 @@ // Handle update results.

}
this.result.updateResults?.set(document.idx, result);
this.result.updateResults?.set(document.idx + currentBatchOffset, result);
}
// Handle delete results.
if ('delete' in operation) {
this.result.deleteResults?.set(document.idx, { deletedCount: document.n });
this.result.deleteResults?.set(document.idx + currentBatchOffset, {
deletedCount: document.n
});
}

@@ -76,0 +81,0 @@ }
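
The new currentBatchOffset parameter exists because each batch's cursor documents index operations relative to that batch (idx restarts at 0), while the merged result keys positions in the caller's original models array; for example, if the first batch held three operations, idx 0 from the second batch maps to global index 3. A tiny sketch of the bookkeeping:

// Sketch: map a per-batch result index back to the caller's model index.
function globalIndex(idxWithinBatch: number, currentBatchOffset: number): number {
  return idxWithinBatch + currentBatchOffset;
}

let currentBatchOffset = 0;
for (const batchSize of [3, 2]) { // two batches: 3 ops, then 2 ops
  // ...merge this batch using globalIndex(doc.idx, currentBatchOffset)...
  currentBatchOffset += batchSize; // after batch 1, idx 0 maps to 3
}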

@@ -63,2 +63,5 @@ "use strict";

this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
this.primary = hello?.primary ?? null;

@@ -65,0 +68,0 @@ this.me = hello?.me?.toLowerCase() ?? null;

@@ -338,3 +338,4 @@ "use strict";

'listCollections' in cmd ||
'listIndexes' in cmd);
'listIndexes' in cmd ||
'bulkWrite' in cmd);
}

@@ -341,0 +342,0 @@ return false;

{
"name": "mongodb",
"version": "6.9.0-dev.20241001.sha.85f7dcf9",
"version": "6.9.0-dev.20241002.sha.d56e235c",
"description": "The official MongoDB driver for Node.js",

@@ -5,0 +5,0 @@ "main": "lib/index.js",

@@ -432,7 +432,57 @@ import type { BSONSerializeOptions, Document, Long } from '../bson';

export class DocumentSequence {
field: string;
documents: Document[];
serializedDocumentsLength: number;
private chunks: Uint8Array[];
private header: Buffer;
constructor(documents: Document[]) {
this.documents = documents;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field: string, documents?: Document[]) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
// Document sequences start with type 1 at the first byte.
// Field strings must always be UTF-8.
const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document to the document sequence. Will serialize the document
* as well and return the current serialized length of all documents.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document: Document, buffer: Uint8Array): number {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin(): Uint8Array {
return Buffer.concat(this.chunks);
}
}

@@ -547,17 +597,3 @@

if (value instanceof DocumentSequence) {
// Document sequences start with type 1 at the first byte.
const buffer = Buffer.allocUnsafe(1 + 4 + key.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${key}\0`, 5);
chunks.push(buffer);
// Fourth part are the documents' bytes.
let docsLength = 0;
for (const doc of value.documents) {
const docBson = this.serializeBson(doc);
docsLength += docBson.length;
chunks.push(docBson);
}
// Second part of the sequence is the length at offset 1;
buffer.writeInt32LE(4 + key.length + 1 + docsLength, 1);
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be

@@ -564,0 +600,0 @@ // removed in the OP_MSG request first section, and DocumentSequence is not a

@@ -1,6 +0,8 @@

import type { Document } from '../bson';
import { type Document } from 'bson';
import { type ClientBulkWriteCursorResponse } from '../cmap/wire_protocol/responses';
import { MongoBulkWriteCursorError } from '../error';
import { MongoClientBulkWriteCursorError } from '../error';
import type { MongoClient } from '../mongo_client';
import { ClientBulkWriteOperation } from '../operations/client_bulk_write/client_bulk_write';
import { type ClientBulkWriteCommandBuilder } from '../operations/client_bulk_write/command_builder';
import { type ClientBulkWriteOptions } from '../operations/client_bulk_write/common';

@@ -27,3 +29,3 @@ import { executeOperation } from '../operations/execute_operation';

export class ClientBulkWriteCursor extends AbstractCursor {
public readonly command: Document;
commandBuilder: ClientBulkWriteCommandBuilder;
/** @internal */

@@ -35,6 +37,10 @@ private cursorResponse?: ClientBulkWriteCursorResponse;

/** @internal */
constructor(client: MongoClient, command: Document, options: ClientBulkWriteOptions = {}) {
constructor(
client: MongoClient,
commandBuilder: ClientBulkWriteCommandBuilder,
options: ClientBulkWriteOptions = {}
) {
super(client, new MongoDBNamespace('admin', '$cmd'), options);
this.command = command;
this.commandBuilder = commandBuilder;
this.clientBulkWriteOptions = options;

@@ -49,3 +55,3 @@ }

if (this.cursorResponse) return this.cursorResponse;
throw new MongoBulkWriteCursorError(
throw new MongoClientBulkWriteCursorError(
'No client bulk write cursor response returned from the server.'

@@ -55,6 +61,13 @@ );

/**
* Get the last set of operations the cursor executed.
*/
get operations(): Document[] {
return this.commandBuilder.lastOperations;
}
clone(): ClientBulkWriteCursor {
const clonedOptions = mergeOptions({}, this.clientBulkWriteOptions);
delete clonedOptions.session;
return new ClientBulkWriteCursor(this.client, this.command, {
return new ClientBulkWriteCursor(this.client, this.commandBuilder, {
...clonedOptions

@@ -66,3 +79,3 @@ });

async _initialize(session: ClientSession): Promise<InitialCursorResponse> {
const clientBulkWriteOperation = new ClientBulkWriteOperation(this.command, {
const clientBulkWriteOperation = new ClientBulkWriteOperation(this.commandBuilder, {
...this.clientBulkWriteOptions,

@@ -69,0 +82,0 @@ ...this.cursorOptions,

@@ -625,3 +625,3 @@ import type { Document } from './bson';

*/
export class MongoBulkWriteCursorError extends MongoRuntimeError {
export class MongoClientBulkWriteCursorError extends MongoRuntimeError {
/**

@@ -643,3 +643,3 @@ * **Do not use this constructor!**

override get name(): string {
return 'MongoBulkWriteCursorError';
return 'MongoClientBulkWriteCursorError';
}

@@ -649,2 +649,29 @@ }

/**
* An error indicating that an error occurred on the client when executing a client bulk write.
*
* @public
* @category Error
*/
export class MongoClientBulkWriteExecutionError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string) {
super(message);
}
override get name(): string {
return 'MongoClientBulkWriteExecutionError';
}
}
/**
* An error generated when a ChangeStream operation fails to execute.

@@ -651,0 +678,0 @@ *

@@ -47,4 +47,5 @@ import { Admin } from './admin';

MongoBatchReExecutionError,
MongoBulkWriteCursorError,
MongoChangeStreamError,
MongoClientBulkWriteCursorError,
MongoClientBulkWriteExecutionError,
MongoCompatibilityError,

@@ -51,0 +52,0 @@ MongoCursorExhaustedError,

@@ -1,3 +0,2 @@

import { type Document } from 'bson';
import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta';
import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses';

@@ -9,2 +8,3 @@ import type { Server } from '../../sdam/server';

import { Aspect, defineAspects } from '../operation';
import { type ClientBulkWriteCommandBuilder } from './command_builder';
import { type ClientBulkWriteOptions } from './common';

@@ -17,3 +17,3 @@

export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCursorResponse> {
command: Document;
commandBuilder: ClientBulkWriteCommandBuilder;
override options: ClientBulkWriteOptions;

@@ -25,5 +25,5 @@

constructor(command: Document, options: ClientBulkWriteOptions) {
constructor(commandBuilder: ClientBulkWriteCommandBuilder, options: ClientBulkWriteOptions) {
super(undefined, options);
this.command = command;
this.commandBuilder = commandBuilder;
this.options = options;

@@ -43,3 +43,35 @@ this.ns = new MongoDBNamespace('admin', '$cmd');

): Promise<ClientBulkWriteCursorResponse> {
return await super.executeCommand(server, session, this.command, ClientBulkWriteCursorResponse);
let command;
if (server.description.type === ServerType.LoadBalancer) {
if (session) {
// Checkout a connection to build the command.
const connection = await server.pool.checkOut();
// Pin the connection to the session so it gets used to execute the command and we do not
// perform a double check-in/check-out.
session.pin(connection);
command = this.commandBuilder.buildBatch(
connection.hello?.maxMessageSizeBytes,
connection.hello?.maxWriteBatchSize
);
} else {
throw new MongoClientBulkWriteExecutionError(
'Session provided to the client bulk write operation must be present.'
);
}
} else {
// At this point we have a server and the auto connect code has already
// run in executeOperation, so the server description will be populated.
// We can use that to build the command.
if (!server.description.maxWriteBatchSize || !server.description.maxMessageSizeBytes) {
throw new MongoClientBulkWriteExecutionError(
'In order to execute a client bulk write, both maxWriteBatchSize and maxMessageSizeBytes must be provided by the servers hello response.'
);
}
command = this.commandBuilder.buildBatch(
server.description.maxMessageSizeBytes,
server.description.maxWriteBatchSize
);
}
return await super.executeCommand(server, session, command, ClientBulkWriteCursorResponse);
}

@@ -49,2 +81,6 @@ }

// Skipping the collation as it goes on the individual ops.
defineAspects(ClientBulkWriteOperation, [Aspect.WRITE_OPERATION, Aspect.SKIP_COLLATION]);
defineAspects(ClientBulkWriteOperation, [
Aspect.WRITE_OPERATION,
Aspect.SKIP_COLLATION,
Aspect.CURSOR_CREATING
]);

@@ -1,2 +0,2 @@

import { type Document } from '../../bson';
import { BSON, type Document } from '../../bson';
import { DocumentSequence } from '../../cmap/commands';

@@ -31,2 +31,7 @@ import { type PkFactory } from '../../mongo_client';

/**
* The bytes overhead for the extra fields added post command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */

@@ -37,2 +42,4 @@ export class ClientBulkWriteCommandBuilder {

pkFactory: PkFactory;
currentModelIndex: number;
lastOperations: Document[];

@@ -51,2 +58,4 @@ /**

this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.lastOperations = [];
}

@@ -66,24 +75,86 @@

/**
* Build the bulk write commands from the models.
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
buildCommands(): ClientBulkWriteCommand[] {
// Iterate the models to build the ops and nsInfo fields.
const operations = [];
hasNextBatch(): boolean {
return this.currentModelIndex < this.models.length;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
* @param maxWriteBatchSize - The max write batch size.
* @returns The client bulk write command.
*/
buildBatch(maxMessageSizeBytes: number, maxWriteBatchSize: number): ClientBulkWriteCommand {
let commandLength = 0;
let currentNamespaceIndex = 0;
const command: ClientBulkWriteCommand = this.baseCommand();
const namespaces = new Map<string, number>();
for (const model of this.models) {
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const index = namespaces.get(ns);
if (index != null) {
operations.push(buildOperation(model, index, this.pkFactory));
const nsIndex = namespaces.get(ns);
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory);
const operationBuffer = BSON.serialize(operation);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (
commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
} else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
operations.push(buildOperation(model, currentNamespaceIndex, this.pkFactory));
currentNamespaceIndex++;
const nsInfo = { ns: ns };
const nsInfoBuffer = BSON.serialize(nsInfo);
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
const operationBuffer = BSON.serialize(operation);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (
commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
const nsInfo = Array.from(namespaces.keys(), ns => ({ ns }));
// The base command.
private baseCommand(): ClientBulkWriteCommand {
const command: ClientBulkWriteCommand = {

@@ -93,4 +164,4 @@ bulkWrite: 1,

ordered: this.options.ordered ?? true,
ops: new DocumentSequence(operations),
nsInfo: new DocumentSequence(nsInfo)
ops: new DocumentSequence('ops'),
nsInfo: new DocumentSequence('nsInfo')
};

@@ -111,3 +182,4 @@ // Add bypassDocumentValidation if it was present in the options.

}
return [command];
return command;
}

@@ -114,0 +186,0 @@ }

@@ -1,3 +0,1 @@

import { type Document } from 'bson';
import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';

@@ -8,3 +6,3 @@ import { type MongoClient } from '../../mongo_client';

import { ClientBulkWriteOperation } from './client_bulk_write';
import { type ClientBulkWriteCommand, ClientBulkWriteCommandBuilder } from './command_builder';
import { ClientBulkWriteCommandBuilder } from './command_builder';
import {

@@ -61,41 +59,24 @@ type AnyClientBulkWriteModel,

);
const commands = commandBuilder.buildCommands();
// Unacknowledged writes need to execute all batches and return { ok: 1 }
if (this.options.writeConcern?.w === 0) {
return await executeUnacknowledged(this.client, this.options, commands);
while (commandBuilder.hasNextBatch()) {
const operation = new ClientBulkWriteOperation(commandBuilder, this.options);
await executeOperation(this.client, operation);
}
return { ok: 1 };
} else {
const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
// For each command we will create and exhaust a cursor for the results.
let currentBatchOffset = 0;
while (commandBuilder.hasNextBatch()) {
const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, this.options);
const docs = await cursor.toArray();
const operations = cursor.operations;
resultsMerger.merge(currentBatchOffset, operations, cursor.response, docs);
// Set the new batch offset so we can map back to the index in the original models.
currentBatchOffset += operations.length;
}
return resultsMerger.result;
}
return await executeAcknowledged(this.client, this.options, commands);
}
}
/**
* Execute an acknowledged bulk write.
*/
async function executeAcknowledged(
client: MongoClient,
options: ClientBulkWriteOptions,
commands: ClientBulkWriteCommand[]
): Promise<ClientBulkWriteResult> {
const resultsMerger = new ClientBulkWriteResultsMerger(options);
// For each command we will create and exhaust a cursor for the results.
for (const command of commands) {
const cursor = new ClientBulkWriteCursor(client, command, options);
const docs = await cursor.toArray();
resultsMerger.merge(command.ops.documents, cursor.response, docs);
}
return resultsMerger.result;
}
/**
* Execute an unacknowledged bulk write.
*/
async function executeUnacknowledged(
client: MongoClient,
options: ClientBulkWriteOptions,
commands: Document[]
): Promise<{ ok: 1 }> {
for (const command of commands) {
const operation = new ClientBulkWriteOperation(command, options);
await executeOperation(client, operation);
}
return { ok: 1 };
}

@@ -45,2 +45,3 @@ import { type Document } from '../../bson';

* Merge the results in the cursor to the existing result.
* @param currentBatchOffset - The offset index to the original models.
* @param response - The cursor response.

@@ -51,2 +52,3 @@ * @param documents - The documents in the cursor.

merge(
currentBatchOffset: number,
operations: Document[],

@@ -72,3 +74,5 @@ response: ClientBulkWriteCursorResponse,

if ('insert' in operation) {
this.result.insertResults?.set(document.idx, { insertedId: operation.document._id });
this.result.insertResults?.set(document.idx + currentBatchOffset, {
insertedId: operation.document._id
});
}

@@ -86,7 +90,9 @@ // Handle update results.

}
this.result.updateResults?.set(document.idx, result);
this.result.updateResults?.set(document.idx + currentBatchOffset, result);
}
// Handle delete results.
if ('delete' in operation) {
this.result.deleteResults?.set(document.idx, { deletedCount: document.n });
this.result.deleteResults?.set(document.idx + currentBatchOffset, {
deletedCount: document.n
});
}

@@ -93,0 +99,0 @@ }

@@ -72,2 +72,8 @@ import { type Document, Long, type ObjectId } from '../bson';

logicalSessionTimeoutMinutes: number | null;
/** The max message size in bytes for the server. */
maxMessageSizeBytes: number | null;
/** The max number of writes in a bulk write command. */
maxWriteBatchSize: number | null;
/** The max bson object size. */
maxBsonObjectSize: number | null;

@@ -115,2 +121,5 @@ // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level

this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
this.primary = hello?.primary ?? null;

@@ -117,0 +126,0 @@ this.me = hello?.me?.toLowerCase() ?? null;

@@ -516,3 +516,4 @@ import type { Document } from '../bson';

'listCollections' in cmd ||
'listIndexes' in cmd
'listIndexes' in cmd ||
'bulkWrite' in cmd
);

@@ -519,0 +520,0 @@ }

@@ -46,3 +46,2 @@ import { EJSON, type ObjectId } from '../bson';

commonWireVersion: number;
/**

@@ -49,0 +48,0 @@ * Create a TopologyDescription

Sorry, the diffs of the remaining files are either too big to display or not supported yet.
