mongodb - npm Package Compare versions

Comparing version 6.9.0 to 6.10.0

lib/cursor/client_bulk_write_cursor.js

lib/bson.js

@@ -8,2 +8,3 @@ "use strict";

exports.parseUtf8ValidationOption = parseUtf8ValidationOption;
/* eslint-disable no-restricted-imports */
const bson_1 = require("bson");

@@ -10,0 +11,0 @@ var bson_2 = require("bson");

lib/bulk/common.js

@@ -5,3 +5,2 @@ "use strict";

exports.mergeBatchResults = mergeBatchResults;
const util_1 = require("util");
const bson_1 = require("../bson");

@@ -287,52 +286,32 @@ const error_1 = require("../error");

}
function executeCommands(bulkOperation, options, callback) {
async function executeCommands(bulkOperation, options) {
if (bulkOperation.s.batches.length === 0) {
return callback(undefined, new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered));
return new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
}
const batch = bulkOperation.s.batches.shift();
function resultHandler(err, result) {
// Error is a driver related error not a bulk op error, return early
if (err && 'message' in err && !(err instanceof error_1.MongoWriteConcernError)) {
return callback(new MongoBulkWriteError(err, new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered)));
for (const batch of bulkOperation.s.batches) {
const finalOptions = (0, utils_1.resolveOptions)(bulkOperation, {
...options,
ordered: bulkOperation.isOrdered
});
if (finalOptions.bypassDocumentValidation !== true) {
delete finalOptions.bypassDocumentValidation;
}
if (err instanceof error_1.MongoWriteConcernError) {
return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, bulkOperation.isOrdered, err, callback);
// Is the bypassDocumentValidation option specified
if (bulkOperation.s.bypassDocumentValidation === true) {
finalOptions.bypassDocumentValidation = true;
}
// Merge the results together
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, result);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
if (bulkOperation.handleWriteError(callback, writeResult))
return;
// Execute the next command in line
executeCommands(bulkOperation, options, callback);
}
const finalOptions = (0, utils_1.resolveOptions)(bulkOperation, {
...options,
ordered: bulkOperation.isOrdered
});
if (finalOptions.bypassDocumentValidation !== true) {
delete finalOptions.bypassDocumentValidation;
}
// Set an operationId if provided
if (bulkOperation.operationId) {
resultHandler.operationId = bulkOperation.operationId;
}
// Is the bypassDocumentValidation option specified
if (bulkOperation.s.bypassDocumentValidation === true) {
finalOptions.bypassDocumentValidation = true;
}
// Is the checkKeys option disabled
if (bulkOperation.s.checkKeys === false) {
finalOptions.checkKeys = false;
}
if (finalOptions.retryWrites) {
if (isUpdateBatch(batch)) {
finalOptions.retryWrites = finalOptions.retryWrites && !batch.operations.some(op => op.multi);
// Is the checkKeys option disabled
if (bulkOperation.s.checkKeys === false) {
finalOptions.checkKeys = false;
}
if (isDeleteBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
if (finalOptions.retryWrites) {
if (isUpdateBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.multi);
}
if (isDeleteBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
}
}
}
try {
const operation = isInsertBatch(batch)

@@ -345,21 +324,35 @@ ? new insert_1.InsertOperation(bulkOperation.s.namespace, batch.operations, finalOptions)

: null;
if (operation != null) {
(0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, operation).then(result => resultHandler(undefined, result), error => resultHandler(error));
if (operation == null)
throw new error_1.MongoRuntimeError(`Unknown batchType: ${batch.batchType}`);
let thrownError = null;
let result;
try {
result = await (0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, operation);
}
catch (error) {
thrownError = error;
}
if (thrownError != null) {
if (thrownError instanceof error_1.MongoWriteConcernError) {
mergeBatchResults(batch, bulkOperation.s.bulkResult, thrownError, result);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
throw new MongoBulkWriteError({
message: thrownError.result.writeConcernError.errmsg,
code: thrownError.result.writeConcernError.code
}, writeResult);
}
else {
// Error is a driver related error not a bulk op error, return early
throw new MongoBulkWriteError(thrownError, new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered));
}
}
mergeBatchResults(batch, bulkOperation.s.bulkResult, thrownError, result);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
bulkOperation.handleWriteError(writeResult);
}
catch (err) {
// Force top level error
err.ok = 0;
// Merge top level error and return
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, undefined);
callback();
}
bulkOperation.s.batches.length = 0;
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
bulkOperation.handleWriteError(writeResult);
return writeResult;
}
function handleMongoWriteConcernError(batch, bulkResult, isOrdered, err, callback) {
mergeBatchResults(batch, bulkResult, undefined, err.result);
callback(new MongoBulkWriteError({
message: err.result.writeConcernError.errmsg,
code: err.result.writeConcernError.code
}, new BulkWriteResult(bulkResult, isOrdered)));
}
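
The rewrite above replaces the recursive callback chain with a single async loop over the batches, so failures now surface as thrown MongoBulkWriteError values instead of callback arguments. A minimal consumer-side sketch of catching one (URI and collection names are placeholders):

import { MongoClient, MongoBulkWriteError } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const coll = client.db('test').collection('docs');
    // Ordered run: the duplicate _id below fails and aborts the remaining batches.
    await coll.bulkWrite([
      { insertOne: { document: { _id: 1 } } },
      { insertOne: { document: { _id: 1 } } }
    ]);
  } catch (error) {
    if (error instanceof MongoBulkWriteError) {
      // Partial results ride along on the thrown error.
      console.log(error.result.insertedCount, error.writeErrors);
    }
  } finally {
    await client.close();
  }
}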
/**

@@ -510,3 +503,2 @@ * An error indicating an unsuccessful Bulk Write

exports.FindOperators = FindOperators;
const executeCommandsAsync = (0, util_1.promisify)(executeCommands);
/**

@@ -527,3 +519,3 @@ * TODO(NODE-4063)

}
execute(_server, session) {
async execute(_server, session) {
if (this.options.session == null) {

@@ -536,3 +528,3 @@ // An implicit session could have been created by 'executeOperation'

}
return executeCommandsAsync(this.bulkOperation, this.options);
return await executeCommands(this.bulkOperation, this.options);
}

@@ -828,3 +820,3 @@ }

*/
handleWriteError(callback, writeResult) {
handleWriteError(writeResult) {
if (this.s.bulkResult.writeErrors.length > 0) {

@@ -834,15 +826,12 @@ const msg = this.s.bulkResult.writeErrors[0].errmsg

: 'write operation failed';
callback(new MongoBulkWriteError({
throw new MongoBulkWriteError({
message: msg,
code: this.s.bulkResult.writeErrors[0].code,
writeErrors: this.s.bulkResult.writeErrors
}, writeResult));
return true;
}, writeResult);
}
const writeConcernError = writeResult.getWriteConcernError();
if (writeConcernError) {
callback(new MongoBulkWriteError(writeConcernError, writeResult));
return true;
throw new MongoBulkWriteError(writeConcernError, writeResult);
}
return false;
}

@@ -849,0 +838,0 @@ shouldForceServerObjectId() {

@@ -13,7 +13,7 @@ "use strict";

}
handleWriteError(callback, writeResult) {
handleWriteError(writeResult) {
if (this.s.batches.length) {
return false;
return;
}
return super.handleWriteError(callback, writeResult);
return super.handleWriteError(writeResult);
}

@@ -20,0 +20,0 @@ addToOperationsList(batchType, document) {

@@ -334,3 +334,3 @@ "use strict";

return;
if ((0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
if (this.cursor.id != null && (0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
this._endStream();

@@ -357,3 +357,4 @@ this.cursor.close().then(undefined, utils_1.squashError);

}
if (!(0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
if (this.cursor.id == null ||
!(0, error_1.isResumableError)(changeStreamError, this.cursor.maxWireVersion)) {
try {

@@ -360,0 +361,0 @@ await this.close();

@@ -17,9 +17,5 @@ "use strict";

}
// Fallback to legacy selection method. If wire version >= 3, use scram-sha-1
if (hello.maxWireVersion >= 3) {
return providers_1.AuthMechanism.MONGODB_SCRAM_SHA1;
}
}
// Default for wireprotocol < 3
return providers_1.AuthMechanism.MONGODB_CR;
// Default auth mechanism for 4.0 and higher.
return providers_1.AuthMechanism.MONGODB_SCRAM_SHA256;
}
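
With the MONGODB-CR fallback gone, the negotiated default when the server advertises no SASL mechanisms is now SCRAM-SHA-256, matching the 4.0+ servers this release requires. A hedged sketch of pinning the mechanism explicitly (credentials and host are placeholders):

import { MongoClient } from 'mongodb';

// SCRAM-SHA-256 is the new default; requesting 'MONGODB-CR' now throws
// MongoInvalidArgumentError (see the auth provider diff further down).
const client = new MongoClient('mongodb://user:pass@localhost:27017/admin', {
  authMechanism: 'SCRAM-SHA-256'
});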

@@ -110,3 +106,2 @@ const ALLOWED_ENVIRONMENT_NAMES = [

if ((this.mechanism === providers_1.AuthMechanism.MONGODB_GSSAPI ||
this.mechanism === providers_1.AuthMechanism.MONGODB_CR ||
this.mechanism === providers_1.AuthMechanism.MONGODB_PLAIN ||

@@ -113,0 +108,0 @@ this.mechanism === providers_1.AuthMechanism.MONGODB_SCRAM_SHA1 ||

@@ -5,3 +5,3 @@ "use strict";

exports.startCommandDocument = startCommandDocument;
const bson_1 = require("bson");
const bson_1 = require("../../../bson");
const providers_1 = require("../providers");

@@ -8,0 +8,0 @@ /**

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.HumanCallbackWorkflow = void 0;
const bson_1 = require("bson");
const bson_1 = require("../../../bson");
const error_1 = require("../../../error");

@@ -6,0 +6,0 @@ const timeout_1 = require("../../../timeout");

@@ -171,3 +171,12 @@ "use strict";

if (command instanceof commands_1.OpMsgRequest) {
return (0, utils_1.deepCopy)(command.command);
const cmd = (0, utils_1.deepCopy)(command.command);
// For OP_MSG with payload type 1 we need to pull the documents
// array out of the document sequence for monitoring.
if (cmd.ops instanceof commands_1.DocumentSequence) {
cmd.ops = cmd.ops.documents;
}
if (cmd.nsInfo instanceof commands_1.DocumentSequence) {
cmd.nsInfo = cmd.nsInfo.documents;
}
return cmd;
}

@@ -174,0 +183,0 @@ if (command.query?.$query) {

@@ -29,2 +29,4 @@ "use strict";

this.query = query;
/** moreToCome is an OP_MSG only concept */
this.moreToCome = false;
// Basic options needed to be passed in

@@ -270,5 +272,49 @@ // TODO(NODE-3483): Replace with MongoCommandError

class DocumentSequence {
constructor(documents) {
this.documents = documents;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field, documents) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
// A document sequence starts with type 1 at the first byte.
// Field strings must always be UTF-8.
const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document to the document sequence. Will serialize the document
* as well and return the current serialized length of all documents.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document, buffer) {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin() {
return Buffer.concat(this.chunks);
}
}
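
DocumentSequence is driver-internal, but the new contract is worth restating: the header chunk is allocated up front and its int32 length field is rewritten on every push, so serialization cost is paid incrementally instead of all at once in toBin(). A sketch under the assumption that the internal module is imported directly (the lib/cmap/commands path is illustrative, not public API):

import { BSON } from 'bson';
// Hypothetical deep import of the internal class, for illustration only.
import { DocumentSequence } from 'mongodb/lib/cmap/commands';

const seq = new DocumentSequence('ops');
const op = { insert: 0, document: { _id: 1 } };
// push() takes the document plus its pre-serialized bytes and returns the
// running byte length of the whole section (header + all documents).
const sectionLength = seq.push(op, BSON.serialize(op));
const section = seq.toBin(); // Buffer.concat of header + raw BSON chunks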

@@ -300,3 +346,3 @@ exports.DocumentSequence = DocumentSequence;

this.checksumPresent = false;
this.moreToCome = options.moreToCome || false;
this.moreToCome = options.moreToCome ?? command.writeConcern?.w === 0;
this.exhaustAllowed =

@@ -356,17 +402,3 @@ typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;

if (value instanceof DocumentSequence) {
// A document sequence starts with type 1 at the first byte.
const buffer = Buffer.allocUnsafe(1 + 4 + key.length);
buffer[0] = 1;
// Third part is the field name at offset 5.
encodeUTF8Into(buffer, key, 5);
chunks.push(buffer);
// Fourth part are the documents' bytes.
let docsLength = 0;
for (const doc of value.documents) {
const docBson = this.serializeBson(doc);
docsLength += docBson.length;
chunks.push(docBson);
}
// Second part of the sequence is the length at offset 1;
buffer.writeInt32LE(key.length + docsLength, 1);
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be

@@ -373,0 +405,0 @@ // removed in the OP_MSG request first section, and DocumentSequence is not a

@@ -69,2 +69,3 @@ "use strict";

this.socket.on('timeout', this.onTimeout.bind(this));
this.messageStream.pause();
}

@@ -235,3 +236,3 @@ get hello() {

});
if (options.noResponse) {
if (options.noResponse || message.moreToCome) {
yield responses_1.MongoDBResponse.empty;

@@ -294,3 +295,7 @@ return;

if (this.shouldEmitAndLogCommand) {
this.emitAndLogCommand(this.monitorCommands, Connection.COMMAND_SUCCEEDED, message.databaseName, this.established, new command_monitoring_events_1.CommandSucceededEvent(this, message, options.noResponse ? undefined : (object ??= document.toObject(bsonOptions)), started, this.description.serverConnectionId));
this.emitAndLogCommand(this.monitorCommands, Connection.COMMAND_SUCCEEDED, message.databaseName, this.established, new command_monitoring_events_1.CommandSucceededEvent(this, message, options.noResponse
? undefined
: message.moreToCome
? { ok: 1 }
: (object ??= document.toObject(bsonOptions)), started, this.description.serverConnectionId));
}
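
Because moreToCome is now inferred from writeConcern w: 0, an unacknowledged write never reads a server reply, and command monitoring substitutes the synthetic { ok: 1 } shown above. A hedged sketch observing that (URI is a placeholder):

import { MongoClient } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });
  client.on('commandSucceeded', event => {
    // For w:0 writes no reply is awaited, so event.reply is the synthetic { ok: 1 }.
    console.log(event.commandName, event.reply);
  });
  await client.db('test').collection('docs').insertOne({ a: 1 }, { writeConcern: { w: 0 } });
  await client.close();
}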

@@ -365,2 +370,3 @@ if (responseType == null) {

this.dataEvents = (0, on_data_1.onData)(this.messageStream);
this.messageStream.resume();
for await (const message of this.dataEvents) {

@@ -376,2 +382,3 @@ const response = await (0, compression_1.decompressResponse)(message);

this.dataEvents = null;
this.messageStream.pause();
this.throwIfAborted();

@@ -378,0 +385,0 @@ }

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OP_MSG = exports.OP_COMPRESSED = exports.OP_DELETE = exports.OP_QUERY = exports.OP_INSERT = exports.OP_UPDATE = exports.OP_REPLY = exports.MIN_SUPPORTED_QE_SERVER_VERSION = exports.MIN_SUPPORTED_QE_WIRE_VERSION = exports.MAX_SUPPORTED_WIRE_VERSION = exports.MIN_SUPPORTED_WIRE_VERSION = exports.MAX_SUPPORTED_SERVER_VERSION = exports.MIN_SUPPORTED_SERVER_VERSION = void 0;
exports.MIN_SUPPORTED_SERVER_VERSION = '3.6';
exports.MIN_SUPPORTED_SERVER_VERSION = '4.0';
exports.MAX_SUPPORTED_SERVER_VERSION = '8.0';
exports.MIN_SUPPORTED_WIRE_VERSION = 6;
exports.MIN_SUPPORTED_WIRE_VERSION = 7;
exports.MAX_SUPPORTED_WIRE_VERSION = 25;

@@ -8,0 +8,0 @@ exports.MIN_SUPPORTED_QE_WIRE_VERSION = 21;

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ExplainedCursorResponse = exports.CursorResponse = exports.MongoDBResponse = void 0;
exports.ClientBulkWriteCursorResponse = exports.ExplainedCursorResponse = exports.CursorResponse = exports.MongoDBResponse = void 0;
exports.isErrorResponse = isErrorResponse;

@@ -255,2 +255,27 @@ const bson_1 = require("../../bson");

exports.ExplainedCursorResponse = ExplainedCursorResponse;
/**
* Client bulk writes have some extra metadata at the top level that needs to be
* included in the result returned to the user.
*/
class ClientBulkWriteCursorResponse extends CursorResponse {
get insertedCount() {
return this.get('nInserted', bson_1.BSONType.int, true);
}
get upsertedCount() {
return this.get('nUpserted', bson_1.BSONType.int, true);
}
get matchedCount() {
return this.get('nMatched', bson_1.BSONType.int, true);
}
get modifiedCount() {
return this.get('nModified', bson_1.BSONType.int, true);
}
get deletedCount() {
return this.get('nDeleted', bson_1.BSONType.int, true);
}
get writeConcernError() {
return this.get('writeConcernError', bson_1.BSONType.object, false);
}
}
exports.ClientBulkWriteCursorResponse = ClientBulkWriteCursorResponse;
//# sourceMappingURL=responses.js.map
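
These getters read the top-level counters of the server's bulkWrite reply before they are folded into the result handed back to the user. The reply shape they assume, reconstructed from the field names above (a sketch, not a driver type):

// Reply shape implied by the getters above.
interface ClientBulkWriteReplyShape {
  ok: number;
  nInserted: number;
  nUpserted: number;
  nMatched: number;
  nModified: number;
  nDeleted: number;
  writeConcernError?: { code: number; errmsg: string };
  cursor: { id: unknown; firstBatch: unknown[] }; // per-operation results
}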

@@ -37,6 +37,2 @@ "use strict";

}
if (options.srvHost.split('.').length < 3) {
// TODO(NODE-3484): Replace with MongoConnectionStringError
throw new error_1.MongoAPIError('URI must include hostname, domain name, and tld');
}
// Asynchronously start TXT resolution so that we do not have to wait until

@@ -53,5 +49,3 @@ // the SRV record is resolved before starting a second DNS query.

for (const { name } of addresses) {
if (!(0, utils_1.matchesParentDomain)(name, lookupAddress)) {
throw new error_1.MongoAPIError('Server record does not share hostname with parent URI');
}
(0, utils_1.checkParentDomainMatch)(name, lookupAddress);
}

@@ -58,0 +52,0 @@ const hostAddresses = addresses.map(r => utils_1.HostAddress.fromString(`${r.name}:${r.port ?? 27017}`));

@@ -193,3 +193,3 @@ "use strict";

/**
* Execute an aggregation framework pipeline against the database, needs MongoDB \>= 3.6
* Execute an aggregation framework pipeline against the database.
*

@@ -196,0 +196,0 @@ * @param pipeline - An array of aggregation stages to be executed

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoWriteConcernError = exports.MongoServerSelectionError = exports.MongoSystemError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoCompatibilityError = exports.MongoInvalidArgumentError = exports.MongoParseError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoTopologyClosedError = exports.MongoCursorExhaustedError = exports.MongoServerClosedError = exports.MongoCursorInUseError = exports.MongoUnexpectedServerResponseError = exports.MongoGridFSChunkError = exports.MongoGridFSStreamError = exports.MongoTailableCursorError = exports.MongoChangeStreamError = exports.MongoGCPError = exports.MongoAzureError = exports.MongoOIDCError = exports.MongoAWSError = exports.MongoKerberosError = exports.MongoExpiredSessionError = exports.MongoTransactionError = exports.MongoNotConnectedError = exports.MongoDecompressionError = exports.MongoBatchReExecutionError = exports.MongoRuntimeError = exports.MongoAPIError = exports.MongoDriverError = exports.MongoServerError = exports.MongoError = exports.MongoErrorLabel = exports.GET_MORE_RESUMABLE_CODES = exports.MONGODB_ERROR_CODES = exports.NODE_IS_RECOVERING_ERROR_MESSAGE = exports.LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = exports.LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = void 0;
exports.MongoWriteConcernError = exports.MongoServerSelectionError = exports.MongoSystemError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoCompatibilityError = exports.MongoInvalidArgumentError = exports.MongoParseError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoTopologyClosedError = exports.MongoCursorExhaustedError = exports.MongoServerClosedError = exports.MongoCursorInUseError = exports.MongoUnexpectedServerResponseError = exports.MongoGridFSChunkError = exports.MongoGridFSStreamError = exports.MongoTailableCursorError = exports.MongoChangeStreamError = exports.MongoClientBulkWriteExecutionError = exports.MongoClientBulkWriteCursorError = exports.MongoClientBulkWriteError = exports.MongoGCPError = exports.MongoAzureError = exports.MongoOIDCError = exports.MongoAWSError = exports.MongoKerberosError = exports.MongoExpiredSessionError = exports.MongoTransactionError = exports.MongoNotConnectedError = exports.MongoDecompressionError = exports.MongoBatchReExecutionError = exports.MongoRuntimeError = exports.MongoAPIError = exports.MongoDriverError = exports.MongoServerError = exports.MongoError = exports.MongoErrorLabel = exports.GET_MORE_RESUMABLE_CODES = exports.MONGODB_ERROR_CODES = exports.NODE_IS_RECOVERING_ERROR_MESSAGE = exports.LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = exports.LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = void 0;
exports.isNetworkErrorBeforeHandshake = isNetworkErrorBeforeHandshake;

@@ -554,2 +554,75 @@ exports.needsRetryableWriteLabel = needsRetryableWriteLabel;

/**
* An error indicating that an error occurred when executing the bulk write.
*
* @public
* @category Error
*/
class MongoClientBulkWriteError extends MongoServerError {
/**
* Initialize the client bulk write error.
* @param message - The error message.
*/
constructor(message) {
super(message);
this.writeConcernErrors = [];
this.writeErrors = new Map();
}
get name() {
return 'MongoClientBulkWriteError';
}
}
exports.MongoClientBulkWriteError = MongoClientBulkWriteError;
/**
* An error indicating that an error occurred when processing bulk write results.
*
* @public
* @category Error
*/
class MongoClientBulkWriteCursorError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message) {
super(message);
}
get name() {
return 'MongoClientBulkWriteCursorError';
}
}
exports.MongoClientBulkWriteCursorError = MongoClientBulkWriteCursorError;
/**
* An error indicating that an error occurred on the client when executing a client bulk write.
*
* @public
* @category Error
*/
class MongoClientBulkWriteExecutionError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message) {
super(message);
}
get name() {
return 'MongoClientBulkWriteExecutionError';
}
}
exports.MongoClientBulkWriteExecutionError = MongoClientBulkWriteExecutionError;
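
Unlike MongoBulkWriteError, the new MongoClientBulkWriteError collects write concern errors in an array and write errors in a Map keyed by the index of the failed model (see its constructor above). A handling sketch (URI and namespace are placeholders):

import { MongoClient, MongoClientBulkWriteError } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017');
  try {
    await client.bulkWrite([
      { namespace: 'test.docs', name: 'insertOne', document: { _id: 1 } }
    ]);
  } catch (error) {
    if (error instanceof MongoClientBulkWriteError) {
      // writeErrors is a Map keyed by model index; writeConcernErrors is an array.
      for (const [index, writeError] of error.writeErrors) {
        console.log(`model ${index} failed`, writeError);
      }
      console.log(error.writeConcernErrors.length);
    }
  } finally {
    await client.close();
  }
}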
/**
* An error generated when a ChangeStream operation fails to execute.

@@ -907,4 +980,4 @@ *

**/
constructor(message) {
super(message);
constructor(message, options) {
super(message, options);
}

@@ -911,0 +984,0 @@ get name() {

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Explain = exports.ExplainVerbosity = void 0;
const error_1 = require("./error");
/** @public */

@@ -14,3 +13,3 @@ exports.ExplainVerbosity = Object.freeze({

class Explain {
constructor(verbosity) {
constructor(verbosity, maxTimeMS) {
if (typeof verbosity === 'boolean') {

@@ -24,11 +23,12 @@ this.verbosity = verbosity

}
this.maxTimeMS = maxTimeMS;
}
static fromOptions(options) {
if (options?.explain == null)
static fromOptions({ explain } = {}) {
if (explain == null)
return;
const explain = options.explain;
if (typeof explain === 'boolean' || typeof explain === 'string') {
return new Explain(explain);
}
throw new error_1.MongoInvalidArgumentError('Field "explain" must be a string or a boolean');
const { verbosity, maxTimeMS } = explain;
return new Explain(verbosity, maxTimeMS);
}
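
fromOptions now accepts either the legacy boolean/string verbosity or an object carrying verbosity plus a maxTimeMS applied to the explain command itself. A hedged usage sketch (setup is placeholder; typings may require a cast in some releases, but the runtime path follows fromOptions above):

import { MongoClient } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017');
  const docs = client.db('test').collection('docs');
  // Object form: a verbosity plus a time limit on the explain command itself.
  const plan = await docs
    .find({ status: 'active' }, { explain: { verbosity: 'executionStats', maxTimeMS: 2000 } })
    .toArray();
  console.log(plan);
  await client.close();
}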

@@ -35,0 +35,0 @@ }

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MongoTopologyClosedError = exports.MongoTailableCursorError = exports.MongoSystemError = exports.MongoServerSelectionError = exports.MongoServerError = exports.MongoServerClosedError = exports.MongoRuntimeError = exports.MongoParseError = exports.MongoOIDCError = exports.MongoNotConnectedError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoKerberosError = exports.MongoInvalidArgumentError = exports.MongoGridFSStreamError = exports.MongoGridFSChunkError = exports.MongoGCPError = exports.MongoExpiredSessionError = exports.MongoError = exports.MongoDriverError = exports.MongoDecompressionError = exports.MongoCursorInUseError = exports.MongoCursorExhaustedError = exports.MongoCompatibilityError = exports.MongoChangeStreamError = exports.MongoBatchReExecutionError = exports.MongoAzureError = exports.MongoAWSError = exports.MongoAPIError = exports.ChangeStreamCursor = exports.ClientEncryption = exports.MongoBulkWriteError = exports.UUID = exports.Timestamp = exports.ObjectId = exports.MinKey = exports.MaxKey = exports.Long = exports.Int32 = exports.Double = exports.Decimal128 = exports.DBRef = exports.Code = exports.BSONType = exports.BSONSymbol = exports.BSONRegExp = exports.Binary = exports.BSON = void 0;
exports.ConnectionPoolClosedEvent = exports.ConnectionPoolClearedEvent = exports.ConnectionCreatedEvent = exports.ConnectionClosedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckedInEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = exports.CommandFailedEvent = exports.WriteConcern = exports.ReadPreference = exports.ReadConcern = exports.TopologyType = exports.ServerType = exports.ReadPreferenceMode = exports.ReadConcernLevel = exports.ProfilingLevel = exports.ReturnDocument = exports.ServerApiVersion = exports.ExplainVerbosity = exports.MongoErrorLabel = exports.CURSOR_FLAGS = exports.Compressor = exports.AuthMechanism = exports.GSSAPICanonicalizationValue = exports.AutoEncryptionLoggerLevel = exports.BatchType = exports.UnorderedBulkOperation = exports.OrderedBulkOperation = exports.MongoClient = exports.ListIndexesCursor = exports.ListCollectionsCursor = exports.GridFSBucketWriteStream = exports.GridFSBucketReadStream = exports.GridFSBucket = exports.FindCursor = exports.Db = exports.Collection = exports.ClientSession = exports.ChangeStream = exports.CancellationToken = exports.AggregationCursor = exports.Admin = exports.AbstractCursor = exports.configureExplicitResourceManagement = exports.MongoWriteConcernError = exports.MongoUnexpectedServerResponseError = exports.MongoTransactionError = void 0;
exports.MongoClientAuthProviders = exports.MongoCryptKMSRequestNetworkTimeoutError = exports.MongoCryptInvalidArgumentError = exports.MongoCryptError = exports.MongoCryptCreateEncryptedCollectionError = exports.MongoCryptCreateDataKeyError = exports.MongoCryptAzureKMSRequestError = exports.SrvPollingEvent = exports.WaitingForSuitableServerEvent = exports.ServerSelectionSucceededEvent = exports.ServerSelectionStartedEvent = exports.ServerSelectionFailedEvent = exports.ServerSelectionEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.TopologyClosedEvent = exports.ServerOpeningEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.ServerHeartbeatFailedEvent = exports.ServerDescriptionChangedEvent = exports.ServerClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolMonitoringEvent = exports.ConnectionPoolCreatedEvent = void 0;
exports.MongoServerSelectionError = exports.MongoServerError = exports.MongoServerClosedError = exports.MongoRuntimeError = exports.MongoParseError = exports.MongoOIDCError = exports.MongoNotConnectedError = exports.MongoNetworkTimeoutError = exports.MongoNetworkError = exports.MongoMissingDependencyError = exports.MongoMissingCredentialsError = exports.MongoKerberosError = exports.MongoInvalidArgumentError = exports.MongoGridFSStreamError = exports.MongoGridFSChunkError = exports.MongoGCPError = exports.MongoExpiredSessionError = exports.MongoError = exports.MongoDriverError = exports.MongoDecompressionError = exports.MongoCursorInUseError = exports.MongoCursorExhaustedError = exports.MongoCompatibilityError = exports.MongoClientBulkWriteExecutionError = exports.MongoClientBulkWriteError = exports.MongoClientBulkWriteCursorError = exports.MongoChangeStreamError = exports.MongoBatchReExecutionError = exports.MongoAzureError = exports.MongoAWSError = exports.MongoAPIError = exports.ChangeStreamCursor = exports.ClientEncryption = exports.MongoBulkWriteError = exports.UUID = exports.Timestamp = exports.ObjectId = exports.MinKey = exports.MaxKey = exports.Long = exports.Int32 = exports.Double = exports.Decimal128 = exports.DBRef = exports.Code = exports.BSONType = exports.BSONSymbol = exports.BSONRegExp = exports.Binary = exports.BSON = void 0;
exports.ConnectionClosedEvent = exports.ConnectionCheckOutStartedEvent = exports.ConnectionCheckOutFailedEvent = exports.ConnectionCheckedOutEvent = exports.ConnectionCheckedInEvent = exports.CommandSucceededEvent = exports.CommandStartedEvent = exports.CommandFailedEvent = exports.WriteConcern = exports.ReadPreference = exports.ReadConcern = exports.TopologyType = exports.ServerType = exports.ReadPreferenceMode = exports.ReadConcernLevel = exports.ProfilingLevel = exports.ReturnDocument = exports.ServerApiVersion = exports.ExplainVerbosity = exports.MongoErrorLabel = exports.CURSOR_FLAGS = exports.Compressor = exports.AuthMechanism = exports.GSSAPICanonicalizationValue = exports.AutoEncryptionLoggerLevel = exports.BatchType = exports.UnorderedBulkOperation = exports.OrderedBulkOperation = exports.MongoClient = exports.ListIndexesCursor = exports.ListCollectionsCursor = exports.GridFSBucketWriteStream = exports.GridFSBucketReadStream = exports.GridFSBucket = exports.FindCursor = exports.Db = exports.Collection = exports.ClientSession = exports.ChangeStream = exports.CancellationToken = exports.AggregationCursor = exports.Admin = exports.AbstractCursor = exports.configureExplicitResourceManagement = exports.MongoWriteConcernError = exports.MongoUnexpectedServerResponseError = exports.MongoTransactionError = exports.MongoTopologyClosedError = exports.MongoTailableCursorError = exports.MongoSystemError = void 0;
exports.MongoClientAuthProviders = exports.MongoCryptKMSRequestNetworkTimeoutError = exports.MongoCryptInvalidArgumentError = exports.MongoCryptError = exports.MongoCryptCreateEncryptedCollectionError = exports.MongoCryptCreateDataKeyError = exports.MongoCryptAzureKMSRequestError = exports.SrvPollingEvent = exports.WaitingForSuitableServerEvent = exports.ServerSelectionSucceededEvent = exports.ServerSelectionStartedEvent = exports.ServerSelectionFailedEvent = exports.ServerSelectionEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.TopologyClosedEvent = exports.ServerOpeningEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.ServerHeartbeatFailedEvent = exports.ServerDescriptionChangedEvent = exports.ServerClosedEvent = exports.ConnectionReadyEvent = exports.ConnectionPoolReadyEvent = exports.ConnectionPoolMonitoringEvent = exports.ConnectionPoolCreatedEvent = exports.ConnectionPoolClosedEvent = exports.ConnectionPoolClearedEvent = exports.ConnectionCreatedEvent = void 0;
const admin_1 = require("./admin");

@@ -71,2 +71,5 @@ Object.defineProperty(exports, "Admin", { enumerable: true, get: function () { return admin_1.Admin; } });

Object.defineProperty(exports, "MongoChangeStreamError", { enumerable: true, get: function () { return error_1.MongoChangeStreamError; } });
Object.defineProperty(exports, "MongoClientBulkWriteCursorError", { enumerable: true, get: function () { return error_1.MongoClientBulkWriteCursorError; } });
Object.defineProperty(exports, "MongoClientBulkWriteError", { enumerable: true, get: function () { return error_1.MongoClientBulkWriteError; } });
Object.defineProperty(exports, "MongoClientBulkWriteExecutionError", { enumerable: true, get: function () { return error_1.MongoClientBulkWriteExecutionError; } });
Object.defineProperty(exports, "MongoCompatibilityError", { enumerable: true, get: function () { return error_1.MongoCompatibilityError; } });

@@ -73,0 +76,0 @@ Object.defineProperty(exports, "MongoCursorExhaustedError", { enumerable: true, get: function () { return error_1.MongoCursorExhaustedError; } });

@@ -5,3 +5,2 @@ "use strict";

const gssapi_1 = require("./cmap/auth/gssapi");
const mongocr_1 = require("./cmap/auth/mongocr");
const mongodb_aws_1 = require("./cmap/auth/mongodb_aws");

@@ -20,3 +19,8 @@ const mongodb_oidc_1 = require("./cmap/auth/mongodb_oidc");

[providers_1.AuthMechanism.MONGODB_AWS, () => new mongodb_aws_1.MongoDBAWS()],
[providers_1.AuthMechanism.MONGODB_CR, () => new mongocr_1.MongoCR()],
[
providers_1.AuthMechanism.MONGODB_CR,
() => {
throw new error_1.MongoInvalidArgumentError('MONGODB-CR is no longer a supported auth mechanism in MongoDB 4.0+');
}
],
[providers_1.AuthMechanism.MONGODB_GSSAPI, () => new gssapi_1.GSSAPI()],

@@ -23,0 +27,0 @@ [providers_1.AuthMechanism.MONGODB_OIDC, (workflow) => new mongodb_oidc_1.MongoDBOIDC(workflow)],

@@ -16,2 +16,3 @@ "use strict";

const mongo_types_1 = require("./mongo_types");
const executor_1 = require("./operations/client_bulk_write/executor");
const execute_operation_1 = require("./operations/execute_operation");

@@ -138,2 +139,15 @@ const run_command_1 = require("./operations/run_command");

/**
* Executes a client bulk write operation, available on server 8.0+.
* @param models - The client bulk write models.
* @param options - The client bulk write options.
* @returns A ClientBulkWriteResult for acknowledged writes and ok: 1 for unacknowledged writes.
*/
async bulkWrite(models, options) {
if (this.autoEncrypter) {
throw new error_1.MongoInvalidArgumentError('MongoClient bulkWrite does not currently support automatic encryption.');
}
// We do not need schema type information past this point ("as any" is fine)
return await new executor_1.ClientBulkWriteExecutor(this, models, (0, utils_1.resolveOptions)(this, options)).execute();
}
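
Each model names its own namespace, so a single call can mix collections (and databases) in one round trip on MongoDB 8.0+. A minimal sketch (URI and namespaces are placeholders):

import { MongoClient } from 'mongodb';

async function run(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017');
  const result = await client.bulkWrite([
    { namespace: 'shop.orders', name: 'insertOne', document: { sku: 'a1', qty: 2 } },
    {
      namespace: 'shop.inventory',
      name: 'updateOne',
      filter: { sku: 'a1' },
      update: { $inc: { reserved: 2 } } // atomic operators only; see validateUpdate below
    }
  ]);
  console.log(result.insertedCount, result.modifiedCount);
  await client.close();
}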
/**
* Connect to MongoDB using a url

@@ -140,0 +154,0 @@ *

@@ -5,3 +5,10 @@ "use strict";

exports.buildOperation = buildOperation;
const bson_1 = require("../../bson");
const commands_1 = require("../../cmap/commands");
const error_1 = require("../../error");
const utils_1 = require("../../utils");
/**
* The bytes overhead for the extra fields added post command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */

@@ -13,5 +20,10 @@ class ClientBulkWriteCommandBuilder {

*/
constructor(models, options) {
constructor(models, options, pkFactory) {
this.models = models;
this.options = options;
this.pkFactory = pkFactory ?? utils_1.DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.previousModelIndex = 0;
this.lastOperations = [];
this.isBatchRetryable = true;
}

@@ -29,23 +41,112 @@ /**

/**
* Build the bulk write commands from the models.
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
buildCommands() {
// Iterate the models to build the ops and nsInfo fields.
const operations = [];
hasNextBatch() {
return this.currentModelIndex < this.models.length;
}
/**
* When we need to retry a command we need to set the current
* model index back to its previous value.
*/
resetBatch() {
this.currentModelIndex = this.previousModelIndex;
return true;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
* @param maxWriteBatchSize - The max write batch size.
* @returns The client bulk write command.
*/
buildBatch(maxMessageSizeBytes, maxWriteBatchSize, maxBsonObjectSize) {
// We start by assuming the batch has no multi-updates, so it is retryable
// until we find them.
this.isBatchRetryable = true;
let commandLength = 0;
let currentNamespaceIndex = 0;
const command = this.baseCommand();
const namespaces = new Map();
for (const model of this.models) {
// In the case of retries we need to mark where we started this batch.
this.previousModelIndex = this.currentModelIndex;
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const index = namespaces.get(ns);
if (index != null) {
operations.push(buildOperation(model, index));
const nsIndex = namespaces.get(ns);
// Multi updates are not retryable.
if (model.name === 'deleteMany' || model.name === 'updateMany') {
this.isBatchRetryable = false;
}
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory);
let operationBuffer;
try {
operationBuffer = bson_1.BSON.serialize(operation);
}
catch (cause) {
throw new error_1.MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
}
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
operations.push(buildOperation(model, currentNamespaceIndex));
currentNamespaceIndex++;
const nsInfo = { ns: ns };
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
let nsInfoBuffer;
let operationBuffer;
try {
nsInfoBuffer = bson_1.BSON.serialize(nsInfo);
operationBuffer = bson_1.BSON.serialize(operation);
}
catch (cause) {
throw new error_1.MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
}
validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
const nsInfo = Array.from(namespaces.keys(), ns => ({ ns }));
// The base command.
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
baseCommand() {
const command = {

@@ -55,4 +156,4 @@ bulkWrite: 1,

ordered: this.options.ordered ?? true,
ops: new commands_1.DocumentSequence(operations),
nsInfo: new commands_1.DocumentSequence(nsInfo)
ops: new commands_1.DocumentSequence('ops'),
nsInfo: new commands_1.DocumentSequence('nsInfo')
};

@@ -67,6 +168,16 @@ // Add bypassDocumentValidation if it was present in the options.

}
return [command];
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined) {
command.comment = this.options.comment;
}
return command;
}
}
exports.ClientBulkWriteCommandBuilder = ClientBulkWriteCommandBuilder;
function validateBufferSize(name, buffer, maxBsonObjectSize) {
if (buffer.length > maxBsonObjectSize) {
throw new error_1.MongoInvalidArgumentError(`Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`);
}
}
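
The size accounting above reduces to one rule: an operation joins the current batch only while the serialized section stays under maxMessageSizeBytes (after the fixed 1000-byte overhead) and the ops document sequence stays under maxWriteBatchSize. Restated standalone (illustrative names, not driver API):

// Illustrative restatement of the batching rule; not driver API.
function fitsInBatch(
  commandLength: number,
  nextOpBytes: number,
  opsInBatch: number,
  maxMessageSizeBytes: number,
  maxWriteBatchSize: number
): boolean {
  return commandLength + nextOpBytes < maxMessageSizeBytes && opsInBatch < maxWriteBatchSize;
}

// With typical server limits (~48MB message, 100000 ops per batch):
fitsInBatch(1000, 512, 0, 48_000_000, 100_000); // true: the op joins this batch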
/**

@@ -78,3 +189,3 @@ * Build the insert one operation.

*/
const buildInsertOneOperation = (model, index) => {
const buildInsertOneOperation = (model, index, pkFactory) => {
const document = {

@@ -84,2 +195,3 @@ insert: index,

};
document.document._id = model.document._id ?? pkFactory.createPk();
return document;

@@ -146,5 +258,19 @@ };

/**
* Validate the update document.
* @param update - The update document.
*/
function validateUpdate(update) {
if (!(0, utils_1.hasAtomicOperators)(update)) {
throw new error_1.MongoAPIError('Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.');
}
}
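
This enforces the CRUD spec rule quoted in the comment below it: update documents in updateOne/updateMany models must be non-empty and use only atomic operators, while replaceOne documents must contain none. For illustration:

// Passes validateUpdate: every top-level key is an atomic operator.
const validUpdate = { $set: { status: 'done' }, $inc: { attempts: 1 } };
// Throws MongoAPIError in an update model: replacement-style document.
const invalidUpdate = { status: 'done' };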
/**
* Creates a delete operation based on the parameters.
*/
function createUpdateOperation(model, index, multi) {
// Update documents provided in UpdateOne and UpdateMany write models are
// required only to contain atomic modifiers (i.e. keys that start with "$").
// Drivers MUST throw an error if an update document is empty or if the
// document's first key does not start with "$".
validateUpdate(model.update);
const document = {

@@ -165,2 +291,5 @@ update: index,

}
if (model.collation) {
document.collation = model.collation;
}
return document;

@@ -175,2 +304,5 @@ }

const buildReplaceOneOperation = (model, index) => {
if ((0, utils_1.hasAtomicOperators)(model.replacement)) {
throw new error_1.MongoAPIError('Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.');
}
const document = {

@@ -188,2 +320,5 @@ update: index,

}
if (model.collation) {
document.collation = model.collation;
}
return document;

@@ -193,6 +328,6 @@ };

/** @internal */
function buildOperation(model, index) {
function buildOperation(model, index, pkFactory) {
switch (model.name) {
case 'insertOne':
return (0, exports.buildInsertOneOperation)(model, index);
return (0, exports.buildInsertOneOperation)(model, index, pkFactory);
case 'deleteOne':

@@ -199,0 +334,0 @@ return (0, exports.buildDeleteOneOperation)(model, index);

@@ -171,2 +171,5 @@ "use strict";

}
if (operation.hasAspect(operation_1.Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
throw previousOperationError;
}
if (hasWriteAspect && !(0, error_1.isRetryableWriteError)(previousOperationError))

@@ -193,2 +196,6 @@ throw previousOperationError;

try {
// If tries > 0 and we are command batching we need to reset the batch.
if (tries > 0 && operation.hasAspect(operation_1.Aspect.COMMAND_BATCHING)) {
operation.resetBatch();
}
return await operation.execute(server, session);

@@ -195,0 +202,0 @@ }

@@ -14,3 +14,4 @@ "use strict";

CURSOR_CREATING: Symbol('CURSOR_CREATING'),
MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER')
MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER'),
COMMAND_BATCHING: Symbol('COMMAND_BATCHING')
};

@@ -51,2 +52,5 @@ /** @internal */

}
resetBatch() {
return true;
}
get canRetryRead() {

@@ -53,0 +57,0 @@ return this.hasAspect(exports.Aspect.RETRYABLE) && this.hasAspect(exports.Aspect.READ_OPERATION);

@@ -63,2 +63,5 @@ "use strict";

this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
this.primary = hello?.primary ?? null;

@@ -65,0 +68,0 @@ this.me = hello?.me?.toLowerCase() ?? null;

@@ -338,3 +338,4 @@ "use strict";

'listCollections' in cmd ||
'listIndexes' in cmd);
'listIndexes' in cmd ||
'bulkWrite' in cmd);
}

@@ -341,0 +342,0 @@ return false;

@@ -89,5 +89,9 @@ "use strict";

for (const record of srvRecords) {
if ((0, utils_1.matchesParentDomain)(record.name, this.srvHost)) {
try {
(0, utils_1.checkParentDomainMatch)(record.name, this.srvHost);
finalAddresses.push(record);
}
catch (error) {
(0, utils_1.squashError)(error);
}
}

@@ -94,0 +98,0 @@ if (!finalAddresses.length) {

@@ -266,2 +266,3 @@ "use strict";

}
this.commitAttempted = false;
// increment txnNumber

@@ -305,3 +306,3 @@ this.incrementTransactionNumber();

}
if (this.transaction.state === transactions_1.TxnState.TRANSACTION_COMMITTED) {
if (this.transaction.state === transactions_1.TxnState.TRANSACTION_COMMITTED || this.commitAttempted) {
write_concern_1.WriteConcern.apply(command, { wtimeoutMS: 10000, ...wc, w: 'majority' });

@@ -322,5 +323,7 @@ }

await (0, execute_operation_1.executeOperation)(this.client, operation);
this.commitAttempted = undefined;
return;
}
catch (firstCommitError) {
this.commitAttempted = true;
if (firstCommitError instanceof error_1.MongoError && (0, error_1.isRetryableWriteError)(firstCommitError)) {

@@ -332,3 +335,7 @@ // SPEC-1185: apply majority write concern when retrying commitTransaction

try {
await (0, execute_operation_1.executeOperation)(this.client, operation);
await (0, execute_operation_1.executeOperation)(this.client, new run_command_1.RunAdminCommandOperation(command, {
session: this,
readPreference: read_preference_1.ReadPreference.primary,
bypassPinningCheck: true
}));
return;

@@ -335,0 +342,0 @@ }

@@ -41,3 +41,3 @@ "use strict";

exports.parseUnsignedInteger = parseUnsignedInteger;
exports.matchesParentDomain = matchesParentDomain;
exports.checkParentDomainMatch = checkParentDomainMatch;
exports.get = get;

@@ -238,6 +238,8 @@ exports.request = request;

function decorateWithExplain(command, explain) {
if (command.explain) {
return command;
const { verbosity, maxTimeMS } = explain;
const baseCommand = { explain: command, verbosity };
if (typeof maxTimeMS === 'number') {
baseCommand.maxTimeMS = maxTimeMS;
}
return { explain: command, verbosity: explain.verbosity };
return baseCommand;
}
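
The wrapper now hoists maxTimeMS beside verbosity instead of dropping it. For a find command the transformation looks like:

// Input command plus explain settings...
const command = { find: 'docs', filter: { a: 1 } };
const explain = { verbosity: 'queryPlanner', maxTimeMS: 500 };
// ...yield the wrapper built above:
const explained = { explain: command, verbosity: explain.verbosity, maxTimeMS: explain.maxTimeMS };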

@@ -929,3 +931,5 @@ /**

/**
* Determines whether a provided address matches the provided parent domain.
* This function throws a MongoAPIError in the event that either of the following is true:
* * If the provided address domain does not match the provided parent domain
* * If the parent domain contains less than three `.` separated parts and the provided address does not contain at least one more domain level than its parent
*

@@ -937,5 +941,5 @@ * If a DNS server were to become compromised SRV records would still need to

* @param srvHost - The domain to check the provided address against
* @returns Whether the provided address matches the parent domain
* @returns void
*/
function matchesParentDomain(address, srvHost) {
function checkParentDomainMatch(address, srvHost) {
// Remove trailing dot if exists on either the resolved address or the srv hostname

@@ -945,2 +949,3 @@ const normalizedAddress = address.endsWith('.') ? address.slice(0, address.length - 1) : address;

const allCharacterBeforeFirstDot = /^.*?\./;
const srvIsLessThanThreeParts = normalizedSrvHost.split('.').length < 3;
// Remove all characters before first dot

@@ -951,4 +956,15 @@ // Add leading dot back to string so

const addressDomain = `.${normalizedAddress.replace(allCharacterBeforeFirstDot, '')}`;
const srvHostDomain = `.${normalizedSrvHost.replace(allCharacterBeforeFirstDot, '')}`;
return addressDomain.endsWith(srvHostDomain);
let srvHostDomain = srvIsLessThanThreeParts
? normalizedSrvHost
: `.${normalizedSrvHost.replace(allCharacterBeforeFirstDot, '')}`;
if (!srvHostDomain.startsWith('.')) {
srvHostDomain = '.' + srvHostDomain;
}
if (srvIsLessThanThreeParts &&
normalizedAddress.split('.').length <= normalizedSrvHost.split('.').length) {
throw new error_1.MongoAPIError('Server record does not have at least one more domain level than parent URI');
}
if (!addressDomain.endsWith(srvHostDomain)) {
throw new error_1.MongoAPIError('Server record does not share hostname with parent URI');
}
}
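
The rewritten check keeps the old suffix rule for SRV hosts with three or more dot-separated parts, and for shorter hosts additionally requires the resolved record to sit at least one domain level deeper. Illustrative hostnames for an internal utility, shown for behavior only:

import { checkParentDomainMatch } from 'mongodb/lib/utils'; // hypothetical deep import

checkParentDomainMatch('a.example.com', 'srv.example.com'); // ok: shares .example.com
checkParentDomainMatch('db1.local.host', 'local.host');     // ok: ends with parent, one level deeper
// checkParentDomainMatch('local.host', 'local.host');      // throws MongoAPIError: not deeper than parent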

@@ -955,0 +971,0 @@ /**

{
"name": "mongodb",
"version": "6.9.0",
"version": "6.10.0",
"description": "The official MongoDB driver for Node.js",

@@ -5,0 +5,0 @@ "main": "lib/index.js",

@@ -1,2 +0,2 @@

import { type Document } from 'bson';
import { type Document } from './bson';

@@ -3,0 +3,0 @@ export * from './index';

@@ -0,1 +1,2 @@

/* eslint-disable no-restricted-imports */
import { BSON, type DeserializeOptions, type SerializeOptions } from 'bson';

@@ -15,2 +16,3 @@

deserialize,
type DeserializeOptions,
Document,

@@ -25,2 +27,3 @@ Double,

ObjectId,
type ObjectIdLike,
serialize,

@@ -27,0 +30,0 @@ Timestamp,

@@ -1,3 +0,1 @@

import { promisify } from 'util';
import { type BSONSerializeOptions, type Document, EJSON, resolveBSONOptions } from '../bson';

@@ -10,2 +8,3 @@ import type { Collection } from '../collection';

MongoInvalidArgumentError,
MongoRuntimeError,
MongoServerError,

@@ -26,3 +25,2 @@ MongoWriteConcernError

applyRetryableWrites,
type Callback,
getTopology,

@@ -505,82 +503,42 @@ hasAtomicOperators,

function executeCommands(
async function executeCommands(
bulkOperation: BulkOperationBase,
options: BulkWriteOptions,
callback: Callback<BulkWriteResult>
) {
options: BulkWriteOptions
): Promise<BulkWriteResult> {
if (bulkOperation.s.batches.length === 0) {
return callback(
undefined,
new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered)
);
return new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
}
const batch = bulkOperation.s.batches.shift() as Batch;
for (const batch of bulkOperation.s.batches) {
const finalOptions = resolveOptions(bulkOperation, {
...options,
ordered: bulkOperation.isOrdered
});
function resultHandler(err?: AnyError, result?: Document) {
// Error is a driver related error not a bulk op error, return early
if (err && 'message' in err && !(err instanceof MongoWriteConcernError)) {
return callback(
new MongoBulkWriteError(
err,
new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered)
)
);
if (finalOptions.bypassDocumentValidation !== true) {
delete finalOptions.bypassDocumentValidation;
}
if (err instanceof MongoWriteConcernError) {
return handleMongoWriteConcernError(
batch,
bulkOperation.s.bulkResult,
bulkOperation.isOrdered,
err,
callback
);
// Is the bypassDocumentValidation option specified
if (bulkOperation.s.bypassDocumentValidation === true) {
finalOptions.bypassDocumentValidation = true;
}
// Merge the results together
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, result);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
if (bulkOperation.handleWriteError(callback, writeResult)) return;
// Is the checkKeys option disabled
if (bulkOperation.s.checkKeys === false) {
finalOptions.checkKeys = false;
}
// Execute the next command in line
executeCommands(bulkOperation, options, callback);
}
if (finalOptions.retryWrites) {
if (isUpdateBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.multi);
}
const finalOptions = resolveOptions(bulkOperation, {
...options,
ordered: bulkOperation.isOrdered
});
if (finalOptions.bypassDocumentValidation !== true) {
delete finalOptions.bypassDocumentValidation;
}
// Set an operationId if provided
if (bulkOperation.operationId) {
resultHandler.operationId = bulkOperation.operationId;
}
// Is the bypassDocumentValidation option specified
if (bulkOperation.s.bypassDocumentValidation === true) {
finalOptions.bypassDocumentValidation = true;
}
// Is the checkKeys option disabled
if (bulkOperation.s.checkKeys === false) {
finalOptions.checkKeys = false;
}
if (finalOptions.retryWrites) {
if (isUpdateBatch(batch)) {
finalOptions.retryWrites = finalOptions.retryWrites && !batch.operations.some(op => op.multi);
if (isDeleteBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
}
}
if (isDeleteBatch(batch)) {
finalOptions.retryWrites =
finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
}
}
try {
const operation = isInsertBatch(batch)

@@ -594,35 +552,46 @@ ? new InsertOperation(bulkOperation.s.namespace, batch.operations, finalOptions)

if (operation != null) {
executeOperation(bulkOperation.s.collection.client, operation).then(
result => resultHandler(undefined, result),
error => resultHandler(error)
);
if (operation == null) throw new MongoRuntimeError(`Unknown batchType: ${batch.batchType}`);
let thrownError = null;
let result;
try {
result = await executeOperation(bulkOperation.s.collection.client, operation);
} catch (error) {
thrownError = error;
}
} catch (err) {
// Force top level error
err.ok = 0;
// Merge top level error and return
mergeBatchResults(batch, bulkOperation.s.bulkResult, err, undefined);
callback();
if (thrownError != null) {
if (thrownError instanceof MongoWriteConcernError) {
mergeBatchResults(batch, bulkOperation.s.bulkResult, thrownError, result);
const writeResult = new BulkWriteResult(
bulkOperation.s.bulkResult,
bulkOperation.isOrdered
);
throw new MongoBulkWriteError(
{
message: thrownError.result.writeConcernError.errmsg,
code: thrownError.result.writeConcernError.code
},
writeResult
);
} else {
// Error is a driver related error not a bulk op error, return early
throw new MongoBulkWriteError(
thrownError,
new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered)
);
}
}
mergeBatchResults(batch, bulkOperation.s.bulkResult, thrownError, result);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
bulkOperation.handleWriteError(writeResult);
}
}
function handleMongoWriteConcernError(
batch: Batch,
bulkResult: BulkResult,
isOrdered: boolean,
err: MongoWriteConcernError,
callback: Callback<BulkWriteResult>
) {
mergeBatchResults(batch, bulkResult, undefined, err.result);
bulkOperation.s.batches.length = 0;
callback(
new MongoBulkWriteError(
{
message: err.result.writeConcernError.errmsg,
code: err.result.writeConcernError.code
},
new BulkWriteResult(bulkResult, isOrdered)
)
);
const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered);
bulkOperation.handleWriteError(writeResult);
return writeResult;
}

@@ -882,4 +851,2 @@

const executeCommandsAsync = promisify(executeCommands);
/**

@@ -903,3 +870,3 @@ * TODO(NODE-4063)

execute(_server: Server, session: ClientSession | undefined): Promise<any> {
async execute(_server: Server, session: ClientSession | undefined): Promise<any> {
if (this.options.session == null) {

@@ -912,3 +879,3 @@ // An implicit session could have been created by 'executeOperation'

}
return executeCommandsAsync(this.bulkOperation, this.options);
return await executeCommands(this.bulkOperation, this.options);
}

@@ -1249,3 +1216,3 @@ }

*/
handleWriteError(callback: Callback<BulkWriteResult>, writeResult: BulkWriteResult): boolean {
handleWriteError(writeResult: BulkWriteResult): void {
if (this.s.bulkResult.writeErrors.length > 0) {

@@ -1256,14 +1223,10 @@ const msg = this.s.bulkResult.writeErrors[0].errmsg

callback(
new MongoBulkWriteError(
{
message: msg,
code: this.s.bulkResult.writeErrors[0].code,
writeErrors: this.s.bulkResult.writeErrors
},
writeResult
)
throw new MongoBulkWriteError(
{
message: msg,
code: this.s.bulkResult.writeErrors[0].code,
writeErrors: this.s.bulkResult.writeErrors
},
writeResult
);
return true;
}

@@ -1273,7 +1236,4 @@

if (writeConcernError) {
callback(new MongoBulkWriteError(writeConcernError, writeResult));
return true;
throw new MongoBulkWriteError(writeConcernError, writeResult);
}
return false;
}

@@ -1280,0 +1240,0 @@

@@ -7,3 +7,2 @@ import type { Document } from '../bson';

import type { UpdateStatement } from '../operations/update';
import { type Callback } from '../utils';
import {

@@ -24,8 +23,8 @@ Batch,

override handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean {
override handleWriteError(writeResult: BulkWriteResult): void {
if (this.s.batches.length) {
return false;
return;
}
return super.handleWriteError(callback, writeResult);
return super.handleWriteError(writeResult);
}

@@ -32,0 +31,0 @@

@@ -949,3 +949,3 @@ import type { Readable } from 'stream';

if (isResumableError(changeStreamError, this.cursor.maxWireVersion)) {
if (this.cursor.id != null && isResumableError(changeStreamError, this.cursor.maxWireVersion)) {
this._endStream();

@@ -979,3 +979,6 @@

if (!isResumableError(changeStreamError, this.cursor.maxWireVersion)) {
if (
this.cursor.id == null ||
!isResumableError(changeStreamError, this.cursor.maxWireVersion)
) {
try {

@@ -982,0 +985,0 @@ await this.close();

@@ -23,11 +23,6 @@ // Resolves the default auth mechanism according to

}
// Fallback to legacy selection method. If wire version >= 3, use scram-sha-1
if (hello.maxWireVersion >= 3) {
return AuthMechanism.MONGODB_SCRAM_SHA1;
}
}
// Default for wireprotocol < 3
return AuthMechanism.MONGODB_CR;
// Default auth mechanism for 4.0 and higher.
return AuthMechanism.MONGODB_SCRAM_SHA256;
}
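
The wire-version fallback is gone: when no mechanism is negotiated through speculative authentication, the driver now defaults to SCRAM-SHA-256. Deployments whose users only have SCRAM-SHA-1 credentials must pin the mechanism explicitly, for example:

```typescript
import { MongoClient } from 'mongodb';

// Explicitly request SCRAM-SHA-1 now that SCRAM-SHA-256 is the default.
const client = new MongoClient(
  'mongodb://user:pass@localhost:27017/?authMechanism=SCRAM-SHA-1&authSource=admin'
);
```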

@@ -177,3 +172,2 @@

(this.mechanism === AuthMechanism.MONGODB_GSSAPI ||
this.mechanism === AuthMechanism.MONGODB_CR ||
this.mechanism === AuthMechanism.MONGODB_PLAIN ||

@@ -180,0 +174,0 @@ this.mechanism === AuthMechanism.MONGODB_SCRAM_SHA1 ||

@@ -1,3 +0,2 @@

import type { Document } from 'bson';
import type { Document } from '../../bson';
import { MongoInvalidArgumentError, MongoMissingCredentialsError } from '../../error';

@@ -4,0 +3,0 @@ import type { HandshakeDocument } from '../connect';

@@ -1,4 +0,4 @@

import { type Document } from 'bson';
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { MongoMissingCredentialsError } from '../../../error';

@@ -5,0 +5,0 @@ import { ns } from '../../../utils';

@@ -1,3 +0,2 @@

import { Binary, BSON, type Document } from 'bson';
import { Binary, BSON, type Document } from '../../../bson';
import { type MongoCredentials } from '../mongo_credentials';

@@ -4,0 +3,0 @@ import { AuthMechanism } from '../providers';

@@ -1,3 +0,2 @@

import { BSON } from 'bson';
import { BSON } from '../../../bson';
import { MONGODB_ERROR_CODES, MongoError, MongoOIDCError } from '../../../error';

@@ -4,0 +3,0 @@ import { Timeout, TimeoutError } from '../../../timeout';

@@ -1,4 +0,4 @@

import { type Document } from 'bson';
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { ns } from '../../../utils';

@@ -5,0 +5,0 @@ import type { Connection } from '../../connection';

@@ -10,3 +10,8 @@ import { type Document, type ObjectId } from '../bson';

import { calculateDurationInMs, deepCopy } from '../utils';
import { OpMsgRequest, type OpQueryRequest, type WriteProtocolMessageType } from './commands';
import {
DocumentSequence,
OpMsgRequest,
type OpQueryRequest,
type WriteProtocolMessageType
} from './commands';
import type { Connection } from './connection';

@@ -253,3 +258,12 @@

if (command instanceof OpMsgRequest) {
return deepCopy(command.command);
const cmd = deepCopy(command.command);
// For OP_MSG with payload type 1 we need to pull the documents
// array out of the document sequence for monitoring.
if (cmd.ops instanceof DocumentSequence) {
cmd.ops = cmd.ops.documents;
}
if (cmd.nsInfo instanceof DocumentSequence) {
cmd.nsInfo = cmd.nsInfo.documents;
}
return cmd;
}

@@ -256,0 +270,0 @@

@@ -77,2 +77,4 @@ import type { BSONSerializeOptions, Document, Long } from '../bson';

partial: boolean;
/** moreToCome is an OP_MSG only concept */
moreToCome = false;

@@ -411,9 +413,17 @@ constructor(

export interface OpMsgOptions {
requestId: number;
serializeFunctions: boolean;
ignoreUndefined: boolean;
checkKeys: boolean;
maxBsonSize: number;
moreToCome: boolean;
exhaustAllowed: boolean;
socketTimeoutMS?: number;
session?: ClientSession;
numberToSkip?: number;
numberToReturn?: number;
returnFieldSelector?: Document;
pre32Limit?: number;
serializeFunctions?: boolean;
ignoreUndefined?: boolean;
maxBsonSize?: number;
checkKeys?: boolean;
secondaryOk?: boolean;
requestId?: number;
moreToCome?: boolean;
exhaustAllowed?: boolean;
readPreference: ReadPreference;

@@ -424,7 +434,57 @@ }

export class DocumentSequence {
field: string;
documents: Document[];
serializedDocumentsLength: number;
private chunks: Uint8Array[];
private header: Buffer;
constructor(documents: Document[]) {
this.documents = documents;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field: string, documents?: Document[]) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
// Document sequences start with type 1 at the first byte.
// Field strings must always be UTF-8.
const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document and its pre-serialized BSON bytes onto the sequence and
* return the new total serialized length of the sequence.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document: Document, buffer: Uint8Array): number {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin(): Uint8Array {
return Buffer.concat(this.chunks);
}
}
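
A small usage sketch of the reworked class: the field name is fixed at construction and each push takes pre-serialized bytes so the section header can be updated incrementally. DocumentSequence is internal, so the import path below is illustrative rather than a public API:

```typescript
import { BSON, type Document } from 'bson';
// Internal module; path shown for illustration only.
import { DocumentSequence } from 'mongodb/lib/cmap/commands';

const ops = new DocumentSequence('ops');
const op: Document = { insert: 0, document: { _id: 1 } };
// push() takes the document plus its raw BSON and returns the running total length.
const totalLength = ops.push(op, BSON.serialize(op));
const sectionBytes = ops.toBin(); // the complete OP_MSG payload-type-1 section
```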

@@ -471,3 +531,3 @@

this.checksumPresent = false;
this.moreToCome = options.moreToCome || false;
this.moreToCome = options.moreToCome ?? command.writeConcern?.w === 0;
this.exhaustAllowed =

@@ -540,17 +600,3 @@ typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;

if (value instanceof DocumentSequence) {
// Document sequences start with type 1 at the first byte.
const buffer = Buffer.allocUnsafe(1 + 4 + key.length);
buffer[0] = 1;
// Third part is the field name at offset 5.
encodeUTF8Into(buffer, key, 5);
chunks.push(buffer);
// Fourth part are the documents' bytes.
let docsLength = 0;
for (const doc of value.documents) {
const docBson = this.serializeBson(doc);
docsLength += docBson.length;
chunks.push(docBson);
}
// Second part of the sequence is the length at offset 1;
buffer.writeInt32LE(key.length + docsLength, 1);
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be

@@ -557,0 +603,0 @@ // removed in the OP_MSG request first section, and DocumentSequence is not a

@@ -1,6 +0,11 @@

import { type DeserializeOptions } from 'bson';
import { type Readable, Transform, type TransformCallback } from 'stream';
import { clearTimeout, setTimeout } from 'timers';
import { type BSONSerializeOptions, deserialize, type Document, type ObjectId } from '../bson';
import {
type BSONSerializeOptions,
deserialize,
type DeserializeOptions,
type Document,
type ObjectId
} from '../bson';
import { type AutoEncrypter } from '../client-side-encryption/auto_encrypter';

@@ -240,2 +245,4 @@ import {

this.socket.on('timeout', this.onTimeout.bind(this));
this.messageStream.pause();
}

@@ -443,3 +450,3 @@

if (options.noResponse) {
if (options.noResponse || message.moreToCome) {
yield MongoDBResponse.empty;

@@ -532,3 +539,7 @@ return;

message,
options.noResponse ? undefined : (object ??= document.toObject(bsonOptions)),
options.noResponse
? undefined
: message.moreToCome
? { ok: 1 }
: (object ??= document.toObject(bsonOptions)),
started,

@@ -653,2 +664,3 @@ this.description.serverConnectionId

this.dataEvents = onData(this.messageStream);
this.messageStream.resume();
for await (const message of this.dataEvents) {

@@ -664,2 +676,3 @@ const response = await decompressResponse(message);

this.dataEvents = null;
this.messageStream.pause();
this.throwIfAborted();

@@ -666,0 +679,0 @@ }

@@ -1,4 +0,4 @@

export const MIN_SUPPORTED_SERVER_VERSION = '3.6';
export const MIN_SUPPORTED_SERVER_VERSION = '4.0';
export const MAX_SUPPORTED_SERVER_VERSION = '8.0';
export const MIN_SUPPORTED_WIRE_VERSION = 6;
export const MIN_SUPPORTED_WIRE_VERSION = 7;
export const MAX_SUPPORTED_WIRE_VERSION = 25;

@@ -5,0 +5,0 @@ export const MIN_SUPPORTED_QE_WIRE_VERSION = 21;

@@ -1,3 +0,1 @@

import { type DeserializeOptions } from 'bson';
import {

@@ -9,2 +7,3 @@ Binary,

deserialize,
type DeserializeOptions,
getBigInt64LE,

@@ -11,0 +10,0 @@ getFloat64LE,

@@ -1,3 +0,1 @@

import { type DeserializeOptions } from 'bson';
import {

@@ -7,2 +5,3 @@ type BSONElement,

BSONType,
type DeserializeOptions,
type Document,

@@ -333,1 +332,31 @@ Long,

}
/**
* Client bulk writes have some extra metadata at the top level that needs to be
* included in the result returned to the user.
*/
export class ClientBulkWriteCursorResponse extends CursorResponse {
get insertedCount() {
return this.get('nInserted', BSONType.int, true);
}
get upsertedCount() {
return this.get('nUpserted', BSONType.int, true);
}
get matchedCount() {
return this.get('nMatched', BSONType.int, true);
}
get modifiedCount() {
return this.get('nModified', BSONType.int, true);
}
get deletedCount() {
return this.get('nDeleted', BSONType.int, true);
}
get writeConcernError() {
return this.get('writeConcernError', BSONType.object, false);
}
}

@@ -37,2 +37,3 @@ import * as dns from 'dns';

import {
checkParentDomainMatch,
DEFAULT_PK_FACTORY,

@@ -42,3 +43,2 @@ emitWarning,

isRecord,
matchesParentDomain,
parseInteger,

@@ -69,7 +69,2 @@ setDifference,

if (options.srvHost.split('.').length < 3) {
// TODO(NODE-3484): Replace with MongoConnectionStringError
throw new MongoAPIError('URI must include hostname, domain name, and tld');
}
// Asynchronously start TXT resolution so that we do not have to wait until

@@ -92,5 +87,3 @@ // the SRV record is resolved before starting a second DNS query.

for (const { name } of addresses) {
if (!matchesParentDomain(name, lookupAddress)) {
throw new MongoAPIError('Server record does not share hostname with parent URI');
}
checkParentDomainMatch(name, lookupAddress);
}

@@ -97,0 +90,0 @@

import type { Document } from '../bson';
import type { ExplainVerbosityLike } from '../explain';
import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain';
import type { MongoClient } from '../mongo_client';

@@ -69,3 +69,3 @@ import { AggregateOperation, type AggregateOptions } from '../operations/aggregate';

/** Execute the explain for the cursor */
async explain(verbosity?: ExplainVerbosityLike): Promise<Document> {
async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document> {
return (

@@ -72,0 +72,0 @@ await executeOperation(

import { type Document } from '../bson';
import { CursorResponse } from '../cmap/wire_protocol/responses';
import { MongoInvalidArgumentError, MongoTailableCursorError } from '../error';
import { type ExplainVerbosityLike } from '../explain';
import { type ExplainCommandOptions, type ExplainVerbosityLike } from '../explain';
import type { MongoClient } from '../mongo_client';

@@ -136,3 +136,3 @@ import type { CollationOptions } from '../operations/command';

/** Execute the explain for the cursor */
async explain(verbosity?: ExplainVerbosityLike): Promise<Document> {
async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise<Document> {
return (

@@ -139,0 +139,0 @@ await executeOperation(

@@ -282,3 +282,3 @@ import { Admin } from './admin';

/**
* Execute an aggregation framework pipeline against the database, needs MongoDB \>= 3.6
* Execute an aggregation framework pipeline against the database.
*

@@ -285,0 +285,0 @@ * @param pipeline - An array of aggregation stages to be executed

import type { Document } from './bson';
import {
type ClientBulkWriteError,
type ClientBulkWriteResult
} from './operations/client_bulk_write/common';
import type { ServerType } from './sdam/common';

@@ -620,2 +624,94 @@ import type { TopologyVersion } from './sdam/server_description';

/**
* An error indicating that an error occurred when executing the bulk write.
*
* @public
* @category Error
*/
export class MongoClientBulkWriteError extends MongoServerError {
/**
* Write concern errors that occurred while executing the bulk write. This list may have
* multiple items if more than one server command was required to execute the bulk write.
*/
writeConcernErrors: Document[];
/**
* Errors that occurred during the execution of individual write operations. This map will
* contain at most one entry if the bulk write was ordered.
*/
writeErrors: Map<number, ClientBulkWriteError>;
/**
* The results of any successful operations that were performed before the error was
* encountered.
*/
partialResult?: ClientBulkWriteResult;
/**
* Initialize the client bulk write error.
* @param message - The error message.
*/
constructor(message: ErrorDescription) {
super(message);
this.writeConcernErrors = [];
this.writeErrors = new Map();
}
override get name(): string {
return 'MongoClientBulkWriteError';
}
}
/**
* An error indicating that an error occurred when processing bulk write results.
*
* @public
* @category Error
*/
export class MongoClientBulkWriteCursorError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string) {
super(message);
}
override get name(): string {
return 'MongoClientBulkWriteCursorError';
}
}
/**
* An error indicating that an error occurred on the client when executing a client bulk write.
*
* @public
* @category Error
*/
export class MongoClientBulkWriteExecutionError extends MongoRuntimeError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string) {
super(message);
}
override get name(): string {
return 'MongoClientBulkWriteExecutionError';
}
}
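
A sketch of how the new error hierarchy might be handled around MongoClient.bulkWrite, assuming a connected `client` and a prepared `models` array:

```typescript
import { MongoClientBulkWriteError, MongoClientBulkWriteExecutionError } from 'mongodb';

try {
  await client.bulkWrite(models);
} catch (err) {
  if (err instanceof MongoClientBulkWriteError) {
    // Server-side write failures: per-operation errors plus any partial result.
    for (const [index, writeError] of err.writeErrors) {
      console.log(`model ${index} failed: ${writeError.message}`);
    }
    console.log(err.partialResult?.insertedCount);
  } else if (err instanceof MongoClientBulkWriteExecutionError) {
    // Client-side failure while executing, e.g. invalid option combinations.
  }
}
```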
/**
* An error generated when a ChangeStream operation fails to execute.

@@ -997,4 +1093,4 @@ *

**/
constructor(message: string) {
super(message);
constructor(message: string, options?: { cause?: Error }) {
super(message, options);
}

@@ -1001,0 +1097,0 @@

@@ -1,3 +0,1 @@

import { MongoInvalidArgumentError } from './error';
/** @public */

@@ -16,4 +14,3 @@ export const ExplainVerbosity = Object.freeze({

* For backwards compatibility, true is interpreted as "allPlansExecution"
* and false as "queryPlanner". Prior to server version 3.6, aggregate()
* ignores the verbosity parameter and executes in "queryPlanner".
* and false as "queryPlanner".
* @public

@@ -24,5 +21,41 @@ */

/** @public */
export interface ExplainCommandOptions {
/** The explain verbosity for the command. */
verbosity: ExplainVerbosity;
/** The maxTimeMS setting for the command. */
maxTimeMS?: number;
}
/**
* @public
*
* When set, this configures an explain command. Valid values are boolean (for legacy compatibility,
* see {@link ExplainVerbosityLike}), a string containing the explain verbosity, or an object containing the verbosity and
* an optional maxTimeMS.
*
* Examples of valid usage:
*
* ```typescript
* collection.find({ name: 'john doe' }, { explain: true });
* collection.find({ name: 'john doe' }, { explain: false });
* collection.find({ name: 'john doe' }, { explain: 'queryPlanner' });
* collection.find({ name: 'john doe' }, { explain: { verbosity: 'queryPlanner' } });
* ```
*
* maxTimeMS can be configured to limit the amount of time the server
* spends executing an explain by providing an object:
*
* ```typescript
* // limits the `explain` command to no more than 2 seconds
* collection.find({ name: 'john doe' }, {
* explain: {
* verbosity: 'queryPlanner',
* maxTimeMS: 2000
* }
* });
* ```
*/
export interface ExplainOptions {
/** Specifies the verbosity mode for the explain output. */
explain?: ExplainVerbosityLike;
explain?: ExplainVerbosityLike | ExplainCommandOptions;
}

@@ -32,5 +65,6 @@

export class Explain {
verbosity: ExplainVerbosity;
readonly verbosity: ExplainVerbosity;
readonly maxTimeMS?: number;
constructor(verbosity: ExplainVerbosityLike) {
private constructor(verbosity: ExplainVerbosityLike, maxTimeMS?: number) {
if (typeof verbosity === 'boolean') {

@@ -43,8 +77,9 @@ this.verbosity = verbosity

}
this.maxTimeMS = maxTimeMS;
}
static fromOptions(options?: ExplainOptions): Explain | undefined {
if (options?.explain == null) return;
static fromOptions({ explain }: ExplainOptions = {}): Explain | undefined {
if (explain == null) return;
const explain = options.explain;
if (typeof explain === 'boolean' || typeof explain === 'string') {

@@ -54,4 +89,5 @@ return new Explain(explain);

throw new MongoInvalidArgumentError('Field "explain" must be a string or a boolean');
const { verbosity, maxTimeMS } = explain;
return new Explain(verbosity, maxTimeMS);
}
}
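
With Explain now carrying an optional maxTimeMS, the cursor `explain()` overloads shown earlier accept either a verbosity value or a full command options object. Assuming an existing `collection` handle:

```typescript
// Both forms are accepted by find and aggregate cursors in 6.10:
const byVerbosity = await collection.find({ name: 'john doe' }).explain('executionStats');
const byOptions = await collection
  .find({ name: 'john doe' })
  .explain({ verbosity: 'executionStats', maxTimeMS: 2000 });
```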

@@ -48,2 +48,5 @@ import { Admin } from './admin';

MongoChangeStreamError,
MongoClientBulkWriteCursorError,
MongoClientBulkWriteError,
MongoClientBulkWriteExecutionError,
MongoCompatibilityError,

@@ -372,4 +375,9 @@ MongoCursorExhaustedError,

export type { AnyError, ErrorDescription, MongoNetworkErrorOptions } from './error';
export type { Explain, ExplainOptions, ExplainVerbosityLike } from './explain';
export type {
Explain,
ExplainCommandOptions,
ExplainOptions,
ExplainVerbosityLike
} from './explain';
export type {
GridFSBucketReadStreamOptions,

@@ -474,2 +482,19 @@ GridFSBucketReadStreamOptionsWithRevision,

export type {
AnyClientBulkWriteModel,
ClientBulkWriteError,
ClientBulkWriteModel,
ClientBulkWriteOptions,
ClientBulkWriteResult,
ClientDeleteManyModel,
ClientDeleteOneModel,
ClientDeleteResult,
ClientInsertOneModel,
ClientInsertOneResult,
ClientReplaceOneModel,
ClientUpdateManyModel,
ClientUpdateOneModel,
ClientUpdateResult,
ClientWriteModel
} from './operations/client_bulk_write/common';
export type {
CollationOptions,

@@ -476,0 +501,0 @@ CommandOperation,

import { type AuthProvider } from './cmap/auth/auth_provider';
import { GSSAPI } from './cmap/auth/gssapi';
import { type AuthMechanismProperties } from './cmap/auth/mongo_credentials';
import { MongoCR } from './cmap/auth/mongocr';
import { MongoDBAWS } from './cmap/auth/mongodb_aws';

@@ -19,3 +18,10 @@ import { MongoDBOIDC, OIDC_WORKFLOWS, type Workflow } from './cmap/auth/mongodb_oidc';

[AuthMechanism.MONGODB_AWS, () => new MongoDBAWS()],
[AuthMechanism.MONGODB_CR, () => new MongoCR()],
[
AuthMechanism.MONGODB_CR,
() => {
throw new MongoInvalidArgumentError(
'MONGODB-CR is no longer a supported auth mechanism in MongoDB 4.0+'
);
}
],
[AuthMechanism.MONGODB_GSSAPI, () => new GSSAPI()],

@@ -22,0 +28,0 @@ [AuthMechanism.MONGODB_OIDC, (workflow?: Workflow) => new MongoDBOIDC(workflow)],

@@ -33,2 +33,8 @@ import { promises as fs } from 'fs';

import { TypedEventEmitter } from './mongo_types';
import {
type ClientBulkWriteModel,
type ClientBulkWriteOptions,
type ClientBulkWriteResult
} from './operations/client_bulk_write/common';
import { ClientBulkWriteExecutor } from './operations/client_bulk_write/executor';
import { executeOperation } from './operations/execute_operation';

@@ -329,3 +335,2 @@ import { RunAdminCommandOperation } from './operations/run_command';

/** @internal */
const kOptions = Symbol('options');

@@ -483,2 +488,25 @@

/**
* Executes a client bulk write operation, available on server 8.0+.
* @param models - The client bulk write models.
* @param options - The client bulk write options.
* @returns A ClientBulkWriteResult for acknowledged writes and ok: 1 for unacknowledged writes.
*/
async bulkWrite<SchemaMap extends Record<string, Document> = Record<string, Document>>(
models: ReadonlyArray<ClientBulkWriteModel<SchemaMap>>,
options?: ClientBulkWriteOptions
): Promise<ClientBulkWriteResult> {
if (this.autoEncrypter) {
throw new MongoInvalidArgumentError(
'MongoClient bulkWrite does not currently support automatic encryption.'
);
}
// We do not need schema type information past this point ("as any" is fine)
return await new ClientBulkWriteExecutor(
this,
models as any,
resolveOptions(this, options)
).execute();
}
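
A usage sketch of the new top-level API, assuming a connected client on MongoDB 8.0+; each model names its own namespace, so one call can span collections and databases:

```typescript
const result = await client.bulkWrite(
  [
    { namespace: 'db.books', name: 'insertOne', document: { title: 'Practical MongoDB Aggregations' } },
    { namespace: 'db.authors', name: 'deleteOne', filter: { name: 'unknown' } }
  ],
  { ordered: true }
);
console.log(result.insertedCount, result.deletedCount);
```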
/**
* Connect to MongoDB using a url

@@ -485,0 +513,0 @@ *

@@ -1,2 +0,1 @@

import type { BSONType, ObjectIdLike } from 'bson';
import { EventEmitter } from 'events';

@@ -7,2 +6,3 @@

BSONRegExp,
BSONType,
Decimal128,

@@ -14,2 +14,3 @@ Document,

ObjectId,
ObjectIdLike,
Timestamp

@@ -16,0 +17,0 @@ } from './bson';

import type { Document } from '../bson';
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
import { MongoInvalidArgumentError } from '../error';
import { type ExplainOptions } from '../explain';
import type { Server } from '../sdam/server';

@@ -17,3 +18,3 @@ import type { ClientSession } from '../sessions';

/** @public */
export interface AggregateOptions extends CommandOperationOptions {
export interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
/** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires mongodb 2.6 \>). */

@@ -39,2 +40,9 @@ allowDiskUse?: boolean;

out?: string;
/**
* Specifies the verbosity mode for the explain output.
* @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
* or `db.aggregate().explain()`.
*/
explain?: ExplainOptions['explain'];
}

@@ -41,0 +49,0 @@

@@ -1,4 +0,7 @@

import { type Document } from '../../bson';
import { BSON, type Document } from '../../bson';
import { DocumentSequence } from '../../cmap/commands';
import { MongoAPIError, MongoInvalidArgumentError } from '../../error';
import { type PkFactory } from '../../mongo_client';
import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
import { DEFAULT_PK_FACTORY, hasAtomicOperators } from '../../utils';
import { type CollationOptions } from '../command';

@@ -26,8 +29,23 @@ import { type Hint } from '../operation';

let?: Document;
comment?: any;
}
/**
* The byte overhead for the extra fields added after command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */
export class ClientBulkWriteCommandBuilder {
models: AnyClientBulkWriteModel[];
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>;
options: ClientBulkWriteOptions;
pkFactory: PkFactory;
/** The current index in the models array that is being processed. */
currentModelIndex: number;
/** The model index that the builder was on when it finished the previous batch. Used for resets when retrying. */
previousModelIndex: number;
/** The last array of operations that were created. Used by the results merger for indexing results. */
lastOperations: Document[];
/** Returns true if the current batch being created has no multi-updates. */
isBatchRetryable: boolean;

@@ -38,5 +56,14 @@ /**

*/
constructor(models: AnyClientBulkWriteModel[], options: ClientBulkWriteOptions) {
constructor(
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>,
options: ClientBulkWriteOptions,
pkFactory?: PkFactory
) {
this.models = models;
this.options = options;
this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.previousModelIndex = 0;
this.lastOperations = [];
this.isBatchRetryable = true;
}

@@ -56,24 +83,125 @@

/**
* Build the bulk write commands from the models.
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
buildCommands(): ClientBulkWriteCommand[] {
// Iterate the models to build the ops and nsInfo fields.
const operations = [];
hasNextBatch(): boolean {
return this.currentModelIndex < this.models.length;
}
/**
* When we need to retry a command we need to set the current
* model index back to its previous value.
*/
resetBatch(): boolean {
this.currentModelIndex = this.previousModelIndex;
return true;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
* @param maxWriteBatchSize - The max write batch size.
* @param maxBsonObjectSize - The max BSON object size in bytes.
* @returns The client bulk write command.
*/
buildBatch(
maxMessageSizeBytes: number,
maxWriteBatchSize: number,
maxBsonObjectSize: number
): ClientBulkWriteCommand {
// We start by assuming the batch has no multi-updates, so it is retryable
// until we find them.
this.isBatchRetryable = true;
let commandLength = 0;
let currentNamespaceIndex = 0;
const command: ClientBulkWriteCommand = this.baseCommand();
const namespaces = new Map<string, number>();
for (const model of this.models) {
// In the case of retries we need to mark where we started this batch.
this.previousModelIndex = this.currentModelIndex;
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const index = namespaces.get(ns);
if (index != null) {
operations.push(buildOperation(model, index));
const nsIndex = namespaces.get(ns);
// Multi updates are not retryable.
if (model.name === 'deleteMany' || model.name === 'updateMany') {
this.isBatchRetryable = false;
}
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory);
let operationBuffer;
try {
operationBuffer = BSON.serialize(operation);
} catch (cause) {
throw new MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
}
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (
commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
} else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
operations.push(buildOperation(model, currentNamespaceIndex));
currentNamespaceIndex++;
const nsInfo = { ns: ns };
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
let nsInfoBuffer;
let operationBuffer;
try {
nsInfoBuffer = BSON.serialize(nsInfo);
operationBuffer = BSON.serialize(operation);
} catch (cause) {
throw new MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
}
validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (
commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize
) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
} else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
const nsInfo = Array.from(namespaces.keys(), ns => ({ ns }));
// The base command.
private baseCommand(): ClientBulkWriteCommand {
const command: ClientBulkWriteCommand = {

@@ -83,4 +211,4 @@ bulkWrite: 1,

ordered: this.options.ordered ?? true,
ops: new DocumentSequence(operations),
nsInfo: new DocumentSequence(nsInfo)
ops: new DocumentSequence('ops'),
nsInfo: new DocumentSequence('nsInfo')
};

@@ -95,6 +223,21 @@ // Add bypassDocumentValidation if it was present in the options.

}
return [command];
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined) {
command.comment = this.options.comment;
}
return command;
}
}
function validateBufferSize(name: string, buffer: Uint8Array, maxBsonObjectSize: number) {
if (buffer.length > maxBsonObjectSize) {
throw new MongoInvalidArgumentError(
`Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`
);
}
}
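
Roughly, the executor drives the builder like this (a sketch: `runCommand` and `isRetryable` are illustrative stand-ins, and the limit values normally come from the server's hello response):

```typescript
const builder = new ClientBulkWriteCommandBuilder(models, options);
const maxMessageSizeBytes = 48_000_000;
const maxWriteBatchSize = 100_000;
const maxBsonObjectSize = 16_777_216;

while (builder.hasNextBatch()) {
  const command = builder.buildBatch(maxMessageSizeBytes, maxWriteBatchSize, maxBsonObjectSize);
  try {
    await runCommand(command); // illustrative send, not a driver API
  } catch (error) {
    if (isRetryable(error) && builder.isBatchRetryable) {
      builder.resetBatch(); // rewind currentModelIndex to previousModelIndex
      continue;
    }
    throw error;
  }
}
```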
/** @internal */

@@ -113,4 +256,5 @@ interface ClientInsertOperation {

export const buildInsertOneOperation = (
model: ClientInsertOneModel,
index: number
model: ClientInsertOneModel<Document>,
index: number,
pkFactory: PkFactory
): ClientInsertOperation => {

@@ -121,2 +265,3 @@ const document: ClientInsertOperation = {

};
document.document._id = model.document._id ?? pkFactory.createPk();
return document;

@@ -140,3 +285,6 @@ };

*/
export const buildDeleteOneOperation = (model: ClientDeleteOneModel, index: number): Document => {
export const buildDeleteOneOperation = (
model: ClientDeleteOneModel<Document>,
index: number
): Document => {
return createDeleteOperation(model, index, false);

@@ -151,3 +299,6 @@ };

*/
export const buildDeleteManyOperation = (model: ClientDeleteManyModel, index: number): Document => {
export const buildDeleteManyOperation = (
model: ClientDeleteManyModel<Document>,
index: number
): Document => {
return createDeleteOperation(model, index, true);

@@ -160,3 +311,3 @@ };

function createDeleteOperation(
model: ClientDeleteOneModel | ClientDeleteManyModel,
model: ClientDeleteOneModel<Document> | ClientDeleteManyModel<Document>,
index: number,

@@ -188,2 +339,3 @@ multi: boolean

arrayFilters?: Document[];
collation?: CollationOptions;
}

@@ -198,3 +350,3 @@

export const buildUpdateOneOperation = (
model: ClientUpdateOneModel,
model: ClientUpdateOneModel<Document>,
index: number

@@ -212,3 +364,3 @@ ): ClientUpdateOperation => {

export const buildUpdateManyOperation = (
model: ClientUpdateManyModel,
model: ClientUpdateManyModel<Document>,
index: number

@@ -220,9 +372,26 @@ ): ClientUpdateOperation => {

/**
* Validate the update document.
* @param update - The update document.
*/
function validateUpdate(update: Document) {
if (!hasAtomicOperators(update)) {
throw new MongoAPIError(
'Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.'
);
}
}
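
Concretely, under this rule (illustrative values):

```typescript
// Accepted: every top-level key is an atomic ($-prefixed) modifier.
const ok = { $set: { title: 'New Title' }, $inc: { edition: 1 } };
// Rejected: a plain replacement document (or an empty one) throws MongoAPIError.
const bad = { title: 'New Title' };
```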
/**
* Creates an update operation based on the parameters.
*/
function createUpdateOperation(
model: ClientUpdateOneModel | ClientUpdateManyModel,
model: ClientUpdateOneModel<Document> | ClientUpdateManyModel<Document>,
index: number,
multi: boolean
): ClientUpdateOperation {
// Update documents provided in UpdateOne and UpdateMany write models are
// required only to contain atomic modifiers (i.e. keys that start with "$").
// Drivers MUST throw an error if an update document is empty or if the
// document's first key does not start with "$".
validateUpdate(model.update);
const document: ClientUpdateOperation = {

@@ -243,2 +412,5 @@ update: index,

}
if (model.collation) {
document.collation = model.collation;
}
return document;

@@ -255,2 +427,3 @@ }

upsert?: boolean;
collation?: CollationOptions;
}

@@ -265,5 +438,11 @@

export const buildReplaceOneOperation = (
model: ClientReplaceOneModel,
model: ClientReplaceOneModel<Document>,
index: number
): ClientReplaceOneOperation => {
if (hasAtomicOperators(model.replacement)) {
throw new MongoAPIError(
'Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.'
);
}
const document: ClientReplaceOneOperation = {

@@ -281,2 +460,5 @@ update: index,

}
if (model.collation) {
document.collation = model.collation;
}
return document;

@@ -286,6 +468,10 @@ };

/** @internal */
export function buildOperation(model: AnyClientBulkWriteModel, index: number): Document {
export function buildOperation(
model: AnyClientBulkWriteModel<Document>,
index: number,
pkFactory: PkFactory
): Document {
switch (model.name) {
case 'insertOne':
return buildInsertOneOperation(model, index);
return buildInsertOneOperation(model, index, pkFactory);
case 'deleteOne':

@@ -292,0 +478,0 @@ return buildDeleteOneOperation(model, index);

@@ -30,3 +30,10 @@ import { type Document } from '../../bson';

export interface ClientWriteModel {
/** The namespace for the write. */
/**
* The namespace for the write.
*
* A namespace is a combination of the database name and the name of the collection: `<database-name>.<collection>`.
* All documents belong to a namespace.
*
* @see https://www.mongodb.com/docs/manual/reference/limits/#std-label-faq-dev-namespace
*/
namespace: string;

@@ -36,10 +43,10 @@ }

/** @public */
export interface ClientInsertOneModel extends ClientWriteModel {
export interface ClientInsertOneModel<TSchema> extends ClientWriteModel {
name: 'insertOne';
/** The document to insert. */
document: OptionalId<Document>;
document: OptionalId<TSchema>;
}
/** @public */
export interface ClientDeleteOneModel extends ClientWriteModel {
export interface ClientDeleteOneModel<TSchema> extends ClientWriteModel {
name: 'deleteOne';

@@ -50,3 +57,3 @@ /**

*/
filter: Filter<Document>;
filter: Filter<TSchema>;
/** Specifies a collation. */

@@ -59,3 +66,3 @@ collation?: CollationOptions;

/** @public */
export interface ClientDeleteManyModel extends ClientWriteModel {
export interface ClientDeleteManyModel<TSchema> extends ClientWriteModel {
name: 'deleteMany';

@@ -66,3 +73,3 @@ /**

*/
filter: Filter<Document>;
filter: Filter<TSchema>;
/** Specifies a collation. */

@@ -75,3 +82,3 @@ collation?: CollationOptions;

/** @public */
export interface ClientReplaceOneModel extends ClientWriteModel {
export interface ClientReplaceOneModel<TSchema> extends ClientWriteModel {
name: 'replaceOne';

@@ -82,5 +89,5 @@ /**

*/
filter: Filter<Document>;
filter: Filter<TSchema>;
/** The document with which to replace the matched document. */
replacement: WithoutId<Document>;
replacement: WithoutId<TSchema>;
/** Specifies a collation. */

@@ -95,3 +102,3 @@ collation?: CollationOptions;

/** @public */
export interface ClientUpdateOneModel extends ClientWriteModel {
export interface ClientUpdateOneModel<TSchema> extends ClientWriteModel {
name: 'updateOne';

@@ -102,3 +109,3 @@ /**

*/
filter: Filter<Document>;
filter: Filter<TSchema>;
/**

@@ -109,3 +116,3 @@ * The modifications to apply. The value can be either:

*/
update: UpdateFilter<Document> | Document[];
update: UpdateFilter<TSchema> | Document[];
/** A set of filters specifying to which array elements an update should apply. */

@@ -122,3 +129,3 @@ arrayFilters?: Document[];

/** @public */
export interface ClientUpdateManyModel extends ClientWriteModel {
export interface ClientUpdateManyModel<TSchema> extends ClientWriteModel {
name: 'updateMany';

@@ -129,3 +136,3 @@ /**

*/
filter: Filter<Document>;
filter: Filter<TSchema>;
/**

@@ -136,3 +143,3 @@ * The modifications to apply. The value can be either:

*/
update: UpdateFilter<Document> | Document[];
update: UpdateFilter<TSchema> | Document[];
/** A set of filters specifying to which array elements an update should apply. */

@@ -153,8 +160,126 @@ arrayFilters?: Document[];

*/
export type AnyClientBulkWriteModel =
| ClientInsertOneModel
| ClientReplaceOneModel
| ClientUpdateOneModel
| ClientUpdateManyModel
| ClientDeleteOneModel
| ClientDeleteManyModel;
export type AnyClientBulkWriteModel<TSchema extends Document> =
| ClientInsertOneModel<TSchema>
| ClientReplaceOneModel<TSchema>
| ClientUpdateOneModel<TSchema>
| ClientUpdateManyModel<TSchema>
| ClientDeleteOneModel<TSchema>
| ClientDeleteManyModel<TSchema>;
/**
* A mapping of namespace strings to collections schemas.
* @public
*
* @example
* ```ts
* type MongoDBSchemas = {
* 'db.books': Book;
* 'db.authors': Author;
* }
*
* const model: ClientBulkWriteModel<MongoDBSchemas> = {
* namespace: 'db.books',
* name: 'insertOne',
* document: { title: 'Practical MongoDB Aggregations', authorName: 3 } // error: `authorName` cannot be a number
* };
* ```
*
* The type of the `namespace` field narrows other parts of the BulkWriteModel to use the correct schema for type assertions.
*
*/
export type ClientBulkWriteModel<
SchemaMap extends Record<string, Document> = Record<string, Document>
> = {
[Namespace in keyof SchemaMap]: AnyClientBulkWriteModel<SchemaMap[Namespace]> & {
namespace: Namespace;
};
}[keyof SchemaMap];
/** @public */
export interface ClientBulkWriteResult {
/**
* Whether the bulk write was acknowledged.
*/
readonly acknowledged: boolean;
/**
* The total number of documents inserted across all insert operations.
*/
readonly insertedCount: number;
/**
* The total number of documents upserted across all update operations.
*/
readonly upsertedCount: number;
/**
* The total number of documents matched across all update operations.
*/
readonly matchedCount: number;
/**
* The total number of documents modified across all update operations.
*/
readonly modifiedCount: number;
/**
* The total number of documents deleted across all delete operations.
*/
readonly deletedCount: number;
/**
* The results of each individual insert operation that was successfully performed.
*/
readonly insertResults?: ReadonlyMap<number, ClientInsertOneResult>;
/**
* The results of each individual update operation that was successfully performed.
*/
readonly updateResults?: ReadonlyMap<number, ClientUpdateResult>;
/**
* The results of each individual delete operation that was successfully performed.
*/
readonly deleteResults?: ReadonlyMap<number, ClientDeleteResult>;
}
/** @public */
export interface ClientBulkWriteError {
code: number;
message: string;
}
/** @public */
export interface ClientInsertOneResult {
/**
* The _id of the inserted document.
*/
insertedId: any;
}
/** @public */
export interface ClientUpdateResult {
/**
* The number of documents that matched the filter.
*/
matchedCount: number;
/**
* The number of documents that were modified.
*/
modifiedCount: number;
/**
* The _id field of the upserted document if an upsert occurred.
*
* It MUST be possible to discern between a BSON Null upserted ID value and this field being
* unset. If necessary, drivers MAY add a didUpsert boolean field to differentiate between
* these two cases.
*/
upsertedId?: any;
/**
* Determines if the upsert did include an _id, which includes the case of the _id being null.
*/
didUpsert: boolean;
}
/** @public */
export interface ClientDeleteResult {
/**
* The number of documents that were deleted.
*/
deletedCount: number;
}
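
Reading the per-operation maps might look like the following; this assumes the verbose maps were requested on the call (the `verboseResults` option name is an assumption here, so verify it against the released options):

```typescript
const res = await client.bulkWrite(models, { verboseResults: true });
for (const [index, insert] of res.insertResults ?? new Map()) {
  console.log(`model ${index} inserted with _id`, insert.insertedId);
}
for (const [index, update] of res.updateResults ?? new Map()) {
  if (update.didUpsert) console.log(`model ${index} upserted`, update.upsertedId);
}
```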

@@ -233,2 +233,6 @@ import {

if (operation.hasAspect(Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
throw previousOperationError;
}
if (hasWriteAspect && !isRetryableWriteError(previousOperationError))

@@ -264,2 +268,6 @@ throw previousOperationError;

try {
// If tries > 0 and we are command batching we need to reset the batch.
if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) {
operation.resetBatch();
}
return await operation.execute(server, session);

@@ -266,0 +274,0 @@ } catch (operationError) {

import type { Document } from '../bson';
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
import { MongoInvalidArgumentError } from '../error';
import { type ExplainOptions } from '../explain';
import { ReadConcern } from '../read_concern';

@@ -18,3 +19,3 @@ import type { Server } from '../sdam/server';

export interface FindOptions<TSchema extends Document = Document>
extends Omit<CommandOperationOptions, 'writeConcern'> {
extends Omit<CommandOperationOptions, 'writeConcern' | 'explain'> {
/** Sets the limit of documents returned in the query. */

@@ -67,2 +68,8 @@ limit?: number;

oplogReplay?: boolean;
/**
* Specifies the verbosity mode for the explain output.
* @deprecated This API is deprecated in favor of `collection.find().explain()`.
*/
explain?: ExplainOptions['explain'];
}

@@ -69,0 +76,0 @@

@@ -14,3 +14,4 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '../bson';

CURSOR_CREATING: Symbol('CURSOR_CREATING'),
MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER')
MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER'),
COMMAND_BATCHING: Symbol('COMMAND_BATCHING')
} as const;

@@ -102,2 +103,6 @@

resetBatch(): boolean {
return true;
}
get canRetryRead(): boolean {

@@ -104,0 +109,0 @@ return this.hasAspect(Aspect.RETRYABLE) && this.hasAspect(Aspect.READ_OPERATION);

@@ -1,3 +0,2 @@

import type { Document } from 'bson';
import type { Document } from '../../bson';
import type { Collection } from '../../collection';

@@ -4,0 +3,0 @@ import type { Server } from '../../sdam/server';

@@ -1,3 +0,2 @@

import type { Document } from 'bson';
import type { Document } from '../../bson';
import type { Collection } from '../../collection';

@@ -4,0 +3,0 @@ import { MONGODB_ERROR_CODES, MongoServerError } from '../../error';

@@ -1,3 +0,2 @@

import type { Document } from 'bson';
import type { Document } from '../../bson';
import type { Collection } from '../../collection';

@@ -4,0 +3,0 @@ import type { Server } from '../../sdam/server';

@@ -72,2 +72,8 @@ import { type Document, Long, type ObjectId } from '../bson';

logicalSessionTimeoutMinutes: number | null;
/** The max message size in bytes for the server. */
maxMessageSizeBytes: number | null;
/** The max number of writes in a bulk write command. */
maxWriteBatchSize: number | null;
/** The max bson object size. */
maxBsonObjectSize: number | null;

@@ -115,2 +121,5 @@ // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level

this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
this.primary = hello?.primary ?? null;

@@ -117,0 +126,0 @@ this.me = hello?.me?.toLowerCase() ?? null;

@@ -516,3 +516,4 @@ import type { Document } from '../bson';

'listCollections' in cmd ||
'listIndexes' in cmd
'listIndexes' in cmd ||
'bulkWrite' in cmd
);

@@ -519,0 +520,0 @@ }

@@ -6,3 +6,3 @@ import * as dns from 'dns';

import { TypedEventEmitter } from '../mongo_types';
import { HostAddress, matchesParentDomain, squashError } from '../utils';
import { checkParentDomainMatch, HostAddress, squashError } from '../utils';

@@ -131,4 +131,7 @@ /**

for (const record of srvRecords) {
if (matchesParentDomain(record.name, this.srvHost)) {
try {
checkParentDomainMatch(record.name, this.srvHost);
finalAddresses.push(record);
} catch (error) {
squashError(error);
}

@@ -135,0 +138,0 @@ }

@@ -46,3 +46,2 @@ import { EJSON, type ObjectId } from '../bson';

commonWireVersion: number;
/**

@@ -49,0 +48,0 @@ * Create a TopologyDescription

@@ -127,2 +127,6 @@ import { Binary, type Document, Long, type Timestamp } from './bson';

transaction: Transaction;
/** @internal
* Tracks whether a commit has been attempted for the current transaction. Initially undefined;
* set to false when startTransaction is called. When commitTransaction is sent to the server, it is reset to undefined if the commit succeeds, otherwise set to true. */
commitAttempted?: boolean;
/** @internal */

@@ -421,2 +425,3 @@ [kServerSession]: ServerSession | null;

this.commitAttempted = false;
// increment txnNumber

@@ -479,3 +484,3 @@ this.incrementTransactionNumber();

if (this.transaction.state === TxnState.TRANSACTION_COMMITTED) {
if (this.transaction.state === TxnState.TRANSACTION_COMMITTED || this.commitAttempted) {
WriteConcern.apply(command, { wtimeoutMS: 10000, ...wc, w: 'majority' });

@@ -500,4 +505,6 @@ }

await executeOperation(this.client, operation);
this.commitAttempted = undefined;
return;
} catch (firstCommitError) {
this.commitAttempted = true;
if (firstCommitError instanceof MongoError && isRetryableWriteError(firstCommitError)) {

@@ -510,3 +517,10 @@ // SPEC-1185: apply majority write concern when retrying commitTransaction

try {
await executeOperation(this.client, operation);
await executeOperation(
this.client,
new RunAdminCommandOperation(command, {
session: this,
readPreference: ReadPreference.primary,
bypassPinningCheck: true
})
);
return;

@@ -513,0 +527,0 @@ } catch (retryCommitError) {

@@ -21,2 +21,3 @@ import * as crypto from 'crypto';

type AnyError,
MongoAPIError,
MongoCompatibilityError,

@@ -29,3 +30,3 @@ MongoInvalidArgumentError,

} from './error';
import type { Explain } from './explain';
import type { Explain, ExplainVerbosity } from './explain';
import type { MongoClient } from './mongo_client';

@@ -256,8 +257,19 @@ import type { CommandOperationOptions, OperationParent } from './operations/command';

*/
export function decorateWithExplain(command: Document, explain: Explain): Document {
if (command.explain) {
return command;
export function decorateWithExplain(
command: Document,
explain: Explain
): {
explain: Document;
verbosity: ExplainVerbosity;
maxTimeMS?: number;
} {
type ExplainCommand = ReturnType<typeof decorateWithExplain>;
const { verbosity, maxTimeMS } = explain;
const baseCommand: ExplainCommand = { explain: command, verbosity };
if (typeof maxTimeMS === 'number') {
baseCommand.maxTimeMS = maxTimeMS;
}
return { explain: command, verbosity: explain.verbosity };
return baseCommand;
}

@@ -1137,3 +1149,5 @@

/**
* Determines whether a provided address matches the provided parent domain.
* This function throws a MongoAPIError if either of the following is true:
* * the provided address domain does not match the provided parent domain
* * the parent domain contains fewer than three `.`-separated parts and the provided address does not contain at least one more domain level than its parent
*

@@ -1145,5 +1159,5 @@ * If a DNS server were to become compromised SRV records would still need to

* @param srvHost - The domain to check the provided address against
* @returns Whether the provided address matches the parent domain
* @returns void
*/
export function matchesParentDomain(address: string, srvHost: string): boolean {
export function checkParentDomainMatch(address: string, srvHost: string): void {
// Remove trailing dot if exists on either the resolved address or the srv hostname

@@ -1154,2 +1168,3 @@ const normalizedAddress = address.endsWith('.') ? address.slice(0, address.length - 1) : address;

const allCharacterBeforeFirstDot = /^.*?\./;
const srvIsLessThanThreeParts = normalizedSrvHost.split('.').length < 3;
// Remove all characters before first dot

@@ -1160,5 +1175,20 @@ // Add leading dot back to string so

const addressDomain = `.${normalizedAddress.replace(allCharacterBeforeFirstDot, '')}`;
const srvHostDomain = `.${normalizedSrvHost.replace(allCharacterBeforeFirstDot, '')}`;
let srvHostDomain = srvIsLessThanThreeParts
? normalizedSrvHost
: `.${normalizedSrvHost.replace(allCharacterBeforeFirstDot, '')}`;
return addressDomain.endsWith(srvHostDomain);
if (!srvHostDomain.startsWith('.')) {
srvHostDomain = '.' + srvHostDomain;
}
if (
srvIsLessThanThreeParts &&
normalizedAddress.split('.').length <= normalizedSrvHost.split('.').length
) {
throw new MongoAPIError(
'Server record does not have at least one more domain level than parent URI'
);
}
if (!addressDomain.endsWith(srvHostDomain)) {
throw new MongoAPIError('Server record does not share hostname with parent URI');
}
}
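
The renamed helper throws instead of returning a boolean. A sketch of the new contract (checkParentDomainMatch is an internal utility, so importing it is shown for illustration only):

```typescript
// Shares the parent domain: returns without throwing.
checkParentDomainMatch('shard0.cluster0.example.com', 'cluster0.example.com');

try {
  // Different domain: throws.
  checkParentDomainMatch('shard0.attacker.net', 'cluster0.example.com');
} catch (error) {
  // MongoAPIError: 'Server record does not share hostname with parent URI'
}
```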

@@ -1165,0 +1195,0 @@

@@ -61,3 +61,6 @@ import { type Document } from './bson';

export class WriteConcern {
/** Request acknowledgment that the write operation has propagated to a specified number of mongod instances or to mongod instances with specified tags. */
/**
* Request acknowledgment that the write operation has propagated to a specified number of mongod instances or to mongod instances with specified tags.
* If w is 0 and is set on a write operation, the server will not send a response.
*/
readonly w?: W;

@@ -64,0 +67,0 @@ /** Request acknowledgment that the write operation has been written to the on-disk journal */
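
This pairs with the OP_MSG change above: when w is 0 the driver sets moreToCome and does not read a server reply. For example, assuming an existing `collection` handle:

```typescript
// Fire-and-forget: no acknowledgment is read for this write.
await collection.insertOne(
  { event: 'page_view', ts: new Date() },
  { writeConcern: { w: 0 } }
);
```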

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet
