mongodb - npm Package Compare versions

Comparing version 1.1.4 to 1.1.5


lib/mongodb/collection.js

@@ -647,4 +647,2 @@ /**

// console.dir(errorOptions)
// commands = DbCommand.createDbCommand(this.db, queryObject, options)
// Fire commands and

@@ -704,3 +702,4 @@ this.db._executeQueryCommand(commands, {read:false}, function(err, result) {

, 'timeout' : 1, 'tailable' : 1, 'batchSize' : 1, 'raw' : 1, 'read' : 1
- , 'returnKey' : 1, 'maxScan' : 1, 'min' : 1, 'max' : 1, 'showDiskLoc' : 1, 'comment' : 1, 'dbName' : 1};
+ , 'returnKey' : 1, 'maxScan' : 1, 'min' : 1, 'max' : 1, 'showDiskLoc' : 1, 'comment' : 1, 'dbName' : 1, 'exhaust': 1
+ , 'tailableRetryInterval': 1};

@@ -729,3 +728,5 @@ /**

* - **tailable** {Boolean, default:false}, specify if the cursor is tailable.
+ * - **tailableRetryInterval** {Number, default:100}, specify the milliseconds between getMores on tailable cursor.
* - **awaitdata** {Boolean, default:false} allow the cursor to wait for data, only applicable for tailable cursor.
+ * - **exhaust** {Boolean, default:false} have the server send all the documents at once as getMore packets, not recommended.
* - **batchSize** {Number, default:0}, set the batchSize for the getMoreCommand when iterating over the query results.

@@ -740,3 +741,3 @@ * - **returnKey** {Boolean, default:false}, only return the index key.

* - **readPreference** {String}, the preferred read preference (Server.PRIMARY, Server.PRIMARY_PREFERRED, Server.SECONDARY, Server.SECONDARY_PREFERRED, Server.NEAREST).
- * - **numberOfRetries** {Number, default:1}, if using awaitdata specifies the number of times to retry on timeout.
+ * - **numberOfRetries** {Number, default:5}, if using awaitdata specifies the number of times to retry on timeout.
*
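
Taken together, these hunks expose two new find() options, tailableRetryInterval and exhaust, and raise the awaitdata retry default from 1 to 5. A minimal sketch of how a caller might use them against the 1.1.x API (the connection details and capped collection name are illustrative, not from this diff):

var mongodb = require('mongodb');

var db = new mongodb.Db('test', new mongodb.Server('localhost', 27017));
db.open(function(err, db) {
  if(err) throw err;
  db.collection('events', function(err, collection) {
    // Tail a capped collection, retrying getMore every 500ms instead of
    // the default 100ms, and giving up after 5 empty awaitdata rounds.
    var cursor = collection.find({}, {
        tailable: true
      , awaitdata: true
      , tailableRetryInterval: 500
      , numberOfRetries: 5
    });
    cursor.each(function(err, doc) {
      if(doc != null) console.log(doc);
    });
  });
});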

@@ -867,3 +868,3 @@ * @param {Object} query query object to locate the object to modify

, o.slaveOk, o.raw, o.read, o.returnKey, o.maxScan, o.min, o.max, o.showDiskLoc, o.comment, o.awaitdata
- , o.numberOfRetries, o.dbName));
+ , o.numberOfRetries, o.dbName, o.tailableRetryInterval, o.exhaust));
} else {

@@ -873,3 +874,3 @@ return new Cursor(this.db, this, selector, fields, o.skip, o.limit

, o.slaveOk, o.raw, o.read, o.returnKey, o.maxScan, o.min, o.max, o.showDiskLoc, o.comment, o.awaitdata
- , o.numberOfRetries, o.dbName);
+ , o.numberOfRetries, o.dbName, o.tailableRetryInterval, o.exhaust);
}

@@ -974,2 +975,5 @@ };

* - **max** {Number}, for geospatial indexes set the high bound for the co-ordinates.
+ * - **v** {Number}, specify the format version of the indexes.
+ * - **expireAfterSeconds** {Number}, allows you to expire data on indexes applied to a collection (MongoDB 2.2 or higher)
+ * - **name** {String}, override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
*

@@ -1014,2 +1018,4 @@ * @param {Object} fieldOrSpec fieldOrSpec that defines the index.

* - **v** {Number}, specify the format version of the indexes.
+ * - **expireAfterSeconds** {Number}, allows you to expire data on indexes applied to a collection (MongoDB 2.2 or higher)
+ * - **name** {String}, override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
*
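
Both index-creation doc blocks gain the same two options. A hedged sketch of a TTL index with an explicit short name (the collection, field, and index names are made up for illustration):

// Expire session documents 30 minutes after their createdAt timestamp
// (TTL indexes require MongoDB 2.2+), overriding the autogenerated name.
db.collection('sessions', function(err, collection) {
  collection.ensureIndex({createdAt: 1}
    , {expireAfterSeconds: 1800, name: 'ttl_createdAt'}
    , function(err, indexName) {
      if(err) throw err;
      console.log('created index:', indexName); // 'ttl_createdAt'
  });
});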


@@ -159,3 +159,3 @@ var QueryCommand = require('./query_command').QueryCommand,

// Generate the index name
- var indexName = indexes.join("_");
+ var indexName = typeof options.name == 'string' ? options.name : indexes.join("_");
// Build the selector
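
This hunk is where the new name option takes effect: previously the index name was always the joined field/direction pairs, which for compound indexes can exceed the 128-byte limit mentioned in the docs above. A small illustration of the naming rule (the array contents are assumed from how the driver builds them):

// The driver joins "<field>_<direction>" entries, so compound indexes
// can produce very long autogenerated names:
var indexes = ['createdAt_1', 'userId_1', 'status_1'];
var options = {name: 'sessions_lookup'};
var indexName = typeof options.name == 'string' ? options.name : indexes.join("_");
console.log(indexName); // 'sessions_lookup' ('createdAt_1_userId_1_status_1' without the override)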


@@ -61,2 +61,9 @@ var BaseCommand = require('./base_command').BaseCommand,

+ QueryCommand.prototype.setMongosReadPreference = function(readPreference, tags) {
+   // If we have readPreference set to true set to secondary preferred
+   if(readPreference == true) {
+     readPreference = 'secondaryPreferred';
+   } else if(readPreference == 'false') {
+     readPreference = 'primary';
+   }
// Force the slave ok flag to be set if we are not using primary read preference
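
The new setMongosReadPreference normalizes legacy boolean-style values into proper mode strings before the query is sent to a mongos. A sketch of the mapping it implements (the tags argument format is an assumption based on how read-preference tag sets are expressed generally):

// true    -> 'secondaryPreferred'   (legacy slaveOk behaviour)
// 'false' -> 'primary'
// any mode other than false/'primary' also forces the slaveOk wire flag
queryCommand.setMongosReadPreference(true, [{dc: 'east'}]);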


@@ -50,4 +50,4 @@ var Connection = require('./connection').Connection,

if(readPreference != null) {
- if(readPreference != ReadPreference.PRIMARY && readPreference != ReadPreference.SECONDARY
-   && readPreference != ReadPreference.SECONDARY_PREFERRED) {
+ if(readPreference != ReadPreference.PRIMARY && readPreference != ReadPreference.SECONDARY && readPreference != ReadPreference.NEAREST
+   && readPreference != ReadPreference.SECONDARY_PREFERRED && readPreference != ReadPreference.PRIMARY_PREFERRED) {
throw new Error("Illegal readPreference mode specified, " + readPreference);
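
1.1.5 widens the accepted set to all five read-preference modes; NEAREST and PRIMARY_PREFERRED previously threw. A self-contained sketch of the check, mirroring the hunk above (the require path is an assumption):

var ReadPreference = require('./connection/read_preference').ReadPreference;

function validateReadPreference(readPreference) {
  if(readPreference != null) {
    if(readPreference != ReadPreference.PRIMARY && readPreference != ReadPreference.SECONDARY && readPreference != ReadPreference.NEAREST
      && readPreference != ReadPreference.SECONDARY_PREFERRED && readPreference != ReadPreference.PRIMARY_PREFERRED) {
      throw new Error("Illegal readPreference mode specified, " + readPreference);
    }
  }
  return readPreference;
}

validateReadPreference(ReadPreference.NEAREST); // ok in 1.1.5, threw in 1.1.4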

@@ -147,3 +147,3 @@ }

*/
- Server.prototype.isConnected = function() {
+ Server.prototype.isConnected = function() {
return this._serverState == 'connected';

@@ -290,5 +290,10 @@ }

+ // The command executed another request, log the handler again under that request id
+ if(mongoReply.requestId > 0 && mongoReply.cursorId.toString() != "0" && callbackInfo.info && callbackInfo.info.exhaust) {
+   dbInstance._reRegisterHandler(mongoReply.requestId, callbackInfo);
+ }
// Only execute callback if we have a caller
+ // chained is for findAndModify as it does not respect write concerns
- if(callbackInfo.callback && Array.isArray(callbackInfo.info.chained)) {
+ if(callbackInfo && callbackInfo.callback && Array.isArray(callbackInfo.info.chained)) {
// Check if callback has already been fired (missing chain command)

@@ -380,3 +385,3 @@ var chained = callbackInfo.info.chained;

});
- } else if(callbackInfo.callback) {
+ } else if(callbackInfo && callbackInfo.callback) {
// Parse the body

@@ -424,3 +429,2 @@ mongoReply.parseBody(message, connectionPool.bson, callbackInfo.info.raw, function(err) {

// Trigger the callback
dbInstanceObject._callHandler(mongoReply.responseTo, mongoReply, null);


@@ -124,3 +124,3 @@ var Server = require("../server").Server;

new function(serverInstance) {
- var server = new Server(serverInstance.host, serverInstance.port, {poolSize:1, timeout:500});
+ var server = new Server(serverInstance.host, serverInstance.port, {poolSize:1, timeout:500, auto_reconnect:false});
var db = new self.Db(self.replicaset.db.databaseName, server);


@@ -5,2 +5,3 @@ var QueryCommand = require('./commands/query_command').QueryCommand,

Long = require('bson').Long,
+ ReadPreference = require('./connection/read_preference').ReadPreference,
CursorStream = require('./cursorstream'),

@@ -37,9 +38,10 @@ utils = require('./utils');

* @param {String} comment you can put a $comment field on a query to make looking in the profiler logs simpler.
* @param {Boolean} awaitdata allow the cursor to wait for data, only applicable for tailable cursor.
* @param {Number} numberOfRetries if using awaitdata specifies the number of times to retry on timeout.
* @param {String} dbName override the default dbName.
+ * @param {Number} tailableRetryInterval specify the milliseconds between getMores on tailable cursor.
+ * @param {Boolean} exhaust have the server send all the documents at once as getMore packets.
*/
function Cursor(db, collection, selector, fields, skip, limit
, sort, hint, explain, snapshot, timeout, tailable, batchSize, slaveOk, raw, read
- , returnKey, maxScan, min, max, showDiskLoc, comment, awaitdata, numberOfRetries, dbName) {
+ , returnKey, maxScan, min, max, showDiskLoc, comment, awaitdata, numberOfRetries, dbName, tailableRetryInterval, exhaust) {
this.db = db;

@@ -58,7 +60,8 @@ this.collection = collection;

this.awaitdata = awaitdata;
- this.numberOfRetries = numberOfRetries == null ? 1 : numberOfRetries;
+ this.numberOfRetries = numberOfRetries == null ? 5 : numberOfRetries;
+ this.currentNumberOfRetries = this.numberOfRetries;
this.batchSizeValue = batchSize == null ? 0 : batchSize;
this.slaveOk = slaveOk == null ? collection.slaveOk : slaveOk;
this.raw = raw == null ? false : raw;
- this.read = read == null ? true : read;
+ this.read = read == null ? ReadPreference.PRIMARY : read;
this.returnKey = returnKey;

@@ -70,2 +73,4 @@ this.maxScan = maxScan;

this.comment = comment;
+ this.tailableRetryInterval = tailableRetryInterval || 100;
+ this.exhaust = exhaust || false;
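
Net effect on a fresh cursor's defaults, read straight off the assignments above (the find() call and collection are illustrative):

var cursor = collection.find({});
console.log(cursor.numberOfRetries);       // 5   (was 1 in 1.1.4)
console.log(cursor.read);                  // ReadPreference.PRIMARY (was the boolean true)
console.log(cursor.tailableRetryInterval); // 100 (ms between getMores on a tailable cursor)
console.log(cursor.exhaust);               // false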

@@ -145,3 +150,3 @@ this.totalNumberOfRecords = 0;

- if (item != null) {
+ if(item != null && Array.isArray(items)) {
items.push(item);

@@ -412,2 +417,6 @@ } else {

+ if(self.exhaust) {
+   queryOptions |= QueryCommand.OPTS_EXHAUST;
+ }
if(self.slaveOk) {
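
OPTS_EXHAUST corresponds to the exhaust bit of the OP_QUERY flags field in the wire protocol. For orientation, a sketch of how the flag constants combine (bit values are from the wire protocol; the helper itself is illustrative):

function buildQueryOptions(opts) {
  var queryOptions = 0;
  if(opts.tailable)  queryOptions |= 2;  // QueryCommand.OPTS_TAILABLE_CURSOR
  if(opts.slaveOk)   queryOptions |= 4;  // QueryCommand.OPTS_SLAVE
  if(opts.awaitdata) queryOptions |= 32; // QueryCommand.OPTS_AWAIT_DATA
  if(opts.exhaust)   queryOptions |= 64; // QueryCommand.OPTS_EXHAUST
  return queryOptions;
}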

@@ -509,4 +518,8 @@ queryOptions |= QueryCommand.OPTS_SLAVE;

result = null;
- self.nextObject(callback);
+ // Ignore callbacks until the cursor is dead for exhausted
+ if(self.exhaust && result.cursorId.toString() == "0") {
+   self.nextObject(callback);
+ } else if(self.exhaust == false || self.exhaust == null) {
+   self.nextObject(callback);
+ }
};
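
With exhaust set, the server streams every batch without waiting for individual getMore round-trips, so the driver suppresses intermediate callbacks until the terminating reply (cursorId 0) arrives; this also explains the handler re-registration added in the server reply path above. A hedged consumption sketch (remember the docs above call the option "not recommended"):

// The connection is dedicated to this cursor until the final packet arrives.
collection.find({}, {exhaust: true}).each(function(err, doc) {
  if(err) throw err;
  if(doc == null) return console.log('cursor exhausted, connection free again');
  // process doc...
});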

@@ -517,3 +530,3 @@

try {
- self.connection = this.read == null ? self.db.serverConfig.checkoutWriter() : self.db.serverConfig.checkoutReader(this.read);
+ self.connection = this.read == null ? self.db.serverConfig.checkoutWriter() : self.db.serverConfig.checkoutReader(this.read);
} catch(err) {

@@ -525,3 +538,3 @@ return callback(err, null);

// Execute the command
- self.db._executeQueryCommand(cmd, {raw:self.raw, read:this.read, connection:self.connection}, commandHandler);
+ self.db._executeQueryCommand(cmd, {exhaust: self.exhaust, raw:self.raw, read:this.read, connection:self.connection}, commandHandler);
// Set the command handler to null

@@ -565,3 +578,3 @@ commandHandler = null;

// Set up options
- var options = { read: self.read, raw: self.raw, connection:self.connection };
+ var options = {read: self.read, raw: self.raw, connection:self.connection };

@@ -590,3 +603,9 @@ // Execute the command

- self.items = self.items.concat(result.documents);
+ // Reset the tries for awaitdata if we are using it
+ self.currentNumberOfRetries = self.numberOfRetries;
+ // Get the documents
+ for(var i = 0; i < result.documents.length; i++) {
+   self.items.push(result.documents[i]);
+ }
+ // result = null;

@@ -596,4 +615,4 @@ callback(null, self.items.shift());

// Execute the tailable cursor once more, will timeout after ~4 sec if awaitdata used
- self.numberOfRetries = self.numberOfRetries - 1;
- if(self.numberOfRetries == 0) {
+ self.currentNumberOfRetries = self.currentNumberOfRetries - 1;
+ if(self.currentNumberOfRetries == 0) {
self.close(function() {

@@ -603,8 +622,8 @@ callback(new Error("tailable cursor timed out"), null);

} else {
- process.nextTick(function() {getMore(self, callback);});
+ process.nextTick(function() { getMore(self, callback); });
}
} else if(self.tailable && !isDead) {
- self.getMoreTimer = setTimeout(function() {getMore(self, callback);}, 100);
+ self.getMoreTimer = setTimeout(function() { getMore(self, callback); }, self.tailableRetryInterval);
} else {
- self.close(function() {callback(null, null);});
+ self.close(function() {callback(null, null); });
}
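
This is the loop tailableRetryInterval feeds: an empty getMore on a live tailable cursor now waits the configured interval instead of a hard-coded 100ms, and the retry budget lives in currentNumberOfRetries so it resets whenever documents arrive. From the caller's side (the capped-collection setup is illustrative):

db.createCollection('log', {capped: true, size: 1024 * 1024}, function(err, collection) {
  if(err) throw err;
  var cursor = collection.find({}, {tailable: true, awaitdata: true, tailableRetryInterval: 1000});
  cursor.each(function(err, doc) {
    // err is "tailable cursor timed out" after numberOfRetries empty awaitdata rounds
    if(err) return console.error(err.message);
    if(doc) console.log('new entry:', doc);
  });
});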

@@ -687,3 +706,3 @@

function execute(command) {
- self.db._executeQueryCommand(command, {read:self.read, raw:self.raw, connection:self.connection}, function(err,result) {
+ self.db._executeQueryCommand(command, {exhaust: self.exhaust, read:self.read, raw:self.raw, connection:self.connection}, function(err,result) {
if(err) {


@@ -36,3 +36,3 @@ var Long = require('bson').Long;

this.numberReturned = binary_reply[this.index] | binary_reply[this.index + 1] << 8 | binary_reply[this.index + 2] << 16 | binary_reply[this.index + 3] << 24;
- this.index = this.index + 4;
+ this.index = this.index + 4;
}

@@ -43,4 +43,4 @@

// Just set a doc limit for deserializing
- var docLimitSize = 1024*20;
+ var docLimitSize = 1024*20;
// If our message length is very long, let's switch to process.nextTick for messages

@@ -50,3 +50,3 @@ if(this.messageLength > docLimitSize) {

this.documents = new Array(this.numberReturned);
// Just walk down until we get a positive number >= 1

@@ -57,8 +57,8 @@ for(var i = 50; i > 0; i--) {

break;
}
}
}
}
// Actual main creator of the processFunction setting internal state to control the flow
var parseFunction = function(_self, _binary_reply, _batchSize, _numberReturned) {
- var object_index = 0;
+ var object_index = 0;
// Internal loop process that will use nextTick to ensure we yield some time

@@ -70,3 +70,3 @@ var processFunction = function() {

}
// If raw just process the entries

@@ -78,6 +78,6 @@ if(raw) {

if(object_index <= _numberReturned) {
- // Read the size of the bson object
+ // Read the size of the bson object
var bsonObjectSize = _binary_reply[_self.index] | _binary_reply[_self.index + 1] << 8 | _binary_reply[_self.index + 2] << 16 | _binary_reply[_self.index + 3] << 24;
// If we are storing the raw responses to pipe straight through
- _self.documents[object_index] = binary_reply.slice(_self.index, _self.index + bsonObjectSize);
+ _self.documents[object_index] = binary_reply.slice(_self.index, _self.index + bsonObjectSize);
// Adjust binary index to point to next block of binary bson data

@@ -88,3 +88,3 @@ _self.index = _self.index + bsonObjectSize;

}
}
}
} else {

@@ -95,3 +95,3 @@ try {

// Adjust index
- object_index = object_index + _batchSize;
+ object_index = object_index + _batchSize;
} catch (err) {

@@ -104,3 +104,3 @@ return callback(err);

if(object_index < _numberReturned) {
- process.nextTick(processFunction);
+ process.nextTick(processFunction);
} else {

@@ -110,30 +110,30 @@ callback(null);

}
// Return the process function
return processFunction;
}(this, binary_reply, batchSize, this.numberReturned)();
- } else {
-   try {
+ } else {
+   try {
// Let's unpack all the bson documents, deserialize them and store them
for(var object_index = 0; object_index < this.numberReturned; object_index++) {
- // Read the size of the bson object
- var bsonObjectSize = binary_reply[this.index] | binary_reply[this.index + 1] << 8 | binary_reply[this.index + 2] << 16 | binary_reply[this.index + 3] << 24;
+ // Read the size of the bson object
+ var bsonObjectSize = binary_reply[this.index] | binary_reply[this.index + 1] << 8 | binary_reply[this.index + 2] << 16 | binary_reply[this.index + 3] << 24;
// If we are storing the raw responses to pipe straight through
if(raw) {
// Deserialize the object and add to the documents array
- this.documents.push(binary_reply.slice(this.index, this.index + bsonObjectSize));
+ this.documents.push(binary_reply.slice(this.index, this.index + bsonObjectSize));
} else {
// Deserialize the object and add to the documents array
- this.documents.push(bson.deserialize(binary_reply.slice(this.index, this.index + bsonObjectSize)));
+ this.documents.push(bson.deserialize(binary_reply.slice(this.index, this.index + bsonObjectSize)));
}
// Adjust binary index to point to next block of binary bson data
this.index = this.index + bsonObjectSize;
}
}
} catch(err) {
return callback(err);
}
// No error return
- callback(null);
- }
+ callback(null);
+ }
}
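
The rest of this file's changes are whitespace cleanup around an unchanged strategy: replies larger than roughly 20KB are deserialized in batches scheduled with process.nextTick so a big result set does not block the event loop. The pattern, distilled (names simplified from the original):

// Deserialize `total` documents in chunks of `batchSize`, yielding to the
// event loop between chunks so other callbacks are not starved.
function parseDocuments(deserializeOne, total, batchSize, callback) {
  var index = 0;
  function processChunk() {
    try {
      for(var i = 0; i < batchSize && index < total; i++, index++) {
        deserializeOne(index);
      }
    } catch(err) {
      return callback(err);
    }
    if(index < total) {
      process.nextTick(processChunk);
    } else {
      callback(null);
    }
  }
  processChunk();
}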


{ "name" : "mongodb"
, "description" : "A node.js driver for MongoDB"
, "keywords" : ["mongodb", "mongo", "driver", "db"]
, "version" : "1.1.4"
, "version" : "1.1.5"
, "author" : "Christian Amor Kvalheim <christkv@gmail.com>"

@@ -62,3 +62,3 @@ , "contributors" : [ "Aaron Heckmann",

, "dependencies" : {
"bson": "0.1.1"
"bson": "0.1.3"
}


