async-logging
Comparing version 0.2.2 to 0.3.0
http-transport.js
@@ -38,9 +38,17 @@ 'use strict';
-        connection.scheduler = connection.scheduler || setInterval(function drain(){
+        connection.scheduled = connection.scheduled || setTimeout(function drain(){
             _this.drain(connection);
+            _this.reconnect(url, protocol);
         }, _this.drainInterval);
     }
 };
+HttpTransport.prototype.reconnect = function(url, protocol){
+    //no need to reconnect for http
+    this.connect(url, protocol);
+};
 HttpTransport.prototype.log = function(message){
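The switch from `setInterval` to `setTimeout` turns drain into a one-shot timer that must be re-armed after every flush, which is what the new `reconnect` (just `connect` again, for HTTP) provides. Below is a minimal sketch of that cycle; the `connection` bookkeeping and the `drain` body are assumptions, since the diff only shows the scheduling lines:

```
// Sketch only: a re-armed one-shot drain loop, mirroring the names above.
function HttpTransport(drainInterval){
    this.drainInterval = drainInterval;
    this.connection = null;
}
HttpTransport.prototype.connect = function(url, protocol){
    var _this = this,
        connection = this.connection = this.connection || {};
    connection.scheduled = connection.scheduled || setTimeout(function drain(){
        connection.scheduled = null;      //assumption: cleared so reconnect can re-arm
        _this.drain(connection);          //flush whatever has been buffered
        _this.reconnect(url, protocol);   //re-arm the next one-shot drain
    }, this.drainInterval);
};
HttpTransport.prototype.reconnect = function(url, protocol){
    //no need to reconnect for http
    this.connect(url, protocol);
};
HttpTransport.prototype.drain = function(connection){
    //assumption: POST the buffered messages here
};
```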
index.js
@@ -8,3 +8,4 @@ 'use strict';
     'WebSocketTransport' : require('./ws-transport.js').WebSocketTransport,
-    'HttpTransport' : require('./http-transport.js').HttpTransport
+    'HttpTransport' : require('./http-transport.js').HttpTransport,
+    'WebSocketOtherwiseHttpTransport' : require('./ws-http-transport.js').WebSocketOtherwiseHttpTransport,
 };
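The registry makes each transport selectable by name. A hypothetical lookup sketch; the constructor options mirror the `options.url` / `options.protocol` fields shown later in this diff, but how callers actually pick a transport is an assumption:

```
var transports = require('./index.js');
//prefer the new fallback transport when available, else plain HTTP
var Transport = transports['WebSocketOtherwiseHttpTransport'] || transports['HttpTransport'];
var transport = new Transport({
    'url': 'ws://localhost:8080/',   //assumed endpoint
    'protocol': 'log-protocol'       //default protocol named in ws-transport.js
});
```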
log-buffer.js
@@ -42,21 +42,21 @@ 'use strict';
     var mapped = {
-        type : log.type || 'URL',
-        name: log.name,
-        request: log.uuid,
-        parent: log.parent || '0',
-        clazz: log.clazz || 'atomicEvent',
-        event : log.event,
-        duration: log.duration,
-        pid: log.pid,
-        tid: log.tid || assignThread(log.pid, log.uuid),
-        machine: log.machine,
-        ipAddress: log.ipAddress || log.ip,
-        pool: log.pool,
-        level: log.level,
-        msg: log.msg,
-        rlogid: log.rlogid,
-        timestamp: log.timestamp || Date.now()
+        'type' : log.type || 'URL',
+        'name': log.name,
+        'request': log.uuid,
+        'parent': log.parent || '0',
+        'clazz': log.clazz || 'atomicEvent',
+        'event' : log.event,
+        'duration': log.duration,
+        'pid': log.pid,
+        'tid': log.tid || assignThread(log.pid, log.uuid),
+        'machine': log.machine,
+        'ipAddress': log.ipAddress || log.ip,
+        'pool': log.pool,
+        'level': log.level,
+        'msg': log.msg,
+        'rlogid': log.rlogid,
+        'timestamp': log.timestamp || Date.now()
     };
     //in case there's anything else not mapped.
-    _.extend(mapped, _.omit(log, 'type', 'uuid', 'parent', 'begin', 'end', 'event', 'duration', 'name', 'pid', 'tid', 'machine', 'ipAddress', 'pool', 'level', 'msg', 'rlogid', 'timestamp'));
+    _.defaults(mapped, log);
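`_.defaults(mapped, log)` replaces the long `_.extend`/`_.omit` chain: it copies only the keys `mapped` does not already define, so the explicit mappings win and everything else rides along. A small illustration (hypothetical log record):

```
var _ = require('underscore');
var log = { uuid: 'abc', type: 'TRANSACTION', custom: 42 };
var mapped = { 'type': log.type || 'URL', 'request': log.uuid };
_.defaults(mapped, log);
// mapped => { type: 'TRANSACTION', request: 'abc', uuid: 'abc', custom: 42 }
// 'request' keeps its mapped value; unmapped keys like 'custom' are carried over
```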
@@ -70,9 +70,9 @@ return mapped;
     }
-    else if(_.isEqual('atomicEvent', node.clazz)){
+    else if('atomicEvent' === node.clazz){
         return node;
     }
-    else if(_.isEqual('heartbeat', node.clazz)){
+    else if('heartbeat' === node.clazz){
         return node;
     }
-    else if(_.isEqual('end', node.clazz)){
+    else if('end' === node.clazz){
         return node;
@@ -94,5 +94,5 @@ }
         'msg': c.msg,
-        'duration': c.duration
-        //not timestamp
-    });
+        'duration': c.duration,
+        'status': c.status
+    });//https://jirap.corp.ebay.com/browse/POLYGLOT-337
 }
@@ -109,3 +109,3 @@ return memoize;
     }
-    else if(_.isEqual('atomicEvent', node.clazz) || _.isEqual('heartbeat', node.clazz)){
+    else if('atomicEvent' === node.clazz || 'heartbeat' === node.clazz){
         return 'end';
@@ -115,15 +115,14 @@ }
     var complete = _.reduce(family[node.event] || [], function(memoize, c){
         if(!memoize || !validateTree(family, c)){
            return null;
         }
-        if(_.isEqual(c.clazz, 'begin')){
+        if(c.clazz === 'begin'){
            memoize[c.event] = 'begin';
         }
         else{
            memoize[c.event] = 'end';
         }
         return memoize;
-    },
-    {});
+    }, {});
@@ -136,3 +135,3 @@ if(!complete){
     return _.isEmpty(children) || _.every(children, function(elem){
-        return _.isEqual(elem, 'end');
+        return elem === 'end';
     });
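To see what the refactored checks decide, consider a hypothetical `family` map (event id to child records, a shape inferred from the code above, not shown verbatim in the diff):

```
//Hypothetical data, for illustration only:
var family = {
    'root': [
        { event: 'db-call',   clazz: 'end'   },  //finished child
        { event: 'http-call', clazz: 'begin' }   //still open
    ]
};
// The reduce above yields { 'db-call': 'end', 'http-call': 'begin' };
// _.every(children, elem === 'end') is then false, so the root transaction
// is considered incomplete and stays buffered until 'http-call' ends.
```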
@@ -156,4 +155,4 @@ }; | ||
clazz = mapped.clazz, | ||
begin = _.isEqual('begin', clazz), | ||
end = _.isEqual('end', clazz), | ||
begin = 'begin' === clazz, | ||
end = 'end' === clazz, | ||
event = mapped.event, | ||
@@ -168,3 +167,3 @@ request= mapped.request;
     if(parent === '0' && !begin && !end){
-        if(_.isEqual('atomicEvent', clazz)){
+        if('atomicEvent' === clazz){
            _this.atomicEvents.push(mapped);
@@ -174,3 +173,3 @@
     }
-    else if(_.isEqual('heartbeat', clazz)){
+    else if('heartbeat' === clazz){
        _this.heartbeats.push(mapped);
@@ -193,4 +192,9 @@
-    transaction.duration = mapped.duration || Date.now() - transaction.timestamp;
-    transaction.msg = mapped.msg;
+    //bugfix for missing name, status updates of root transaction
+    _.extend(transaction, {
+        'name': mapped.name,
+        'msg': mapped.msg,
+        'duration': mapped.duration || Date.now() - transaction.timestamp,
+        'status': mapped.status || '-1'
+    });//indicate it's incomplete
@@ -255,4 +259,9 @@ if(validateTree(transaction.family, transaction)){ | ||
emitter.emit('cleaned', {till: till}); | ||
emitter.emit('cleaned', { | ||
'till': till, | ||
'atomicEvents' : _this.atomicEvents.length, | ||
'heartbeats' : _this.heartbeats.length, | ||
'transactions': _.keys(_this.transactions).length | ||
}); | ||
}); | ||
}; |
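The enriched `cleaned` event now reports the buffer sizes, which makes memory growth observable from outside. A listener sketch, assuming `emitter` is the same EventEmitter handed to LogBuffer:

```
emitter.on('cleaned', function(stats){
    console.log('cleaned till ' + stats.till + ': ' +
        stats.atomicEvents + ' atomic events, ' +
        stats.heartbeats + ' heartbeats, ' +
        stats.transactions + ' pending transactions');
});
```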
log-client.js
 'use strict';
-var WebSocketTransport = require("./index.js").WebSocketTransport,
-    assert = require("assert");
+var WebSocketTransport = require('./index.js').WebSocketTransport,
+    assert = require('assert');
@@ -23,3 +23,6 @@ var LogClient = exports.LogClient = function(options){
     emitter.removeListener('log', listener);
+    transport.reconnect = function(){
+        //stop the transport's reconnect immediately
+    };
     };
 };
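Stubbing `transport.reconnect` with a no-op is how the client now makes shutdown stick: the transport's close handler calls `reconnect` (see the ws-transport changes below), and the stub turns that call into a no-op. The same pattern in isolation, wrapped in a hypothetical `stopClient` helper (the helper name and the explicit `close()` call are assumptions):

```
function stopClient(transport, emitter, listener){
    emitter.removeListener('log', listener);
    transport.reconnect = function(){
        //no-op: the close handler's reconnect() now does nothing,
        //so the client stays stopped instead of reviving itself
    };
    if(transport.connection){
        transport.connection.close(); //assumption: trigger the close path
    }
}
```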
log-server.js
@@ -21,3 +21,3 @@ 'use strict';
-    var server = options.app || http.createServer(function(request, response) {
+    var server = http.createServer(options.app || function(request, response) {
         //try not closing the http connection.
@@ -28,25 +28,39 @@ console.log('[log-cluster] received: ' + request.url);
     wss = options.websocket ? new WebSocketServer({
            httpServer: server,
            // You should not use autoAcceptConnections for production
            // applications, as it defeats all standard cross-origin protection
            // facilities built into the protocol and the browser. You should
            // *always* verify the connection's origin and decide whether or not
            // to accept it.
+           maxReceivedMessageSize: 4 * 1024 * 1024,//4mb for the max message size
            autoAcceptConnections: false
-        }) : null,
+        })
+        :{
+            'on': function(){
+            },
+            'shutDown': function(){
+            }
+        },
     actualOptions = {
-        port: 3000,
-        monPort: 3001,
-        cluster: true,
-        noWorkers: os.cpus().length + 1,
-        connThreshold: 1024,//concurrent connections shouldn't exceed 1k, otherwise performance degradation would be obvious.
-        ecv: {
+        'port': 3000,
+        'monPort': 3001,
+        'cluster': true,
+        'noWorkers': os.cpus().length + 1,
+        'connThreshold': 1024,//concurrent connections shouldn't exceed 1k, otherwise performance degradation would be obvious.
+        'ecv': {
            control: true
        },
-        heartbeatInterval : 60000,
-        LogListener : LogListener,
-        LogBuffer : LogBuffer,
-        LogPublisher: WinstonPublisher,
-        cleanDuration : 1000 * 60,//one min
+        'heartbeatInterval' : 60000,
+        'LogListener' : LogListener,
+        'LogBuffer' : LogBuffer,
+        'LogPublisher': WinstonPublisher,
+        'cleanDuration' : 1000 * 60,//one min
+        'machineName' : os.hostname(),
+        'maxLife' : 1000 * 3600, //[1, 2) hours
+        'maxMessages' : 1024, //0.5 msg/sec, 1 hour
+        'waitForPublisher': 3000//3s
     };
     //let user options overwrite defaults
     _.extend(actualOptions, options);
@@ -61,9 +75,9 @@
     var logCluster = new Cluster({
-        port: actualOptions.port,
-        monPort: actualOptions.monPort,
-        cluster: actualOptions.cluster,
-        noWorkers: actualOptions.noWorkers,
-        connThreshold: actualOptions.connThreshold,
-        ecv: actualOptions.ecv,
-        heartbeatInterval: actualOptions.heartbeatInterval
+        'port': actualOptions.port,
+        'monPort': actualOptions.monPort,
+        'cluster': actualOptions.cluster,
+        'noWorkers': actualOptions.noWorkers,
+        'connThreshold': actualOptions.connThreshold,
+        'ecv': actualOptions.ecv,
+        'heartbeatInterval': actualOptions.heartbeatInterval
     });
@@ -77,5 +91,5 @@
     //either a constructor or an instance
-    _.isFunction(actualOptions.LogListener) ? actualOptions.LogListener(wss, emitter) : actualOptions.LogListener;
+    _.isFunction(actualOptions.LogListener) ? actualOptions.LogListener(wss, emitter, actualOptions) : actualOptions.LogListener;
     _.isFunction(actualOptions.LogBuffer) ? actualOptions.LogBuffer(emitter) : actualOptions.LogBuffer;
-    _.isFunction(actualOptions.LogPublisher) ? actualOptions.LogPublisher(emitter) : actualOptions.LogPublisher;
+    _.isFunction(actualOptions.LogPublisher) ? actualOptions.LogPublisher(emitter, actualOptions) : actualOptions.LogPublisher;
@@ -98,3 +112,1 @@ //a cleanup logic that is added to avoid logs never closed taking too much memory, threshold set to one day for now
 };
 exports.LogClient = require('./log-client.js').LogClient;
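Because user options are `_.extend`ed over the defaults above, the new worker-recycling knobs can be tuned per deployment. A hedged usage sketch, assuming the server module exports `LogCluster` the way the README shows (the require path follows the `npm start` script; adjust to your layout):

```
var LogCluster = require('./lib/log-server.js').LogCluster;
new LogCluster({
    'port': 3000,
    'maxLife': 1000 * 3600,      //recycle a worker after [1, 2) hours
    'maxMessages': 1024,         //...or after 1024 received messages
    'waitForPublisher': 3000     //grace period before a forced exit
});
```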
log-listen.js
@@ -21,5 +21,56 @@ 'use strict';
-var LogListener = exports.LogListener = function(wss, emitter){
-    var _this = this;
+var LogListener = exports.LogListener = function(wss, emitter, options){
+    var _this = this,
+        app = options.app,
+        middleware = options.middleware,
+        machineName = options.machineName,
+        totalMessages = 0,
+        maxMessages = options.maxMessages,
+        waitForPublisher = options.waitForPublisher || 2000,//2 sec
+        maxLife = options.maxLife,
+        suicide = _.once(function suicide(){
+            if(require('cluster').isMaster){
+                //not sure why master got here too...
+                return;
+            }
+            wss.shutDown();
+            function wait(retries){
+                if(_this.connections.length === 0 || retries === 0){
+                    var publisherTimeout = setTimeout(function(){
+                        console.log('forced suicide:' + process.pid);
+                        process.exit(-1);
+                    },
+                    waitForPublisher);
+                    emitter.emit('clean', Date.now());//force everything in the buffer to be flushed
+                    emitter.emit('clean', Date.now());
+                    emitter.emit('clean', Date.now());//emit 3 times to make sure it exceeds the max age of buffered transactions too
+                    emitter.once('suicide-confirmed-by-publisher', function(){
+                        console.log('gracefully suicide:' + process.pid);
+                        clearTimeout(publisherTimeout);
+                        process.exit(-1);
+                    });
+                    emitter.emit('suicide-wait-for-publisher');
+                }
+                else{
+                    //wait for cluster to revive me
+                    setTimeout(function(){
+                        wait(retries - 1);
+                    }, 1000);//try in 1 second
+                }
+            }
+            wait(5);//max 5s shutdown flow
+        });
     _this.connections = [];
@@ -47,10 +98,17 @@
         try{
-            var bytes = message.binaryData,
-                buf = new Buffer(bytes.length);
-            bytes.copy(buf, 0, 0, bytes.length);
-            var unpack = msgpack.unpack(buf),
-                logs = _.isArray(unpack) ? unpack : [unpack];
+            var logs = [];
+            if (message.type === 'utf8') {
+                logs = JSON.parse(message.utf8Data);
+                logs = _.isArray(logs) ? logs : [logs];
+            }
+            else{
+                var bytes = message.binaryData,
+                    buf = new Buffer(bytes.length);
+                bytes.copy(buf, 0, 0, bytes.length);
+                var unpack = msgpack.unpack(buf);
+                logs = _.isArray(unpack) ? unpack : [unpack];
+                console.log(util.format('[log-websocket] message received:\n%j', logs));
+            }
             _.each(logs, function(log){
@@ -66,2 +124,8 @@ emitter.emit('log', log);
             connection.lastMessageReceivedAt = Date.now();
+            totalMessages += 1;
+            if(totalMessages >= maxMessages){
+                suicide();
+            }
         }
@@ -87,2 +151,40 @@ });
     });
+    if(maxLife){
+        setTimeout(suicide, maxLife + Math.floor((Math.random() * maxLife)));
+    }
+    if(app){
+        var connect = require('connect');
+        app.post('/log', middleware || connect.bodyParser(), function(req, res){
+            try{
+                _.each(req.body.logs || [], function(log){
+                    //console.log(util.format('[listener] received messages:\n %j', log));
+                    //seems like a bug from body parser where duration is parsed as string instead of number;
+                    log.duration = log.duration && _.isString(log.duration) ? parseInt(log.duration, 10) : log.duration;
+                    log.timestamp = log.timestamp && _.isString(log.timestamp) ? parseInt(log.timestamp, 10) : log.timestamp;
+                    emitter.emit('log', log);
+                });
+            }
+            catch(e){
+                console.log('error handling log post:\n' + e);
+            }
+            res.send(200, '');
+        });
+        app.get('/ws', function(req, res){
+            console.log('[listener] accepted websocket request');
+            res.send(util.format('ws://%s:8080/', machineName), 200);
+        });
+    }
 };
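The new `/log` route gives clients an HTTP fallback when websockets are unavailable. A hedged client-side sketch, posting a JSON body of the shape the handler above reads (`{ logs: [...] }`); the host and port are assumptions (3000 is the default shown earlier), and `request` is already in package.json:

```
var request = require('request');
request.post({
    'url': 'http://localhost:3000/log',
    'json': {
        'logs': [
            { 'type': 'atomicEvent', 'level': 'info', 'msg': 'hello over http', 'timestamp': Date.now() }
        ]
    }
}, function(err, res, body){
    if(err){ console.log('log post failed: ' + err); }
});
```

Note the handler's string-to-number coercion: `duration` and `timestamp` posted as strings are `parseInt`ed before the `log` event is emitted, working around the body-parser quirk called out in the comment.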
winston-publisher.js
 'use strict';
-var winston = require("winston");
+var winston = require('winston');
-var WinstonPublisher = exports.WinstonPublisher = function(emitter){
+var WinstonPublisher = exports.WinstonPublisher = function(emitter, options){
     //a specific publisher, could be as simple as log file appender
+    options = options || {};
     var logger = new (winston.Logger)({
-        transports: [
-            new winston.transports.File({ filename: './logs/all.log' })
-        ],
-        exceptionHandlers: [
-            new winston.transports.File({ filename: './logs/exceptions.log' })
-        ]
+        transports: options.transports || [
+            new winston.transports.File({ filename: './log/all.log' })
+        ],
+        exceptionHandlers: options.exceptionHandlers || [
+            new winston.transports.File({ filename: './log/exceptions.log' })
+        ]
     });
-    emitter.on("atomicEvent", function(atomicEvent){
+    emitter.on('atomicEvent', function(atomicEvent){
         logger.log(atomicEvent.level, atomicEvent.msg, atomicEvent);
     });
-    emitter.on("heartbeat", function(heartbeat){
-        logger.log(atomicEvent.level, heartbeat.msg, heartbeat);
+    emitter.on('heartbeat', function(heartbeat){
+        logger.log(heartbeat.level, heartbeat.msg, heartbeat);
     });
-    emitter.on("transaction", function(tx){
-        logger.log(atomicEvent.level, tx.msg, tx);
+    emitter.on('transaction', function(tx){
+        logger.log(tx.level, tx.msg, tx);
     });
 };
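Besides fixing the copy-paste bug (the heartbeat and transaction handlers were logging at `atomicEvent.level`, a ReferenceError in those scopes), 0.3.0 lets callers inject their own winston transports. A hedged usage sketch, assuming `emitter` is the cluster's shared EventEmitter:

```
var winston = require('winston'),
    WinstonPublisher = require('./winston-publisher.js').WinstonPublisher;
new WinstonPublisher(emitter, {
    'transports': [
        new (winston.transports.Console)({ 'level': 'info' })  //console instead of the default file appenders
    ]
});
```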
ws-transport.js
@@ -14,3 +14,4 @@ 'use strict';
     'connection': null,
-    'queue': [],
+    'adhocs': [],
+    'groups': {},
     'url': options.url,
@@ -33,3 +34,3 @@ 'protocol': options.protocol || 'log-protocol',
     _this.connection = null;
-    setTimeout(_.bind(_this.connect, _this, url, protocol), _this.reconnectInterval);//wait for 1 sec and continue reconnect
+    setTimeout(_.bind(_this.reconnect, _this, url, protocol), _this.reconnectInterval);//wait for 1 sec and continue reconnect
 });
@@ -59,3 +60,3 @@
     _this.connection = null;
-    _this.connect(url, protocol);//reconnect
+    _this.reconnect(url, protocol);//reconnect
 });
@@ -65,6 +66,19 @@ });
     client.connect(url, protocol);
     }
 };
+WebSocketTransport.prototype.reconnect = function(url, protocol){
+    this.connect(url, protocol);
+};
 WebSocketTransport.prototype.log = function(message){
-    this.queue.push(message);
+    var uuid = message.uuid;
+    if(!uuid){
+        this.adhocs.push(message);
+    }
+    else{
+        var group = this.groups[uuid] || [];
+        group.push(message);
+        this.groups[uuid] = group;
+    }
 };
@@ -74,29 +88,30 @@
     if(!_.isEmpty(this.queue)){
         try{
-            //this is an enhancement which tries to mitigate the problem of possible cutoff of transactions
-            //the transactional messages will only be sent after they've been baked for 1 minute, and in the same batch
-            var threshold = Date.now() - 60000, groups = {}, adhocs = [], devide = {'graduates':[], 'youths':[]};
-            _.each(this.queue, function(elem){
-                if(elem.uuid){
-                    groups[elem.uuid] = groups[elem.uuid] || [];
-                    groups[elem.uuid].push(elem);
-                }
-                else{
-                    adhocs.push(elem);
-                }
-            });
-            connection.sendBytes(msgpack.pack(adhocs));
-            //split the groups to graduates and youths, graduates are the messages (transactional) older than 1 minute
-            _.reduce(_.values(groups), function(memoize, group){
-                var team = group[0].timestamp < threshold ? 'graduates' : 'youths';
-                memoize[team] = memoize[team].concat(gorup);
-                return memoize;
-            }, devide);
-            connection.sendBytes(msgpack.pack(devide.graduates));
-            this.queue = devide.youths;
+            var _this = this,
+                threshold = Date.now() - _this.readyThreshold,
+                graduates = [];
+            if(_this.adhocs.length){
+                connection.sendBytes(msgpack.pack(_this.adhocs));
+                _this.adhocs = [];
+            }
+            //split the groups to graduates and youths, graduates are the messages (transactional) older than 1 minute
+            var groups = _this.groups;
+            _.each(groups, function(group, uuid){
+                if(group[0].timestamp < threshold){
+                    graduates = graduates.concat(group);
+                    delete groups[uuid];
+                }
+            });
+            if(graduates.length){
+                connection.sendBytes(msgpack.pack(graduates));
+            }
         }
         catch(e){
             console.trace(e);
         }
 };
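Note that the removed 0.2.2 code contained the `concat(gorup)` typo, which silently dropped transactional messages; the rewrite also stops re-bucketing the whole queue on every drain, since `log()` now sorts messages into `adhocs` and `groups` up front. An illustration of what the rewritten drain does with each kind of message (`transport` is assumed to be a connected WebSocketTransport instance):

```
//Illustration only: how the two queues are treated.
transport.log({ msg: 'adhoc warning' });  //no uuid -> adhocs, flushed on the next drain
transport.log({ uuid: 'tx-1', clazz: 'begin', timestamp: Date.now() });
transport.log({ uuid: 'tx-1', clazz: 'end',   timestamp: Date.now() });
// groups['tx-1'] is held until its first timestamp is older than
// readyThreshold, then the whole group "graduates" and is sent as one
// msgpack batch, so the server never sees a cut-off transaction.
```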
package.json
 {
     "author": "cubejs",
     "name": "async-logging",
-    "version": "0.2.2",
+    "version": "0.3.0",
     "description": "0.1.6 is the same as 0.2.2 just to get around ebay-logging-client vs. async-logging-client change",
     "repository": {
@@ -17,8 +18,9 @@ "type": "git",
     "underscore": "~1.4.4",
-    "cluster2": "git://github.com/cubejs/cluster2.git",
+    "cluster2": "~0.5.0",
     "winston": "~0.7.1",
-    "request": "~2.22.0"
+    "request": "~2.22.0",
+    "connect": "~2.7.5"
     },
     "devDependencies": {
-    "express": "~3.2",
+    "express": "~3.1",
     "mocha": "~1.11.0",
@@ -29,4 +31,5 @@ "should": "~1.2.2",
     "scripts":{
-        "start":"node ./lib/log-server.js"
+        "start":"node ./lib/log-server.js",
+        "test": "mocha --ui bdd --timeout 10s --reporter spec ./test/*-test.js"
     }
 }
README.md
@@ -1,33 +1,64 @@
-## What is async-logging
+async-logging-node
+==================
-This module is trying to solve the discrepency between the async model & the common synchronous logging model
+This module is trying to solve the discrepancy between the async model & the common synchronous logging model
-* Logging types: heartbeat, atomic, transaction
-* Logging params: type, level, msg, uuid, event, parent
-* Transaction: this is the key structure we're trying to restore from async events model
-* Logging proxy: part of this module is trying to build a proxy service running websocket server accepting log payload as message pack
-* Logging client: the other part is a client which runs in the application runtime, connecting to websocket, transfer the log event over
+* **Logging types**: heartbeat, atomic, transaction
+* **Logging params**: type, level, msg, uuid, event, parent
+* **Transaction**: this is the key structure we're trying to restore from async events model
+* **Logging proxy**: part of this module builds a proxy service which is a websocket server accepting log payload as message pack
+* **Logging client**: the other part is a client which runs in the application runtime, connecting to websocket, and transferring the log event over
+* **MonApp**: optional monitoring app which can generate heartbeat logs
+## API
+|Function | Description |
+|---|---|
+|**LogCluster**| |
+|`require('log-cluster').LogCluster`|importing constructor|
+|`new LogCluster(options, emitter)`|constructor accepts two parameters|
+|**LogBuffer**| |
+|`require('log-buffer').LogBuffer`|importing constructor|
+|`new LogBuffer(emitter, mapper)`|constructor accepts two parameters: an emitter which emits 'log' events, and an optional mapper which can map log properties to the correct format|
+|**LogListener**| |
+|`require('log-listener').LogListener`|importing constructor|
+## Installation
+```
+npm install async-logging
+```
 ## Usage
-### Getting async-logging
-    npm install async-logging
 ### Start a proxy service
-    var LogCluster = require("log-cluster.js").LogCluster,
-        CalPublisher = require("cal-publisher.js").CalPublisher;
-    new LogCluster({LogPublisher:CalPublisher});
+```
+var LogCluster = require("log-cluster.js").LogCluster,
+    CalPublisher = require("cal-publisher.js").CalPublisher;
+new LogCluster({LogPublisher:CalPublisher});
+```
 ### Provide a Log Publisher
-    var CalPublisher = exports.CalPublisher = function(emitter, calMapper, calSender, calCallback){
-        //a specific publisher, could be as simple as log file appender
-    }
+```
+var CalPublisher = exports.CalPublisher = function(emitter, calMapper, calSender, calCallback){
+    //a specific publisher, could be as simple as log file appender
+}
+```
 ### Start a client
 new require("log-client").LogClient({url:""}); //url must be given
+## Example
+Look at lib/log-server.js. You can start it by typing the following commands from the root of the project:
+```
+npm install
+node lib/log-server.js
+```
+## LogCluster constructor options
+* `port`: port on which the cluster will run (default 3000)
+* `monPort`: port of the monitoring app if any (default 3001)
+* `cluster`: (default true)
+* `noWorkers`: number of worker processes to create
+* `connThreshold`: max number of connections to accept (default 1024)
+* `ecv`:
+* `heartbeatInterval`:
+* `LogListener`: defaults to 'log-listen.js'
+* `LogBuffer`: defaults to 'log-buffer.js'
+* `LogPublisher`: defaults to 'winston-publisher.js'
Git dependency
Supply chain risk: contains a dependency which resolves to a remote git URL. Dependencies fetched from git URLs are not immutable and can be used to inject untrusted code or reduce the likelihood of a reproducible install. Found 1 instance in 1 package.

Major refactor
Supply chain risk: the package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes. Found 1 instance in 1 package.
+ Added connect@~2.7.5
+ Added bindings@1.5.0 (transitive)
+ Added buffer-crc32@0.2.1 (transitive)
+ Added bytes@0.2.0 (transitive)
+ Added cluster2@0.5.0 (transitive)
+ Added connect@1.9.2, 2.7.11 (transitive)
+ Added cookie@0.0.5 (transitive)
+ Added cookie-signature@1.0.1 (transitive)
+ Added debug@4.4.0 (transitive)
+ Added ejs@0.8.8 (transitive)
+ Added express@2.5.11 (transitive)
+ Added file-uri-to-path@1.0.0 (transitive)
+ Added formidable@1.0.14 (transitive)
+ Added fresh@0.1.0 (transitive)
+ Added memwatch@0.2.2 (transitive)
+ Added mime@1.2.4 (transitive)
+ Added mkdirp@0.3.0 (transitive)
+ Added ms@2.1.3 (transitive)
+ Added npm@1.3.26 (transitive)
+ Added pause@0.0.1 (transitive)
+ Added qs@0.4.2, 0.6.5 (transitive)
+ Added range-parser@0.0.4 (transitive)
+ Added send@0.1.1 (transitive)
+ Added usage@0.3.10 (transitive)
+ Added when@2.4.0 (transitive)
- Removed qs@0.6.6 (transitive)
Updated cluster2@~0.5.0