async-logging
Comparing version 0.5.0-1125132700 to 0.5.0-SNAPSHOT.1386026585209
@@ -134,5 +134,8 @@ 'use strict';
-var LogBuffer = exports.LogBuffer = function(emitter, mapper){
+var LogBuffer = exports.LogBuffer = function(emitter, options){
     var _this = this;
+    options = options || {};
     _.extend(this, {
@@ -142,3 +145,3 @@ 'transactions': {},
         'heartbeats': [],
-        'mapper': mapper || DEFAULT_MAPPER
+        'mapper': options.mapper || DEFAULT_MAPPER
     });
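The hunks above change `LogBuffer`'s second parameter from a positional mapper to an options object, falling back to `DEFAULT_MAPPER` when `options.mapper` is absent. A minimal sketch of how a caller might adapt to the new signature, assuming the require path from the README's API table and a purely illustrative mapper:

```js
var EventEmitter = require('events').EventEmitter,
    LogBuffer = require('log-buffer').LogBuffer;

var emitter = new EventEmitter();

// Old call shape: new LogBuffer(emitter, myMapper);
// New call shape: the mapper travels inside an options object.
var buffer = new LogBuffer(emitter, {
    'mapper': function(log){
        // hypothetical mapper: normalize the event name before buffering
        log.event = String(log.event || '').toLowerCase();
        return log;
    }
});
```

Callers that passed no mapper at all are unaffected, since `options = options || {}` keeps `new LogBuffer(emitter)` working and `DEFAULT_MAPPER` still applies.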
@@ -16,4 +16,3 @@ 'use strict';
     os = require('os'),
-    _ = require('underscore'),
-    wss = null;
+    _ = require('underscore');
@@ -31,5 +30,37 @@ var LogCluster = exports.LogCluster = function(options, emitter){
         'configureApp': function(app){
-            wss = options.websocket
-                ? new WebSocketServer({
-                    'httpServer': server,
+            var cleanDuration = options.cleanDuration,
+                cleanUpTimeout = null,
+                cleanUpCycle = function(){
+                    emitter.emit('clean', Date.now() - cleanDuration);//this is a bugfix, we used to use till += cleanDuration, which could grow a bigger gap with each cycle
+                    cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);//this guarantees that the next cycle is started only after the previous is finished.
+                };
+            //init timeout
+            cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);
+            //unregister the timer to unblock the cluster shutdown
+            process.once('SIGINT', _.bind(clearTimeout, null, cleanUpTimeout));
+            process.once('SIGTERM', _.bind(clearTimeout, null, cleanUpTimeout));
+            return app;
+        },
+        'app': options.app || function(request, response){
+            console.log('[log-cluster] received: ' + request.url);
+            response.send('', 404);
+        },
+        'port': options.port || 3000,
+        'monApp': options.monApp,
+        'monPort': options.monPort || 3001,
+        'noWorkers': options.noWorkers || Math.min(4, os.cpus().length),
+        'connThreshold': options.connThreshold || 1024,
+        'heartbeatInterval': options.heartbeatInterval || 60000,
+        'cache': {
+            'enable': true,
+            'mode': 'master'
+        }
+    })
+    .then(function(resolves){
+        if(!resolves.master){
+            var wss = options.websocket ? new WebSocketServer({
+                'httpServer': resolves.server,
                 // You should not use autoAcceptConnections for production
@@ -59,28 +90,4 @@ // applications, as it defeats all standard cross-origin protection
                 }
             },
-        'app': options.app || function(req, res){
-            console.log('[log-cluster] received: ' + request.url);
-            response.send('', 404);
-        },
-        'port': options.port || 3000,
-        'monApp': options.monApp,
-        'monPort': options.monPort || 3001,
-        'noWorkers': options.noWorkers || Math.min(4, os.cpus().length),
-        'connThreshold': options.connThreshold || 1024,
-        'heartbeatInterval': options.heartbeatInterval || 60000,
-        'warmUp': function(app){
-            var cleanDuration = options.cleanDuration,
-                cleanUpTimeout = null,
-                cleanUpCycle = function(){
-                    emitter.emit('clean', Date.now() - cleanDuration);//this is a bugfix, we used to use till += cleanDuration, which could grow bigger gap with each cycle
-                    cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);//this guarantees that the next cycle is started only after the previous is finished.
-                };
-            //init timeout
-            cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);
-            //unregister the timer to unblock the cluster shutdown
-            process.once('SIGINT', _.bind(clearTimeout, null, cleanUpTimeout));
-            process.once('SIGTERM', _.bind(clearTimeout, null, cleanUpTimeout));
-        }
-    }
     });
 };
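The block moved into `configureApp` above implements a self-rescheduling clean-up cycle: each pass emits `clean` with an absolute cut-off of `Date.now() - cleanDuration` (instead of accumulating `till += cleanDuration`, which let the gap drift), re-arms the timer only after the pass completes, and clears the pending timer on SIGINT/SIGTERM so shutdown is not blocked. Below is a standalone sketch of the same pattern, assuming nothing beyond Node's `EventEmitter`; the helper name and the five-minute values are illustrative, not part of the module.

```js
var EventEmitter = require('events').EventEmitter;

// Hypothetical helper mirroring the clean-up cycle in the diff above.
function startCleanUpCycle(emitter, cleanDuration){
    var cleanUpTimeout = null,
        cleanUpCycle = function(){
            // Emit an absolute cut-off so the gap cannot grow between cycles.
            emitter.emit('clean', Date.now() - cleanDuration);
            // Re-arm only after the previous pass has run, so cycles never overlap.
            cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);
        };

    cleanUpTimeout = setTimeout(cleanUpCycle, cleanDuration);

    // Clearing the pending timer lets the process exit cleanly on shutdown.
    var stop = function(){ clearTimeout(cleanUpTimeout); };
    process.once('SIGINT', stop);
    process.once('SIGTERM', stop);
    return stop;
}

// Usage: sweep buffered transactions older than five minutes, every five minutes.
var emitter = new EventEmitter();
emitter.on('clean', function(till){
    console.log('drop buffered transactions older than', new Date(till));
});
startCleanUpCycle(emitter, 5 * 60 * 1000);
```

One deliberate difference from the diff: the signal handlers read the timer handle through a closure rather than `_.bind(clearTimeout, null, cleanUpTimeout)`, so they always clear the most recently scheduled timeout instead of the handle captured at registration time.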
{
  "author": "cubejs",
  "name": "async-logging",
  "version": "0.5.0-1125132700",
  "description": "async-logging working with cluster ~0.5.0",
  "repository": {
    "type": "git",
    "url": "https://github.com/cubejs/async-logging-node.git"
  },
  "engines": {
    "node": ">= 0.8.0"
  },
  "main":"./lib/index.js",
  "dependencies": {
    "websocket": "~1.0.8",
    "msgpack": "0.1.8",
    "underscore": "~1.4.4",
    "cluster2": "~0.5.0",
    "winston": "~0.7.1",
    "request": "~2.22.0",
    "connect": "~2.7.5"
  },
  "devDependencies": {
    "express": "~3.1",
    "mocha": "~1.11.0",
    "should": "~1.2.2",
    "when": "~2.4.0"
  },
  "scripts":{
    "start":"node ./lib/log-server.js",
    "pretest": "rm -rf ./cluster-cache-persist ./cluster-cache-domain",
    "test": "mocha --ui bdd --timeout 10s --reporter spec ./test/*-test.js"
  }
}
"author": "cubejs", | ||
"name": "async-logging", | ||
"version": "0.5.0-SNAPSHOT.1386026585209", | ||
"description": "async-logging working with cluster ~0.5.0", | ||
"repository": { | ||
"type": "git", | ||
"url": "https://github.com/cubejs/async-logging-node.git" | ||
}, | ||
"engines": { | ||
"node": ">= 0.8.0" | ||
}, | ||
"main": "./lib/index.js", | ||
"dependencies": { | ||
"websocket": "~1.0.8", | ||
"msgpack": "0.1.8", | ||
"underscore": "~1.4.4", | ||
"cluster2": "~0.5.0", | ||
"winston": "~0.7.1", | ||
"request": "~2.22.0", | ||
"connect": "~2.7.5" | ||
}, | ||
"devDependencies": { | ||
"express": "~3.1", | ||
"mocha": "~1.11.0", | ||
"should": "~1.2.2", | ||
"when": "~2.4.0" | ||
}, | ||
"scripts": { | ||
"start": "node ./lib/log-server.js", | ||
"pretest": "rm -rf ./cluster-cache-persist ./cluster-cache-domain", | ||
"test": "mocha --ui bdd --timeout 10s --reporter spec ./test/*-test.js" | ||
}, | ||
"publishConfig": { | ||
"registry": "https://registry.npmjs.org" | ||
}, | ||
"ebay": {} | ||
} |
@@ -1,33 +1,64 @@
-## What is async-logging
+async-logging-node
+==================
-This module is trying to solve the discrepency between the async model & the common synchronous logging model
+This module is trying to solve the discrepancy between the async model & the common synchronous logging model
-* Logging types: heartbeat, atomic, transaction
-* Logging params: type, level, msg, uuid, event, parent
-* Transaction: this is the key structure we're trying to restore from async events model
-* Logging proxy: part of this module is trying to build a proxy service running websocket server accepting log payload as message pack
-* Logging client: the other part is a client which runs in the application runtime, connecting to websocket, transfer the log event over
+* **Logging types**: heartbeat, atomic, transaction
+* **Logging params**: type, level, msg, uuid, event, parent
+* **Transaction**: this is the key structure we're trying to restore from the async events model
+* **Logging proxy**: part of this module builds a proxy service which is a websocket server accepting log payloads as message pack
+* **Logging client**: the other part is a client which runs in the application runtime, connecting to the websocket and transferring the log event over
+* **MonApp**: optional monitoring app which can generate heartbeat logs
+## API
+|Function | Description |
+|---|---|
+|**LogCluster**|
+|`require('log-cluster').LogCluster`|importing constructor|
+|`new LogCluster(options, emitter)`|constructor accepts two parameters: options and an emitter|
+|**LogBuffer**|
+|`require('log-buffer').LogBuffer`|importing constructor|
+|`new LogBuffer(emitter, mapper)`|constructor accepts two parameters: an emitter which emits 'log' events and an optional mapper which can map log properties to the correct format|
+|**LogListener**|
+|`require('log-listener').LogListener`|importing constructor|
+## Installation
+```
+npm install async-logging
+```
 ## Usage
-### Getting async-logging
-npm install async-logging
 ### Start a proxy service
-var LogCluster = require("log-cluster.js").LogCluster,
+```
+var LogCluster = require("log-cluster.js").LogCluster,
     CalPublisher = require("cal-publisher.js").CalPublisher;
-new LogCluster({LogPublisher:CalPublisher});
+new LogCluster({LogPublisher:CalPublisher});
+```
 ### Provide a Log Publisher
-var CalPublisher = exports.CalPublisher = function(emitter, calMapper, calSender, calCallback){
-    //a specific publisher, could be as simple as log file appender
-}
+```
+var CalPublisher = exports.CalPublisher = function(emitter, calMapper, calSender, calCallback){
+    //a specific publisher, could be as simple as log file appender
+}
+```
 ### Start a client
 new require("log-client").LogClient({url:""}); //url must be given
+## Example
+Look at lib/log-server.js. You can start it by typing the following commands from the root of the project:
+```
+npm install
+node lib/log-server.js
+```
 ## LogCluster constructor options
 * `port`: port on which the cluster will run (default 3000)
 * `monPort`: port of the monitoring app, if any (default 3001)
 * `cluster`: (default true)
 * `noWorkers`: number of worker processes to create
 * `connThreshold`: max number of connections to accept (default 1024)
 * `ecv`:
 * `heartbeatInterval`:
 * `LogListener`: defaults to 'log-listen.js'
 * `LogBuffer`: defaults to 'log-buffer.js'
 * `LogPublisher`: defaults to 'winston-publisher.js'
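Pulling the README's proxy-service snippet together with the options listed above, a start-up call might look like the sketch below. The require paths follow the README's own snippets, the emitter is passed explicitly because the constructor signature in the diff is `function(options, emitter)`, and the chosen values are illustrative; the defaults noted in the comments come from the log-cluster.js diff.

```js
var EventEmitter = require('events').EventEmitter,
    LogCluster = require('log-cluster.js').LogCluster,
    CalPublisher = require('cal-publisher.js').CalPublisher;

var emitter = new EventEmitter();

new LogCluster({
    'port': 3000,                   // default 3000
    'monPort': 3001,                // default 3001
    'noWorkers': 2,                 // default Math.min(4, os.cpus().length)
    'connThreshold': 1024,          // default 1024
    'heartbeatInterval': 60000,     // default 60000
    'cleanDuration': 5 * 60 * 1000, // drives the clean-up cycle in configureApp
    'LogPublisher': CalPublisher    // defaults to 'winston-publisher.js'
}, emitter);
```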