cluster-service
Comparing version 0.4.9 to 0.4.10
@@ -19,7 +19,8 @@ var
onWorkerStop: true,
restartOnFailure: true,
options: {
host: "localhost",
port: 11987,
restartOnFailure: true,
restartDisabled: false,
accessKey: undefined,
workers: undefined,
workerCount: os.cpus().length,
@@ -29,4 +30,3 @@ restartDelayMs: 100,
restartsPerMinute: 10, // not yet supported
cliEnabled: true,
workerReady: false,
cli: true,
silent: false,
@@ -63,3 +63,3 @@ log: console.log,
exports.start = function(workerPath, options, masterCb) {
exports.start = function(options, masterCb) {
if (cluster.isWorker === true) {
@@ -78,3 +78,3 @@ // ignore starts if not master. do NOT invoke masterCb, as that is reserved for master callback
if (ext === ".js") { // if js file, use as worker
options.worker = options._[0];
options.workers = options._[0];
} else if (ext === ".json") { // if json file, use as config
@@ -85,3 +85,3 @@ options.config = options._[0];
if (options.json === true) {
options.cliEnabled = false;
options.cli = false;
}
@@ -92,29 +92,18 @@ }
if (workerPath && typeof workerPath === "object") { // worker
masterCb = options;
options = workerPath;
workerPath = null;
}
options = options || {};
if (workerPath === null) { // NO worker option if explicit
options.worker = null;
} else if (typeof workerPath === "string") { // legacy support, and configuration file support
if (path.extname(workerPath).toLowerCase() === ".json") {
options = JSON.parse(fs.readFileSync(workerPath));
workerPath = null;
} else {
options.worker = workerPath;
}
} else if ("config" in options) {
if ("config" in options) {
options = JSON.parse(fs.readFileSync(options.config));
workerPath = null;
}
locals.options = options = extend(true, {}, locals.options, options);
if (typeof options.worker === "undefined") {
delete locals.options.workers; // always replace workers, not extend it
locals.options = options = extend(true, locals.options, options);
if (typeof options.workers === "undefined") {
// only define default worker if worker is undefined (null is reserved for "no worker")
options.worker = "./worker.js"; // default worker to execute
options.workers = "./worker.js"; // default worker to execute
}
colors.setTheme(options.colors);
require("./lib/legacy");
if (options.run) {
@@ -206,3 +195,3 @@ require("./lib/run").start(options, function(err, result) {
exports.debug = function () {
if (locals.options.cliEnabled === true && locals.options.debug) {
if (locals.options.cli === true && locals.options.debug) {
var args = Array.prototype.slice.call(arguments);
@@ -224,3 +213,3 @@ for (var i = 0; i < args.length; i++) {
exports.log = function () {
if (locals.options.cliEnabled === true && locals.options.log) {
if (locals.options.cli === true && locals.options.log) {
var args = Array.prototype.slice.call(arguments);
@@ -237,3 +226,3 @@ if (args.length > 0 && typeof args[0] === "string" && args[0][0] === "{") {
exports.error = function() {
if (locals.options.cliEnabled === true && locals.options.error) {
if (locals.options.cli === true && locals.options.error) {
var args = Array.prototype.slice.call(arguments);
@@ -294,3 +283,3 @@ for (var i = 0; i < args.length; i++) {
// allow worker to inform the master when ready to speed up initialization
process.send({ cservice: { cmd: "workerReady", onWorkerStop: (typeof options.onWorkerStop === "function") } });
process.send({ cservice: { cmd: "workerReady", onStop: (typeof options.onWorkerStop === "function") } });
};
@@ -336,22 +325,25 @@
exports.newWorker = function(workerPath, cwd, options, cb) {
if (typeof options === "function") {
cb = options;
options = {};
}
if (typeof cb !== "function") {
throw new Error("Callback required");
}
workerPath = workerPath || "./worker";
if (workerPath.indexOf(".") === 0 || (workerPath.indexOf("//") !== 0 && workerPath.indexOf(":\\") < 0)) {
exports.newWorker = function(options, cb) {
options = extend(true, {}, {
worker: "./worker.js",
ready: true,
count: undefined,
restart: true,
cwd: undefined,
onStop: false
}, options);
if (options.worker.indexOf(".") === 0 || (options.worker.indexOf("//") !== 0 && options.worker.indexOf(":\\") < 0)) {
// resolve if not absolute
workerPath = path.resolve(workerPath);
options.worker = path.resolve(options.worker);
}
options = options || {};
var worker = cluster.fork({ "workerPath": workerPath, "cwd": (cwd || process.cwd()) });
worker.cservice = { workerReady: (options.workerReady === true ? false : true), onWorkerStop: false, onWorkerReady: cb, workerPath: workerPath, options: options };
options.cwd = options.cwd || process.cwd();
options.onReady = cb;
if (options.wasReady === false) {
options.ready = false; // preserve preference between restarts, etc
}
var worker = cluster.fork(options);
worker.cservice = options;
worker.on("message", onMessageFromWorker);
if (worker.cservice.workerReady === true && typeof cb === "function") {
setTimeout(cb, 10); // if worker already ready (default), invoke cb now
// why async? to allow worker to be returned to caller
if (worker.cservice.ready === true && typeof cb === "function") {
cb(null, worker);
}
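For orientation, a small sketch of how the reworked `newWorker(options, cb)` signature might be called. The worker path and option values below are illustrative only; the option names mirror the defaults in the hunk above:

```
// hypothetical usage of the options-based newWorker() introduced in 0.4.10
var cservice = require("cluster-service");

cservice.newWorker({
  worker: "./workers/api.js", // resolved to an absolute path when relative
  cwd: process.cwd(),         // working directory passed to the forked worker
  ready: false,               // wait for the worker's "workerReady" message
  restart: true               // allow restart/upgrade commands to replace it
}, function (err, worker) {
  if (err) { return console.error(err); }
  console.log("worker ready:", worker.process.pid);
});
```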
@@ -371,6 +363,7 @@
case "workerReady":
if (worker.cservice.workerReady === false) {
worker.cservice.workerReady = true;
worker.cservice.onWorkerStop = (msg.cservice.onWorkerStop === true);
worker.cservice.onWorkerReady && worker.cservice.onWorkerReady();
if (worker.cservice.ready === false) {
worker.cservice.wasReady = false; // preserve preference between restarts, etc
worker.cservice.ready = true;
worker.cservice.onStop = (msg.cservice.onStop === true);
worker.cservice.onReady && worker.cservice.onReady(null, worker);
}
@@ -399,3 +392,3 @@ break;
// load the worker if not already loaded
cluster.worker.module = require(process.env.workerPath);
cluster.worker.module = require(process.env.worker);
}
@@ -28,3 +28,3 @@ var
try {
rl.close();
process.stdin.pause();
} catch (ex) {
@@ -31,0 +31,0 @@ }
@@ -7,5 +7,6 @@ var
module.exports = function(evt, cb, cmd, timeout) {
module.exports = function(evt, cb, cmd, options) {
var pid = parseInt(cmd);
timeout = parseInt(timeout) || 60000;
options = options || {};
options.timeout = parseInt(options.timeout) || 60000;
if (typeof cmd !== "string" || (cmd !== "all" && !pid)) {
@@ -17,4 +18,4 @@ cb("Invalid request. Try help restart");
evt.locals.reason = "restart";
var originalAutoRestart = evt.locals.options.restartOnFailure;
evt.locals.options.restartOnFailure = false;
var originalAutoRestart = evt.locals.restartOnFailure;
evt.locals.restartOnFailure = false;
@@ -30,3 +31,3 @@ var workers = evt.service.workers;
tasks.push(getTask(evt, worker, timeout, (pid ? true : false)));
tasks.push(getTask(evt, worker, options, (pid ? true : false)));
}
@@ -37,6 +38,6 @@
} else {
cservice.log("Restarting workers... timeout: " + timeout);
cservice.log("Restarting workers... timeout: " + options.timeout);
async.series(tasks, function(err) {
evt.locals.options.restartOnFailure = originalAutoRestart;
evt.locals.restartOnFailure = originalAutoRestart;
@@ -55,9 +56,10 @@ if (err) {
info: "Gracefully restart service, waiting up to timeout before terminating workers.",
command: "restart all|pid [timeout]",
command: "restart all|pid { \"option1\": \"value\" }",
"all|pid": "Required. 'all' to force shutdown of all workers, otherwise the pid of the specific worker to restart",
"timeout": "Timeout, in milliseconds, before terminating workers. 0 for infinite wait."
"options": "An object of options.",
"* timeout": "Timeout, in milliseconds, before terminating workers. 0 for infinite wait."
});
};
function getTask(evt, worker, timeout, explicitRestart) {
function getTask(evt, worker, options, explicitRestart) {
return function(cb) {
if (worker.cservice.options.restartDisabled === true && explicitRestart === false) { | ||
if (worker.cservice.restart === false && explicitRestart === false) { | ||
cservice.log("Worker process " + worker.process.pid + " immune to restarts"); | ||
@@ -82,3 +84,3 @@ cb(); | ||
if (timeout > 0) { // start timeout if specified | ||
if (options.timeout > 0) { // start timeout if specified | ||
new_killer = setTimeout(function() { | ||
@@ -92,7 +94,7 @@ var w = new_worker; | ||
cb("timed out"); | ||
}, timeout); | ||
}, options.timeout); | ||
} | ||
// lets start new worker | ||
new_worker = evt.service.newWorker(worker.cservice.workerPath, worker.cservice.cwd, worker.cservice.options, function(err) { | ||
var new_worker = evt.service.newWorker(worker.cservice, function(err, new_worker) { | ||
new_worker.removeListener("exit", exit_listener); // remove temp listener | ||
@@ -106,6 +108,6 @@ new_worker = null; | ||
var killer = null; | ||
if (timeout > 0) { // start timeout if specified | ||
if (options.timeout > 0) { // start timeout if specified | ||
killer = setTimeout(function() { | ||
worker.kill("SIGKILL"); // go get'em, killer | ||
}, timeout); | ||
}, options.timeout); | ||
} | ||
@@ -119,6 +121,6 @@ | ||
// exit complete, fire callback | ||
setTimeout(cb, 250); // slight delay in case other events are piled up | ||
setTimeout(cb, 100); // slight delay in case other events are piled up | ||
}); | ||
if (worker.cservice.onWorkerStop === true) { | ||
if (worker.cservice.onStop === true) { | ||
worker.send({ cservice: { cmd: "onWorkerStop" } }); | ||
@@ -125,0 +127,0 @@ } else { |
@@ -6,5 +6,6 @@ var
module.exports = function(evt, cb, cmd, timeout) {
module.exports = function(evt, cb, cmd, options) {
var pid = parseInt(cmd);
timeout = parseInt(timeout) || 60000;
options = options || {};
options.timeout = parseInt(options.timeout) || 60000;
if (typeof cmd !== "string" || (cmd !== "all" && !pid)) {
@@ -30,3 +31,3 @@ cb("Invalid request. Try help shutdown");
worker.process.on("exit", getExitHandler(evt, worker, timeout > 0 ? setTimeout(getKiller(worker), timeout) : null, function() {
worker.process.on("exit", getExitHandler(evt, worker, options.timeout > 0 ? setTimeout(getKiller(worker), options.timeout) : null, function() {
workersToKill--;
@@ -38,3 +39,3 @@ if (workersToKill === 0) {
cservice.log("All workers shutdown. Exiting...");
evt.service.stop(timeout, cb);
evt.service.stop(options.timeout, cb);
} else {
@@ -56,3 +57,3 @@ cb(null, "Worker shutdown"); // DONE
cservice.log("All workers shutdown. Exiting...");
evt.service.stop(timeout, cb);
evt.service.stop(options.timeout, cb);
} else {
@@ -62,3 +63,3 @@ cb("No workers were shutdown");
} else {
cservice.log("Killing workers... timeout: " + (timeout || 0));
cservice.log("Killing workers... timeout: " + (options.timeout || 0));
}
@@ -70,5 +71,6 @@ };
info: "Gracefully shutdown service, waiting up to timeout before terminating workers.",
command: "shutdown all|pid [timeout]",
command: "shutdown all|pid { \"option1\": \"value\" }",
"all|pid": "Required. 'all' to force shutdown of all workers, otherwise the pid of the specific worker to shutdown",
"timeout": "Timeout, in milliseconds, before terminating workers. 0 for infinite wait."
"options": "An object of options.",
"* timeout": "Timeout, in milliseconds, before terminating workers. 0 for infinite wait."
});
@@ -75,0 +77,0 @@ };
@@ -10,5 +10,6 @@ var
options.cwd = options.cwd || process.cwd();
options.workerCount = parseInt(options.workerCount) || 1;
options.count = parseInt(options.count) || 1;
options.timeout = parseInt(options.timeout) || 60000;
if (typeof workerPath !== "string" || options.workerCount < 1) {
options.worker = workerPath;
if (typeof workerPath !== "string" || options.count < 1) {
cb("Invalid request. Try help start");
@@ -19,3 +20,3 @@ return;
evt.locals.reason = "start";
evt.locals.options.restartOnFailure = false;
evt.locals.restartOnFailure = false;
@@ -26,4 +27,4 @@ var tasks = [];
for (var i = 0; i < options.workerCount; i++) {
tasks.push(getTask(evt, workerPath, options));
for (var i = 0; i < options.count; i++) {
tasks.push(getTask(evt, options));
}
@@ -47,9 +48,9 @@
"* cwd": "Path to set as the current working directory. If not provided, existing cwd will be used.",
"* workerCount": "The number of workers to start, or 1 if not specified.",
"* count": "The number of workers to start, or 1 if not specified.",
"* timeout": "Timeout, in milliseconds, before terminating replaced workers. 0 for infinite wait.",
"* workerReady": "If true, will wait for workerReady event before assuming success."
"* ready": "If false, will wait for workerReady event before assuming success."
});
};
function getTask(evt, workerPath, options) {
function getTask(evt, options) {
return function(cb) {
@@ -75,3 +76,3 @@
// lets start new worker
var new_worker = evt.service.newWorker(workerPath, options.cwd, options, function(err) {
var new_worker = evt.service.newWorker(options, function(err) {
new_worker.removeListener("exit", exit_listener); // remove temp listener
@@ -78,0 +79,0 @@ if (new_killer) { // timeout no longer needed
var
async = require("async"),
util = require("util"),
extend = require("extend"),
cservice = require("../../cluster-service")
@@ -10,4 +11,4 @@ ;
options = options || {};
options.cwd = options.cwd || process.cwd();
options.timeout = parseInt(options.timeout) || 60000;
options.worker = workerPath;
if (typeof cmd !== "string" || typeof workerPath !== "string" || (cmd !== "all" && !pid)) {
@@ -19,4 +20,4 @@ cb("Invalid request. Try help upgrade");
evt.locals.reason = "upgrade";
var originalAutoRestart = evt.locals.options.restartOnFailure;
evt.locals.options.restartOnFailure = false;
var originalAutoRestart = evt.locals.restartOnFailure;
evt.locals.restartOnFailure = false;
@@ -32,3 +33,6 @@ var workers = evt.service.workers;
tasks.push(getTask(evt, worker, workerPath, options));
// use original worker options as default, by overwrite using new options
var workerOptions = extend(true, {}, worker.cservice, options);
tasks.push(getTask(evt, worker, workerOptions));
}
@@ -42,3 +46,3 @@
async.series(tasks, function(err) {
evt.locals.options.restartOnFailure = originalAutoRestart;
evt.locals.restartOnFailure = originalAutoRestart;
@@ -66,3 +70,3 @@ if (err) {
function getTask(evt, worker, workerPath, options) {
function getTask(evt, worker, options) {
return function(cb) {
@@ -88,4 +92,3 @@
// lets start new worker
var new_worker = evt.service.newWorker(workerPath, options.cwd, worker.cservice.options, function(err) {
var new_worker = evt.service.newWorker(options, function(err, new_worker) {
new_worker.removeListener("exit", exit_listener); // remove temp listener
@@ -113,3 +116,3 @@ if (new_killer) { // timeout no longer needed
if (worker.cservice.onWorkerStop === true) {
if (worker.cservice.onStop === true) {
worker.send({ cservice: { cmd: "onWorkerStop" } });
@@ -116,0 +119,0 @@ } else {
@@ -17,2 +17,6 @@ var
require("./commands/version")({}, function(err, ver) {
cservice.log("cluster-service v".info + ver.data + " starting...".info);
});
/*process.on("uncaughtException", function(err) {
@@ -46,11 +50,11 @@ cservice.log("uncaughtException", util.inspect(err));
// do not restart if there is a reason, or disabled
/*if (typeof (cservice.locals.reason) === "undefined" && worker.suicide !== true && options.restartOnFailure === true) {
if (typeof (cservice.locals.reason) === "undefined" && worker.suicide !== true && cservice.locals.restartOnFailure === true) {
setTimeout(function() {
// lets replace lost worker.
cservice.newWorker(worker.cservice.worker, null, options);
cservice.newWorker(worker.cservice);
}, options.restartDelayMs);
}*/
}
});
if (options.cliEnabled === true) {
if (options.cli === true) {
// wire-up CLI
@@ -74,13 +78,25 @@ cli = require("./cli");
});
} else if (cservice.locals.isAttached === false && typeof options.worker === "string") { // if we're NOT attached, we can spawn the workers now
} else if (cservice.locals.isAttached === false && typeof options.workers !== null) { // if we're NOT attached, we can spawn the workers now
// fork it, i'm out of here
var workersRemaining = options.workerCount;
for (var i = 0; i < options.workerCount; i++) {
cservice.newWorker(options.worker, null, options, function() {
workersRemaining--;
if (workersRemaining === 0) {
cb && cb();
}
});
var workersRemaining = 0;
var workersForked = 0;
var workers = typeof options.workers === "string" ? { "worker": { worker: options.workers } } : options.workers;
var i;
for (var workerName in workers) {
var worker = workers[workerName];
var workerCount = worker.count || options.workerCount;
workersRemaining += workerCount;
workersForked += workerCount;
for (i = 0; i < workerCount; i++) {
cservice.newWorker(worker, function() {
workersRemaining--;
if (workersRemaining === 0) {
cb && cb();
}
});
}
}
if (workersForked === 0) { // if no forking took place, make sure cb is invoked
cb && cb();
}
} else { // nothing else to do
@@ -100,3 +116,3 @@ cb && cb();
if (!err) {
cservice.log(("Listening at " + (options.host + ":" + options.port).data).info);
cservice.log(("Listening at " + ((options.ssl ? "https://" : "http://") + options.host + ":" + options.port + "/cli").data).info);
}
@@ -103,0 +119,0 @@
{
"name": "cluster-service",
"version": "0.4.9",
"version": "0.4.10",
"author": {
@@ -5,0 +5,0 @@ "name": "Aaron Silvas",
README.md
@@ -13,3 +13,3 @@ # cluster-service
## What is it?
## About
@@ -24,3 +24,3 @@ The short answer:
Adds the ability to execute worker processes over N cores for extra service resilience,
includes worker process monitoring and restart on failure, continuous integration,
includes worker process monitoring and restart on failure, continuous deployment,
as well as HTTP & command-line interfaces for health checks, cluster commands,
@@ -40,6 +40,24 @@ and custom service commands.
Now, with a small addition of code, you can add considerably resilience and capabilities to your existing services:
Leveraging ```cluster-service``` without adding a line of code:
npm install -g cluster-service
cservice "server.js" --accessKey "lksjdf982734"
This can be done without a global install as well, by updating your ```package.json```:
```
"scripts": {
"start": "cservice server.js --accessKey lksjdf982734"
},
"dependencies": {
"cluster-service": ">=0.5.0"
}
```
npm start
Or, if you prefer to control ```cluster-service``` within your code:
// server.js
require("cluster-service").start({ worker: "./worker.js", accessKey: "lksjdf982734" });
require("cluster-service").start({ workers: "./worker.js", accessKey: "lksjdf982734" });
@@ -49,8 +67,4 @@ // worker.js
Or better yet, install ```cluster-service``` globally for stand-alone usage. No new code required:
npm install -g cluster-service
cservice "server.js" --accessKey "lksjdf982734"
## Talk to it
@@ -71,4 +85,2 @@
Check out Cluster Commands for more details.
We can also issue commands from a separate process, or even a remote machine (assuming proper access):
@@ -79,17 +91,33 @@
This will output raw json, which can be parsed and processed by the caller.
You can even pipe raw JSON, which can be parsed and processed by the caller:
cservice "restart all" --accessKey "my_access_key" --json
Check out Cluster Commands for more details.
## Start Options
When initializing your service, there are a number of options that expose various features:
When initializing your service, you have a number of options available:
require("cluster-service").start({ worker: "worker.js", accessKey: "123" });
cservice "config.json"
* worker (default: "./worker.js") - Path of worker to start.
Or within your node app:
// server.js
// inline options
require("cluster-service").start({ workers: "worker.js", accessKey: "123" });
// or via config
require("cluster-service").start({ config: "config.json" });
* workers (default: "./worker.js") - Path of worker to start. A string indicates a single worker,
forked based on value of ```workerCount```. An object indicates one or more worker objects:
```{ "worker1": { worker: "worker1.js", cwd: process.cwd(), count: 2, ready: true, restart: true } }```.
This option is automatically set if run via command-line ```cservice "worker.js"``` if
the ```.js``` extension is detected. (A fuller example follows this list.)
* accessKey - A secret key that must be specified if you wish to invoke commands to your service.
Allows CLI & REST interfaces.
* config - A filename to the configuration to load. Useful to keep options from having to be inline.
* restartOnFailure (default: true) - When a worker stops unexpectedly, should it be automatically
restarted?
This option is automatically set if run via command-line ```cservice "config.json"``` if
the ```.json``` extension is detected.
* host (default: "localhost") - Host to bind to for REST interface. (Will only bind if accessKey
@@ -102,22 +130,27 @@ is provided)
workers do not impact availability, such as task queues, and can be run as a single instance.
* restartDelayMs (default: 100) - The delay between failure detection and restart.
* restartsPerMinute (default: 10) - How many restarts are permitted by a worker in a minute
before determining too critical to recover from.
* allowHttpGet (default: false) - For development purposes, can be enabled for testing, but is
not recommended otherwise.
* cliEnabled (default: true) - Enable the command line interface. Can be disabled for background
* cli (default: true) - Enable the command line interface. Can be disabled for background
services, or test cases.
* ssl - If provided, will bind using HTTPS by passing this object as the
[TLS options](http://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener).
* run - Ability to run a command, output result in json, and exit.
* workerReady (default: false) - If true, cservice will always wait for ```workerReady``` callback
indicating the service is online. Useful for services which you want to confirm operational instead
of assuming success. Option is preserved through restarts as well.
* run - Ability to run a command, output result in json, and exit. This option is automatically
set if run via command-line ```cservice "restart all"``` and no extension is detected.
* json - If specified in conjunction with ```run```, will *only* output the result in JSON for
consumption from other tasks/services. No other data will be output.
* silent (default: false) - If true, forked workers will not send their output to parent's stdio.
* allowHttpGet (default: false) - For development purposes, can be enabled for testing, but is
not recommended otherwise.
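Taken together, a start() call using several of the options above might look like the following sketch. The worker paths, counts, and key are illustrative only; the option names come from the list above:

```
// server.js -- illustrative values only
require("cluster-service").start({
  workers: {
    web:  { worker: "./workers/web.js",  count: 2, ready: false }, // wait for workerReady
    jobs: { worker: "./workers/jobs.js", count: 1, restart: true }
  },
  accessKey: "change-me",  // required before CLI/REST commands are accepted
  host: "localhost",
  port: 11987,
  restartOnFailure: true,
  cli: true
}, function () {
  console.log("cluster-service master is up");
});
```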
## Console & REST API
A DPS Cluster Service has two interfaces, the console (stdio), and an HTTP REST API. The two interfaces are treated identical, as console input/output is piped over the REST API. The reason for the piping is that a DPS Cluster Service is intentionally designed to only support one version of the given service running at any one time, and the port binding is the resource constraint. This allows secondary services to act as console-only interfaces as they pipe all input/output over HTTP to the already running service that owns the port. This flow enables the CLI to background processes.
The REST API is locked to a "accessKey" expected in the query string. The console automatically passes this key to the REST API, but for external REST API access, the key will need to be known.
A DPS Cluster Service has two interfaces, the console (stdio), and an HTTP REST API. The two
interfaces are treated identically, as console input/output is piped over the REST API. The
reason for the piping is that a DPS Cluster Service is intentionally designed to only
support one instance of the given service running at any one time, and the port binding
is the resource constraint. This allows secondary services to act as console-only
interfaces as they pipe all input/output over HTTP to the already running service
that owns the port. This flow enables the CLI to background processes.
The REST API is locked to an "accessKey" expected in the query string. The console
automatically passes this key to the REST API, but for external REST API access,
the key will need to be known.
@@ -137,2 +170,74 @@ { host: "localhost", port: 11987, accessKey: "lksjdf982734" }
## Cluster Commands
While a Cluster Service may provide its own custom commands, the following are provided out-of-the-box.
Commands may be disabled by overriding them. (A programmatic sketch follows this list.)
* start workerPath [cwd] { [timeout:60] } - Gracefully start service, one worker at a time.
* restart all|pid { [timeout:60] } - Gracefully restart service, waiting up to timeout before terminating workers.
* shutdown all|pid { [timeout:60] } - Gracefully shutdown service, waiting up to timeout before terminating workers.
* exit now - Forcefully exits the service.
* help [cmd] - Get help.
* upgrade all|pid workerPath { [cwd] [timeout:60] } - Gracefully upgrade service, one worker at a time. (continuous deployment support).
* workers - Returns list of active worker processes.
* health - Returns health of service. Can be overridden by service to expose app-specific data.
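As the diff above shows, restart and shutdown now accept an options object in place of a bare timeout. A sketch of triggering them programmatically, mirroring the updated tests (timeout values and the pid are illustrative):

```
var cservice = require("cluster-service");

// restart every worker, waiting up to 30s before force-terminating each one
cservice.trigger("restart", function (err) {
  if (err) { return console.error("restart failed:", err); }
  console.log("all workers restarted");
}, "all", { timeout: 30000 });

// gracefully shut down a single worker by pid (passed as a string)
cservice.trigger("shutdown", function (err) {
  if (err) { return console.error("shutdown failed:", err); }
}, "12345", { timeout: 10000 });
```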
## Commands & Events
Creating custom, or overriding commands and events is as simple as:
var cservice = require("cluster-service");
cservice.on("custom", function(evt, cb, arg1, arg2) { // "custom" command
// can also fire custom events
cservice.trigger("on.custom.complete", 1, 2, 3);
});
cservice.on("test", function(evt, cb, testScript, timeout) { // we're overriding the "test" command
// arguments
// do something, no callback required (events may optionally be triggered)
});
// can also issue commands programmatically
cservice.trigger("custom", function(err) { /* my callback */ }, "arg1value", "arg2value");
## Cluster Events
Events are emitted to interested parties.
* workerStart (pid, reason) - Upon start of any worker process, the process id of the started worker. Reasons include: "start", "restart", "failure", and "upgrade".
* workerExit (pid, reason) - Upon exit of any worker process, the process id of the exited worker. Reasons include: "start", "restart", "failure", and "upgrade".
## Async Support
By default, when a process is started successfully without it exiting, it is assumed to be "running".
This behavior is not always desired however, and may optionally be controlled by:
Indicate the worker is NOT ready, via ```workers``` option:
require("cluster-service").start({ workers: { "async_worker.js": { ready: false } } });
Have the worker inform the master once it is actually ready:
// worker.js
setTimeout(function() {
// dumb example of async support
require("cluster-service").workerReady(); // we're ready!
});
Additionally, a worker may optionally perform cleanup tasks prior to exit, via:
// worker.js
require("cluster-service").workerReady({
onWorkerStop: function() { /* lets clean this place up */ }
});
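Putting the two pieces together, a minimal async worker might look like the sketch below. The HTTP server is purely illustrative; only workerReady and onWorkerStop come from the API described above:

```
// worker.js -- illustrative async worker
var http = require("http");
var cservice = require("cluster-service");

var server = http.createServer(function (req, res) {
  res.end("hello from worker " + process.pid);
});

server.listen(0, function () {
  // report ready only once the server is actually accepting connections,
  // and register a cleanup handler for graceful shutdown/restart/upgrade
  cservice.workerReady({
    onWorkerStop: function () {
      server.close(); // stop accepting new connections before the process exits
    }
  });
});
```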
## Access Control
@@ -157,5 +262,8 @@
## Continuous Deployment
Combining the Worker Process (Cluster) model with a CLI piped REST API enables the ability command the already-running service to replace existing workers with workers in a different location. This capability is still a work in progress, but initial tests are promising.
Combining the Worker Process (Cluster) model with a CLI piped REST API enables the ability
to command the already-running service to replace existing workers with workers in a
different location. This capability is still a work in progress, but initial tests are promising.
@@ -171,46 +279,5 @@ * Cluster Service A1 starts
* Upgrade reports success
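An upgrade can also be issued programmatically; a sketch based on the upgrade command's signature above (the replacement worker path is hypothetical):

```
// replace every running worker with a new build, one worker at a time
require("cluster-service").trigger("upgrade", function (err) {
  if (err) { return console.error("upgrade failed:", err); }
  console.log("workers upgraded");
}, "all", "./workers/web-v2.js", { timeout: 60000 });
```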
## Cluster Commands
While a Cluster Service may provide its own custom commands, below are provided out-of-the-box. Commands may be disabled by overriding them.
* start workerPath [cwd] [timeout:60] - Gracefully start service, one worker at a time.
* restart all|pid [timeout:60] - Gracefully restart service, waiting up to timeout before terminating workers.
* shutdown all|pid [timeout:60] - Gracefully shutdown service, waiting up to timeout before terminating workers.
* exit now - Forcefully exits the service.
* help [cmd] - Get help.
* upgrade all|pid workerPath [cwd] [timeout:60] - Gracefully upgrade service, one worker at a time. (continuous deployment support).
* workers - Returns list of active worker processes.
* health - Returns health of service. Can be overidden by service to expose app-specific data.
* test testScript [timeout:0] - (NOT YET IMPLEMENTED) A path to the test_script must be provided to perform tests against the code base within the given environment. If success (or failure) is not reported within the allotted timeout (in milliseconds, 0 for infinite), the test will be cancelled and considered a failure.
## Cluster Events
Events are emitted to interested parties.
* workerStart (pid, reason) - Upon exit of any worker process, the process id of the exited worker. Reasons include: "start", "restart", "failure", and "upgrade".
* workerExit (pid, reason) - Upon start of any worker process. Reasons include: "start", "restart", "failure", and "upgrade".
## Commands & Events
Creating custom, or overriding commands and events is as simple as:
var cservice = require("cluster-service");
cservice.on("custom", function(evt, cb, arg1, arg2) { // "custom" command
// can also fire custom events
cservice.trigger("on.custom.complete", 1, 2, 3);
};
cservice.on("test", function(evt, cb, testScript, timeout) { // we're overriding the "test" command
// arguments
// do something, no callback required (events may optionally be triggered)
};
// can also issue commands programatically
cservice.trigger("custom", function(err) { /* my callback */ }, "arg1value", "arg2value");
## Tests & Code Coverage
@@ -232,2 +299,3 @@
@@ -234,0 +302,0 @@ ## License
var exit = require('../lib/commands/exit');
var assert = require("assert");
describe('Exit cmd', function(){
describe('[Exit cmd]', function(){
it('Invalid request if cmd not equal to now', function(done){
@@ -6,0 +6,0 @@ var evt = {};
var cmd = require('../lib/commands/health');
var assert = require("assert");
describe('Health cmd', function(){
describe('[Health cmd]', function(){
it('Issue command', function(done){
@@ -6,0 +6,0 @@ cmd({}, function(nullObj, data){
@@ -7,5 +7,5 @@ var cmd = require('../lib/commands/help');
cservice.isWorker && it("WORKER", function(done) { });
cservice.isMaster && describe('Help cmd', function(){
cservice.isMaster && describe('[Help cmd]', function(){
it('Start', function(done){
cservice.start({ worker: null, workerCount: 1, accessKey: "123", cliEnabled: false }, function() {
cservice.start({ workers: null, workerCount: 1, accessKey: "123", cli: false }, function() {
assert.equal(cservice.workers.length, 0, "0 worker expected, but " + cservice.workers.length + " found");
@@ -12,0 +12,0 @@ done();
var cmd = require('../lib/commands/version');
var assert = require("assert");
describe('Version cmd', function(){
describe('[Version cmd]', function(){
it('Get version', function(done){
@@ -6,0 +6,0 @@ var pkg = require("../package.json");
@@ -7,5 +7,5 @@ var cservice = require("../cluster-service");
cservice.isWorker && it("WORKER", function(done) { });
cservice.isMaster && describe('Workers cmd', function(){
cservice.isMaster && describe('[Workers cmd]', function(){
it('Start', function(done){
cservice.start(null, { workerCount: 1, accessKey: "123", cliEnabled: false }, function() {
cservice.start({ workers: null, workerCount: 1, accessKey: "123", cli: false }, function() {
assert.equal(cservice.workers.length, 0, "0 worker expected, but " + cservice.workers.length + " found");
@@ -12,0 +12,0 @@ done();
@@ -9,5 +9,5 @@ var cservice = require("../cluster-service");
cservice.isWorker && it("WORKER", function(done) { });
cservice.isMaster && describe('REST Server', function(){
cservice.isMaster && describe('[REST Server]', function(){
it('Start worker', function(done){
cservice.start(null, { workerCount: 0, accessKey: "123", cliEnabled: false }, function() {
cservice.start({ workers: null, workerCount: 0, accessKey: "123", cli: false }, function() {
assert.equal(cservice.workers.length, 0, "0 worker expected, but " + cservice.workers.length + " found");
@@ -14,0 +14,0 @@ done();
@@ -6,67 +6,61 @@ var cservice = require("../cluster-service");
cservice.isWorker && it("WORKER", function(done) { });
cservice.isMaster && describe('Restart', function(){
describe('Start workers', function(){
it('2 worker should be running', function(done){
cservice.start("./test/workers/basic", { workerCount: 2, accessKey: "123", cliEnabled: false, workerReady: true }, function() {
assert.equal(cservice.workers.length, 2, "2 workers expected, but " + cservice.workers.length + " found");
done();
});
});
cservice.isMaster && describe('[Restart]', function(){
it('Start workers', function(done){
assert.equal(cservice.workers.length, 0, "0 workers expected, but " + cservice.workers.length + " found");
cservice.start({ workers: { basic2: { worker: "./test/workers/basic2", count: 2, ready: false } }, accessKey: "123", cli: false }, function() {
assert.equal(cservice.workers.length, 2, "2 workers expected, but " + cservice.workers.length + " found");
done();
});
});
it('Bad input #1', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "Invalid request. Try help restart");
done();
}, "GG"
);
});
describe('Bad input #1', function(){
it('Bad input command', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "Invalid request. Try help restart");
done();
}, "GG"
);
});
it('Bad input #2', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "Invalid request. Try help restart");
done();
}, 0
);
});
describe('Bad input #2', function(){
it('Bad input type', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "Invalid request. Try help restart");
done();
}, 0
);
});
it('Restart without timeout', function(done){
cservice.trigger("restart", function() {
assert.equal(cservice.workers.length, 2, "2 workers expected, but " + cservice.workers.length + " found");
done();
}, "all", { timeout: 30000 } // with timeout
);
});
describe('Restart workers', function(){
it('2 workers should be running', function(done){
cservice.trigger("restart", function() {
it('Restart with timeout', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "timed out");
setTimeout(function() {
assert.equal(cservice.workers.length, 2, "2 workers expected, but " + cservice.workers.length + " found");
done();
}, "all", 30000 /* with timeout */
);
});
it('timeout', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "timed out");
done();
}, "all", 100 /* with timeout */
);
});
}, 1000);
}, "all", { timeout: 1 } // with timeout
);
});
describe('Stop workers', function(){
it('0 workers should be running', function(done){
cservice.stop(30000, function() {
assert.equal(cservice.workers.length, 0, "0 workers expected, but " + cservice.workers.length + " found");
done();
});
});
});
it('Stop workers', function(done){
cservice.stop(30000, function() {
assert.equal(cservice.workers.length, 0, "0 workers expected, but " + cservice.workers.length + " found");
done();
});
});
describe('Restart with no workers', function(){
it('Expect restart failure', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "No workers to restart");
done();
}, "all"
);
});
it('Restart with no workers', function(done){
cservice.trigger("restart", function(err) {
assert.equal(err, "No workers to restart");
assert.equal(cservice.workers.length, 0, "0 workers expected, but " + cservice.workers.length + " found");
done();
}, "all"
);
});
})
@@ -6,5 +6,6 @@ var cservice = require("../cluster-service");
cservice.isWorker && it("WORKER", function(done) { });
cservice.isMaster && describe('Start & Stop', function(){
cservice.isMaster && describe('[Start & Stop]', function(){
it('Start worker', function(done){
cservice.start("./test/workers/basic", { workerCount: 1, accessKey: "123", cliEnabled: false, workerReady: true }, function() {
assert.equal(cservice.workers.length, 0, "0 workers expected, but " + cservice.workers.length + " found");
cservice.start({ workers: { basic: { worker: "./test/workers/basic", count: 1 } }, accessKey: "123", cli: false }, function() {
assert.equal(cservice.workers.length, 1, "1 worker expected, but " + cservice.workers.length + " found");
@@ -19,3 +20,3 @@ done();
done();
}, "./test/workers/basic", { workerCount: 1, timeout: 10000 });
}, "./test/workers/basic", { ready: false, count: 1, timeout: 10000 });
});
@@ -27,3 +28,3 @@
done();
}, "./test/workers/longInit", { workerReady: true, workerCount: 1, timeout: 1000 });
}, "./test/workers/longInit", { ready: false, count: 1, timeout: 1000 });
});
@@ -42,3 +43,3 @@
done();
}, null, { workerCount: 1, timeout: 1000 });
}, null, { count: 1, timeout: 1000 });
});
@@ -58,3 +59,3 @@
done();
}, "all", "./test/workers/basic2"
}, "all", "./test/workers/basic2", { ready: false }
);
@@ -61,0 +62,0 @@ });
@@ -5,2 +5,4 @@ var
cservice.workerReady();
setTimeout(function() {
cservice.workerReady();
}, 100);