node-resque
Comparing version 5.3.2 to 5.4.0
@@ -250,20 +250,23 @@ const path = require('path')
let queues = workers[workerName]
if (!queues) { throw new Error('worker not found') }
let workingOn = await this.workingOn(workerName, queues)
let message = 'Worker Timeout (killed manually)'
if (workingOn) {
workingOn = JSON.parse(workingOn)
errorPayload = {
worker: workerName,
queue: workingOn.queue,
payload: workingOn.payload || [],
exception: message,
error: message,
backtrace: [
`killed by ${os.hostname} at ${new Date()}`,
'queue#forceCleanWorker',
'node-resque'
],
failed_at: (new Date()).toString()
if (!queues) {
this.emit('error', `force-cleaning worker ${workerName}, but cannot find queues`)
} else {
let workingOn = await this.workingOn(workerName, queues)
let message = 'Worker Timeout (killed manually)'
if (workingOn) {
workingOn = JSON.parse(workingOn)
errorPayload = {
worker: workerName,
queue: workingOn.queue,
payload: workingOn.payload || [],
exception: message,
error: message,
backtrace: [
`killed by ${os.hostname} at ${new Date()}`,
'queue#forceCleanWorker',
'node-resque'
],
failed_at: (new Date()).toString()
}
}
@@ -273,3 +276,5 @@
await this.connection.redis.incr(this.connection.key('stat', 'failed', workerName))
await this.connection.redis.rpush(this.connection.key('failed'), JSON.stringify(errorPayload))
if (errorPayload) {
await this.connection.redis.rpush(this.connection.key('failed'), JSON.stringify(errorPayload))
}
}
@@ -276,0 +281,0 @@
@@ -70,5 +70,8 @@ // To read notes about the master locking scheme, check out:
} else {
setTimeout(async () => {
await this.end()
}, (this.options.timeout / 2))
return new Promise((resolve) => {
setTimeout(async () => {
await this.end()
resolve()
}, (this.options.timeout / 2))
})
}
@@ -202,2 +205,3 @@ }
for (let i in payloads) {
if (!payloads[i]) continue
const {name, time} = payloads[i]
@@ -204,0 +208,0 @@ const delta = nowInSeconds - time
@@ -6,3 +6,3 @@ {
"license": "Apache-2.0",
"version": "5.3.2",
"version": "5.4.0",
"homepage": "http://github.com/taskrabbit/node-resque",
@@ -9,0 +9,0 @@ "repository": {
@@ -184,2 +184,3 @@ # node-resque: The best background jobs in node.
- If you plan to run more than one worker per nodejs process, be sure to name them something distinct. Names **must** follow the pattern `hostname:pid+unique_id`. For example:
- For the Retry plugin, a success message will be emitted from the worker on each attempt (even if the job fails) except the final retry. The final retry will emit a failure message instead.
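A minimal sketch of the naming rule above, assuming node-resque's `NodeResque.Worker` constructor with its `name` option; the queue name, job, and connection details are hypothetical:

```javascript
// Sketch: two workers in one node process, each with a distinct name
// following the hostname:pid+unique_id pattern described above.
const os = require('os')
const NodeResque = require('node-resque')

const connectionDetails = { host: '127.0.0.1', port: 6379 } // hypothetical redis
const jobs = { add: { perform: async (a, b) => a + b } } // hypothetical job

async function boot () {
  const workerA = new NodeResque.Worker(
    { connection: connectionDetails, queues: ['math'], name: `${os.hostname()}:${process.pid}+1` },
    jobs
  )
  const workerB = new NodeResque.Worker(
    { connection: connectionDetails, queues: ['math'], name: `${os.hostname()}:${process.pid}+2` },
    jobs
  )

  await workerA.connect()
  await workerB.connect()
  workerA.start()
  workerB.start()
}

boot()
```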
@@ -308,3 +309,3 @@ ```javascript
By default, the scheduler will check for workers which haven't pinged redis in 60 minutes. If this happens, we will assume the process crashed, and remove it from redis. If this worker was working on a job, we will place it in the failed queue for later inspection. Every worker runs a timer which updates a key in redis every `timeout` (default: 5 seconds). If your job is slow but async, there should be no problem. However, if your job consumes 100% of the CPU of the process, this timer might not fire.
By default, the scheduler will check for workers which haven't pinged redis in 60 minutes. If this happens, we will assume the process crashed, and remove it from redis. If this worker was working on a job, we will place it in the failed queue for later inspection. Every worker runs a timer which updates a key in redis every `timeout` (default: 5 seconds). If your job is slow but async, there should be no problem. However, if your job consumes 100% of the CPU of the process, this timer might not fire.
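For illustration, a hedged sketch of setting the worker `timeout` described above, assuming the option is given in milliseconds and reusing hypothetical connection details and a hypothetical job:

```javascript
// Sketch: configuring the worker `timeout` (ms). Per the paragraph above,
// the worker's keep-alive timer updates a key in redis on this interval;
// 5000 ms (5 seconds) is the documented default.
const NodeResque = require('node-resque')

const connectionDetails = { host: '127.0.0.1', port: 6379 } // hypothetical redis
const jobs = { add: { perform: async (a, b) => a + b } } // hypothetical job

async function run () {
  const worker = new NodeResque.Worker(
    { connection: connectionDetails, queues: ['math'], timeout: 5000 },
    jobs
  )
  await worker.connect()
  worker.start()
}

run()
```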
@@ -335,3 +336,3 @@ To modify the 60 minute check, change `stuckWorkerTimeout` when configuring your scheduler, i.e.:
If you know the name of a worker that should be removed, you can also call `await queue.forceCleanWorker(workerName)` directly, and that will also remove the worker and move any job it was working on into the error queue.
If you know the name of a worker that should be removed, you can also call `await queue.forceCleanWorker(workerName)` directly, and that will also remove the worker and move any job it was working on into the error queue. This method will still proceed for workers which are only partially in redis, indicating a previous connection failure. In this case, the job which the worker was working on is irrecoverably lost.
@@ -338,0 +339,0 @@ ## Job Schedules
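A minimal sketch combining both options above, assuming the Scheduler accepts `stuckWorkerTimeout` in milliseconds and using a hypothetical worker name and connection details:

```javascript
// Sketch: raise the stuck-worker check to 30 minutes, and force-clean a
// known-dead worker by name. The worker name below is hypothetical.
const NodeResque = require('node-resque')

const connectionDetails = { host: '127.0.0.1', port: 6379 } // hypothetical redis

async function cleanup () {
  const scheduler = new NodeResque.Scheduler({
    connection: connectionDetails,
    stuckWorkerTimeout: 30 * 60 * 1000 // assumed to be milliseconds
  })
  await scheduler.connect()
  scheduler.start()

  const queue = new NodeResque.Queue({ connection: connectionDetails }, {})
  await queue.connect()
  // Removes the worker and moves any job it held into the failed queue.
  await queue.forceCleanWorker('exampleHost:1234+1')
}

cleanup()
```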