keuss - npm Package Compare versions

Comparing version 1.7.4 to 2.0.0

backends/postgres.js


backends/bucket-mongo-safe.js

@@ -99,4 +99,2 @@ var async = require ('async');

// TODO optimization: set this._b[i] = null
if (is_reserve) {

@@ -318,3 +316,2 @@ this._b_states[i] = State.Reserved;

this._active_bucket = null;
// TODO loop internally!!!!!
cb ();

@@ -528,5 +525,3 @@ }

class BucketMongoSafeQueue extends Queue {
/*
options:

@@ -546,5 +541,3 @@ bucket_max_size || 1024;

this._factory = factory;
this._col = factory._db.collection (name);
this._ensureIndexes (err => {});

@@ -828,3 +821,3 @@ this._insert_bucket = {

_ensureIndexes (cb) {
this._col.createIndex ({mature : 1}, err => cb (err));
this._col.createIndex ({mature : 1}, err => cb (err, this));
}

@@ -923,6 +916,13 @@

queue (name, opts) {
var full_opts = {}
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new BucketMongoSafeQueue (name, this, full_opts, opts);
const q = new BucketMongoSafeQueue (name, this, full_opts, opts);
q._ensureIndexes (cb);
}

@@ -929,0 +929,0 @@
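
The net effect of this file's changes: queue creation on the bucket-mongo-safe backend becomes asynchronous. queue (name, opts, cb) now builds the queue, runs _ensureIndexes and hands the result to the callback, instead of returning it synchronously while firing index creation in the background. A minimal usage sketch of the 2.0.0 form, following keuss's usual factory pattern (queue name, payload and MongoDB URL are illustrative):

const MQ = require ('keuss/backends/bucket-mongo-safe');

MQ ({url: 'mongodb://localhost/keuss_test'}, (err, factory) => {
  if (err) return console.error (err);

  // 1.7.4: const q = factory.queue ('bucket_jobs');
  // 2.0.0: the queue arrives via the callback, once its indexes are ensured
  factory.queue ('bucket_jobs', {}, (err, q) => {
    if (err) return console.error (err);
    q.push ({elem: 1, headline: 'something to do'}, (err, id) => {
      if (err) return console.error (err);
      console.log ('queued with id %s', id);
    });
  });
});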

@@ -15,10 +15,6 @@ const _ = require ('lodash');

class IntraOrderedQueue extends Queue {
//////////////////////////////////////////////
constructor (name, factory, opts, orig_opts) {
super (name, factory, opts, orig_opts);
this._factory = factory;
this._col = factory._db.collection (name);
this.ensureIndexes (err => {});
}

@@ -53,3 +49,2 @@

if (err) return callback (err);
// TODO result.insertedCount must be 1
callback (null, result.upsertedId || q._id);

@@ -313,6 +308,6 @@ });

// create needed indexes for O(1) functioning
ensureIndexes (cb) {
_ensureIndexes (cb) {
async.series ([
cb => this._col.createIndex ({mature : 1, qcnt: 1}, cb),
], cb);
cb => this._col.createIndex ({mature : 1, qcnt: 1}, err => cb (err)),
], err => cb (err, this));
}

@@ -329,6 +324,13 @@ }

queue (name, opts) {
var full_opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new IntraOrderedQueue (name, this, full_opts, opts);
const q = new IntraOrderedQueue (name, this, full_opts, opts);
q._ensureIndexes (cb);
}

@@ -335,0 +337,0 @@
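
The if (!cb) shuffle at the top of every new queue() keeps the options argument optional, so both call forms below work against a 2.0.0 factory (the backend, queue name and the pollInterval option are shown only as illustrations):

const MQ = require ('keuss/backends/mongo');

MQ ({url: 'mongodb://localhost/keuss_test'}, (err, factory) => {
  if (err) return console.error (err);

  // options omitted: the callback slides into the opts slot, opts defaults to {}
  factory.queue ('jobs', (err, q) => { /* ... */ });

  // options supplied explicitly, callback last
  factory.queue ('jobs', {pollInterval: 15000}, (err, q) => { /* ... */ });
});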

@@ -14,6 +14,3 @@ var _ = require ('lodash');

super (name, factory, opts, orig_opts);
this._factory = factory;
this._col = factory._db.collection (name);
this.ensureIndexes (function (err) {});
}

@@ -37,3 +34,2 @@

if (err) return callback (err);
// TODO result.insertedCount must be 1
callback (null, result.insertedId);

@@ -218,4 +214,4 @@ });

// create needed indexes for O(1) functioning
ensureIndexes (cb) {
this._col.createIndex ({mature : 1}, err => cb (err));
_ensureIndexes (cb) {
this._col.createIndex ({mature : 1}, err => cb (err, this));
}

@@ -232,6 +228,13 @@ };

queue (name, opts) {
var full_opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new SimpleMongoQueue (name, this, full_opts, opts);
const q = new SimpleMongoQueue (name, this, full_opts, opts);
q._ensureIndexes (cb);
}

@@ -238,0 +241,0 @@
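
Dropping the fire-and-forget this.ensureIndexes (function (err) {}) from the constructors works because the factory now runs _ensureIndexes on every queue() call, and createIndex in the MongoDB driver is idempotent, so repeating it is harmless. A stand-alone sketch of that behaviour against the v4 driver pinned in package.json (collection name and URL are illustrative):

const { MongoClient } = require ('mongodb');

async function main () {
  const client = await MongoClient.connect ('mongodb://localhost/keuss_test');
  const col = client.db ().collection ('jobs');

  // safe to call on every queue creation: MongoDB builds the index the first
  // time and treats later identical createIndex calls as no-ops
  await col.createIndex ({mature: 1});
  await col.createIndex ({mature: 1});

  await client.close ();
}

main ().catch (console.error);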

@@ -122,7 +122,10 @@ const _ = require ('lodash');

///////////////////////////////////////////////////////////
queue (name, opts) {
if (!opts) opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
var pl_name = opts.pipeline || 'default';
var pipeline = this._pipelines[pl_name];
const pl_name = opts.pipeline || 'default';
let pipeline = this._pipelines[pl_name];

@@ -134,3 +137,3 @@ if (!pipeline) {

return this._queue_from_pipeline (name, pipeline, opts);
return setImmediate (() => cb (null, this._queue_from_pipeline (name, pipeline, opts)));
}

@@ -137,0 +140,0 @@
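
Unlike the index-creating backends, this one has nothing asynchronous to wait for, so cb could have been invoked inline; wrapping it in setImmediate keeps queue() asynchronous in every backend and spares callers the sometimes-sync, sometimes-async ambiguity. A small self-contained illustration of that ambiguity (names and values are illustrative):

// a sometimes-synchronous callback: what the caller observes depends on the branch taken
const cache = {k: 42};

function getSometimesSync (key, cb) {
  if (cache[key] !== undefined) return cb (null, cache[key]);  // synchronous branch
  setTimeout (() => cb (null, null), 10);                      // asynchronous branch
}

let ready = false;
getSometimesSync ('k', () => console.log ('ready?', ready));        // prints: ready? false
getSometimesSync ('missing', () => console.log ('ready?', ready));  // prints: ready? true
ready = true;

// deferring the synchronous branch with setImmediate (as queue() now does around
// _queue_from_pipeline) makes both branches observe the same state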

@@ -19,5 +19,3 @@ var async = require ('async');

this._factory = factory;
this._col = factory._db.collection (name);
this.ensureIndexes (function (err) {});
}

@@ -41,3 +39,2 @@

if (err) return callback (err);
// TODO result.insertedCount must be 1
callback (null, result.insertedId);

@@ -278,6 +275,6 @@ });

// create needed indexes for O(1) functioning
ensureIndexes (cb) {
_ensureIndexes (cb) {
this._col.createIndex ({mature : 1}, err => {
if (err) return cb (err);
this._col.createIndex({processed: 1}, {expireAfterSeconds: this._opts.ttl}, err => cb (err));
this._col.createIndex({processed: 1}, {expireAfterSeconds: this._opts.ttl}, err => cb (err, this));
});

@@ -295,6 +292,12 @@ }

queue (name, opts) {
var full_opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new PersistentMongoQueue (name, this, full_opts, opts);
const q = new PersistentMongoQueue (name, this, full_opts, opts);
q._ensureIndexes (cb);
}

@@ -301,0 +304,0 @@
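
Apart from the rename to _ensureIndexes and the callback now receiving (err, this), the persistent backend's TTL index is unchanged: MongoDB deletes processed elements expireAfterSeconds after the date stored in their processed field. A stand-alone sketch of that mechanism (collection name, payload and TTL value are illustrative):

const { MongoClient } = require ('mongodb');

async function demoTtl () {
  const client = await MongoClient.connect ('mongodb://localhost/keuss_test');
  const col = client.db ().collection ('processed_demo');

  // documents become eligible for deletion ~3600s after their `processed` date
  await col.createIndex ({processed: 1}, {expireAfterSeconds: 3600});
  await col.insertOne ({payload: 'done', processed: new Date ()});

  await client.close ();
}

demoTtl ().catch (console.error);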

@@ -109,6 +109,11 @@ var async = require ('async');

queue (name, opts) {
var full_opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new RedisListQueue (name, this, full_opts, opts);
return setImmediate(() => cb (null, new RedisListQueue (name, this, full_opts, opts)));
}

@@ -147,6 +152,1 @@

module.exports = creator;

@@ -131,6 +131,11 @@ var _ = require ('lodash')

queue (name, opts) {
var full_opts = {};
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new RedisOQ (name, this, full_opts, opts);
return setImmediate (() => cb (null, new RedisOQ (name, this, full_opts, opts)));
}

@@ -137,0 +142,0 @@

@@ -20,3 +20,2 @@ const _ = require ('lodash');

this._factory = factory;
this._col = factory._db.collection (name);

@@ -27,11 +26,2 @@ this._groups_str = this._opts.groups || 'A,B:C';

this.ensureIndexes (err => {
if (err) {
console.error ('keuss:Queue:StreamMongo: index creation failed, queues performance will be severely impacted:', err);
}
else {
debug ('indexes created');
}
});
debug ('created with groups %j and gid %s (used for pop/reserve only)', this._groups_vector, this._gid);

@@ -75,3 +65,2 @@ }

if (err) return callback (err);
// TODO result.insertedCount must be 1
callback (null, result.insertedId);

@@ -409,3 +398,3 @@ this._groups_vector.forEach (i => this._stats.incr (`stream.${i}.put`));

// create needed indexes for O(1) functioning
ensureIndexes (cb) {
_ensureIndexes (cb) {
const tasks = [];

@@ -419,3 +408,3 @@

tasks.push (cb => this._col.createIndex ({t: 1}, {expireAfterSeconds: this._opts.ttl}, cb));
async.series (tasks, cb);
async.series (tasks, err => cb (err, this));
}

@@ -432,6 +421,12 @@ }

queue (name, opts) {
queue (name, opts, cb) {
if (!cb) {
cb = opts;
opts = {};
}
const full_opts = {};
_.merge(full_opts, this._opts, opts);
return new StreamMongoQueue (name, this, full_opts, opts);
const q = new StreamMongoQueue (name, this, full_opts, opts);
q._ensureIndexes (cb);
}

@@ -438,0 +433,0 @@
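
The stream backend creates its indexes as a list of tasks run through async.series; the 2.0.0 change here only rewrites the final callback so the queue instance is handed back as well. A self-contained sketch of the async.series pattern (the task bodies are illustrative stand-ins for createIndex calls):

const async = require ('async');

const tasks = [];
tasks.push (cb => setTimeout (() => { console.log ('index 1 built'); cb (); }, 10));
tasks.push (cb => setTimeout (() => { console.log ('index 2 built'); cb (); }, 10));

// tasks run strictly one after another; the first error aborts the series
async.series (tasks, err => {
  if (err) return console.error ('index creation failed:', err);
  console.log ('all indexes ready');
});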

{
"name": "keuss",
"version": "1.7.4",
"version": "2.0.0",
"keywords": [

@@ -9,2 +9,4 @@ "queue",

"mongodb",
"postgres",
"postgresql",
"redis",

@@ -31,15 +33,17 @@ "HA",

"@nodebb/mubsub": "~1.8.0",
"async": "~3.2.4",
"async-lock": "~1.3.1",
"debug": "~4.3.4",
"ioredis": "~5.3.2",
"async": "~3.2.5",
"async-lock": "~1.4.1",
"debug": "~4.3.5",
"ioredis": "~5.4.1",
"lodash": "~4.17.21",
"mitt": "~3.0.0",
"mitt": "~3.0.1",
"mongodb": "~4.17.0",
"uuid": "~8.3.2"
"uuid": "~8.3.2",
"pg": "~8.12.0"
},
"devDependencies": {
"chance": "~1.1.11",
"mocha": "~10.2.0",
"mocha": "~10.4.0",
"should": "~13.2.3",
"nyc": "~15.1.0",
"why-is-node-running": "^2.2.2"

@@ -46,0 +50,0 @@ },
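
The dependency changes reflect the headline feature of 2.0.0: a PostgreSQL backend (backends/postgres.js in the file list above) with pg ~8.12.0 as its driver, plus the new postgres/postgresql keywords. Assuming the new backend follows the same factory pattern as the existing ones (an assumption, since its diff is not rendered here; require path, option names and connection details are illustrative), usage would look roughly like this:

const MQ = require ('keuss/backends/postgres');   // assumed require path

MQ ({postgres: {host: 'localhost', database: 'keuss'}}, (err, factory) => {
  if (err) return console.error (err);

  factory.queue ('pg_jobs', {}, (err, q) => {     // 2.0.0 callback style
    if (err) return console.error (err);
    q.push ({elem: 1}, err => { /* ... */ });
  });
});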

@@ -40,3 +40,2 @@ const _ = require ('lodash');

if (err) return callback (err);
// TODO result.insertedCount must be 1
callback (null, result.insertedId);

@@ -43,0 +42,0 @@ });

@@ -39,8 +39,13 @@ var async = require ('async');

if (this._opts.deadletter) {
this._deadletter_queue = this.queue (this._opts.deadletter.queue || '__deadletter__');
this._max_ko = this._opts.deadletter.max_ko || 0;
debug('%s: uses deadletter queue %s, max KO is %d', this._name, this._deadletter_queue.name(), this._max_ko);
const qname = this._opts.deadletter.queue || '__deadletter__';
debug('%s: uses deadletter queue %s, max KO is %d', this._name, qname, this._max_ko);
this.queue (qname, (err, dlq) => {
this._deadletter_queue = dlq;
cb (err);
});
}
cb();
else {
cb();
}
});

@@ -57,4 +62,4 @@ }

queue (name, opts) {
return null;
queue (name, opts, cb) {
return cb ();
}

@@ -61,0 +66,0 @@
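
Factory initialization used to wire the deadletter queue synchronously; with queue() now callback-based, it only completes once the deadletter queue itself has been created, and the placeholder queue() that used to return null now calls its callback. The option names come straight from this hunk (deadletter.queue, deadletter.max_ko); backend, URL and values are illustrative:

const MQ = require ('keuss/backends/mongo');

const factory_opts = {
  url: 'mongodb://localhost/keuss_test',
  deadletter: {
    queue: '__deadletter__',  // name of the deadletter queue (this is also the default)
    max_ko: 3                 // elements rolled back more than 3 times get moved there
  }
};

MQ (factory_opts, (err, factory) => {
  if (err) return console.error (err);
  // by the time this callback runs, the deadletter queue has been created too
  factory.queue ('jobs', {}, (err, q) => { /* ... */ });
});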

@@ -345,4 +345,2 @@ var async = require ('async');

pop (cid, opts, callback) {
// TODO fail if too many consumers?
if (this._drained) return setImmediate (() => {

@@ -465,3 +463,3 @@ debug ('%s: pop while drained, return error', this._name);

(this._factory.deadletter_queue ()) && // AND the factory has a deadletter queue
(this._factory.max_ko ()) && // AND thee's a max ko attempts
(this._factory.max_ko ()) && // AND there's a max ko attempts
(obj.tries > this._factory.max_ko ()) && // AND we got enough tries

@@ -597,3 +595,2 @@ (this.name () != '__deadletter__') // and this queue is not deadletter already

// TODO eat up errors?
if (err) {

@@ -600,0 +597,0 @@ // get/reserve in error

# Keuss
Enterprise-grade Job Queues for node.js, backed by redis and/or MongoDB
Enterprise-grade Job Queues for node.js backed by redis, MongoDB or PostgreSQL
* [Quickstart](https://pepmartinez.github.io/keuss/docs/quickstart)

@@ -5,0 +6,0 @@ * [Documentation](https://pepmartinez.github.io/keuss/docs/)

@@ -71,3 +71,2 @@ var _ = require ('lodash');

// TODO remove from factory
if (cb) cb();

@@ -74,0 +73,0 @@ }

