# advisory-lock

Comparing version 1.0.1 to 1.0.2
## lib/index.js

```diff
@@ -136,3 +136,3 @@ 'use strict';
-  return function (name) {
+  var createMutex = function createMutex(name) {
     var key = typeof name === 'string' ? strToKey(name) : name;
@@ -182,2 +182,4 @@
   };
+  createMutex.client = client;
+  return createMutex;
 };
```
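The change above turns the anonymous mutex factory into a named `createMutex` function and attaches the underlying pg client to it. A minimal sketch of the resulting 1.0.2 API shape; the connection string, lock name, and `runMigrations` are placeholders:

```js
import advisoryLock from 'advisory-lock'

const createMutex = advisoryLock('postgres://user:pass@localhost:5432/mydb')
const mutex = createMutex('migration-lock')

mutex
  .withLock(() => runMigrations()) // runMigrations is a placeholder
  // New in 1.0.2: the underlying connection can be closed explicitly.
  .then(() => createMutex.client.end())
```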
## package.json

```diff
 {
   "name": "advisory-lock",
-  "version": "1.0.1",
+  "version": "1.0.2",
   "description": "Distributed locking using PostgreSQL advisory locks",
@@ -12,7 +12,7 @@ "main": "lib/index.js",
     "preversion": "npm test",
     "postversion": "npm run build",
     "test": "npm run test:quick",
-    "test:quick": "babel-tape-runner test/*.js",
+    "test:quick": "babel-tape-runner test/*.test.js",
     "dev": "nodemon --exec npm -- run test:quick",
-    "docker": "docker-compose up dbcreate && docker-compose up -d db"
+    "docker": "blockai-dc && docker-compose up dbcreate"
   },
@@ -43,7 +43,8 @@ "repository": {
     "babel-tape-runner": "^2.0.1",
-    "blue-tape": "^0.2.0",
+    "blockai-dc": "^1.0.0",
     "eslint": "^2.8.0",
     "eslint-config-airbnb": "^8.0.0",
     "eslint-plugin-import": "^1.6.1",
-    "nodemon": "^1.9.2"
+    "nodemon": "^1.9.2",
+    "tape": "^4.5.1"
   },
```
## README.md

```diff
@@ -9,2 +9,11 @@ # advisory-lock
+- You have a ["clock process"](https://devcenter.heroku.com/articles/scheduled-jobs-custom-clock-processes)
+  and want to make absolutely sure there will never be more than one
+  process active at any given time. This sort of situation could
+  otherwise happen if you scale up the process by accident or through a
+  zero-downtime deploy mechanism that keeps the old version of the
+  process running while the new one is starting.
 - Running database migrations when the app starts
 - You run an Express-based web app and want to post a message to Slack
@@ -15,2 +24,3 @@ every 30 mins containing some stats (new registrations in last 30 mins
   library to elect a "master" process which sends the messages.
+- [etc.](http://lmgtfy.com/?q=distributed%20lock)
```
```diff
@@ -26,12 +36,32 @@
-### advisoryLock(connectionString)(lockName)
+### advisoryLock(connectionString)
 
 - `connectionString` must be a Postgres connection string
 
+Returns a `createMutex` function.
+
+The `createMutex` function also exposes a `client` property
+that can be used to terminate the database connection if necessary.
+
+PS: Each call to `advisoryLock(connectionString)` creates a new PostgreSQL
+connection which is not automatically terminated, so if that is an
+[issue for you](https://github.com/blockai/advisory-lock/issues/1), you
+can use `createMutex.client.end()` to end the connection when
+appropriate (e.g. after releasing a lock). This is typically not
+needed, however, since `advisoryLock()` usually only needs to be
+called once.
+
+### createMutex(lockName)
+
 - `lockName` must be a unique identifier for the lock
 
-Returns a **mutex** object containing the following Promise returning
-functions:
-#### withLock(fn)
+Returns a **mutex** object containing the functions listed below. All
+**object** methods are really just functions attached to the object and
+are not bound to *this*, so they can be safely destructured,
+e.g. `const { withLock } = createMutex(lockName)`.
+
+For a better understanding of what each function does,
+see [PostgreSQL's manual](http://www.postgresql.org/docs/current/static/functions-admin.html#FUNCTIONS-ADVISORY-LOCKS).
+
+#### mutex.withLock(fn)
 
 - `fn` Promise returning function or regular function to be executed once the lock is acquired
```
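Since the new text promises that mutex methods are not bound to *this*, destructuring works as advertised. A small sketch; the connection string, lock name, and `generateReport` are placeholders:

```js
import advisoryLock from 'advisory-lock'

// Methods are plain functions attached to the mutex object,
// so pulling them off the object is safe.
const { withLock } = advisoryLock('postgres://localhost/mydb')('report-lock')

withLock(() => generateReport()) // generateReport is a placeholder
```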
```diff
@@ -45,3 +75,3 @@
-#### tryLock()
+#### mutex.tryLock()
@@ -51,23 +81,23 @@ Returns a promise which resolves to `true` if the lock is free and
-#### lock()
+#### mutex.lock()
 
 Wait until we get an exclusive lock.
 
-#### unlock()
+#### mutex.unlock()
 
 Release the exclusive lock.
 
-#### tryLockShared()
+#### mutex.tryLockShared()
 
 Like `tryLock()` but for a shared lock.
 
-#### lockShared()
+#### mutex.lockShared()
 
 While held, this blocks any attempt to obtain an exclusive lock (e.g. calls to `.lock()` or `.withLock()`).
 
-#### unlockShared()
+#### mutex.unlockShared()
 
 Release the shared lock.
 
-#### withLockShared(fn)
+#### mutex.withLockShared(fn)
```
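To make the shared/exclusive interplay concrete, a sketch under the semantics described above; `readCache` and `rebuildCache` are placeholders, and the two calls would normally live in different processes:

```js
import advisoryLock from 'advisory-lock'

const mutex = advisoryLock('postgres://localhost/mydb')('cache-lock')

// Any number of holders may share this lock at the same time...
mutex.withLockShared(() => readCache())

// ...while an exclusive lock has to wait until all shared locks
// are released, and blocks new shared locks while it is held.
mutex.withLock(() => rebuildCache())
```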
```diff
@@ -96,7 +126,6 @@ Same as `withLock()` but using a shared lock.
 // doesn't "block"
 mutex.tryLock().then((obtainedLock) => {
   if (obtainedLock) {
-    return doSomething().then(() => mutex.release())
+    return doSomething().then(() => mutex.unlock())
   } else {
```
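For context, the complete fixed example reads roughly as follows; `doSomething` is the placeholder used by the README itself:

```js
// tryLock() resolves to a boolean instead of blocking,
// so the caller decides what happens on contention.
mutex.tryLock().then((obtainedLock) => {
  if (obtainedLock) {
    return doSomething().then(() => mutex.unlock())
  } else {
    throw new Error('failed to obtain lock')
  }
})
```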
```diff
@@ -111,1 +140,6 @@ throw new Error('failed to obtain lock')
+## Roadmap
+
+A `pgmutex` binary which waits for an exclusive lock before starting the
+process passed as an argument, e.g. `pgmutex ./path/to/worker`.
```
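The roadmap item could look something like the sketch below; this is not part of the package, and the `PG_MUTEX_URL` environment variable and the fixed lock name are assumptions:

```js
#!/usr/bin/env node
import { spawn } from 'child_process'
import advisoryLock from 'advisory-lock'

// Hypothetical pgmutex wrapper: wait for the exclusive lock,
// then hand control to the command given on the command line.
const mutex = advisoryLock(process.env.PG_MUTEX_URL)('pgmutex')

mutex.lock().then(() => {
  const [cmd, ...args] = process.argv.slice(2)
  const child = spawn(cmd, args, { stdio: 'inherit' })
  child.on('exit', (code) => mutex.unlock().then(() => process.exit(code)))
})
```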