disk-memoizer
Comparing version 2.1.4 to 2.2.0
@@ -172,3 +172,3 @@ module.exports = {
"no-spaced-func": "error",
"no-sync": "error",
"no-sync": "off",
"no-tabs": "error",
@@ -175,0 +175,0 @@ "no-template-curly-in-string": "error",
@@ -9,2 +9,3 @@ "use strict";
DISK_MEMOIZER_FLUSH_CACHE = _process$env.DISK_MEMOIZER_FLUSH_CACHE,
DISK_MEMOIZER_LOCK_STALE_MS = _process$env.DISK_MEMOIZER_LOCK_STALE_MS,
DISK_MEMOIZER_GC = _process$env.DISK_MEMOIZER_GC,
@@ -22,5 +23,6 @@ DISK_MEMOIZER_GC_INTERVAL = _process$env.DISK_MEMOIZER_GC_INTERVAL,
CACHE_DIR: DISK_MEMOIZER_CACHE_DIR || os.tmpdir() + "/disk-memoizer",
LOCK_STALE_MS: +(DISK_MEMOIZER_LOCK_STALE_MS || 30 * 1000),
GC: cluster.isMaster && DISK_MEMOIZER_GC !== "false",
GC_INTERVAL: DISK_MEMOIZER_GC_INTERVAL || 1000 * 60 * 5,
GC_INTERVAL: +(DISK_MEMOIZER_GC_INTERVAL || 1000 * 60 * 5),
GC_LAST_ACCESS: DISK_MEMOIZER_GC_LAST_ACCESS || "1h"
};
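The config hunks above add a LOCK_STALE_MS setting (seeded from DISK_MEMOIZER_LOCK_STALE_MS) and switch GC_INTERVAL to an explicit numeric coercion with unary `+`, since everything read from process.env arrives as a string. A minimal sketch of that pattern, covering only the variables shown in the hunk rather than the package's full config module:

```js
// Minimal sketch of the env coercion used above; the real config module
// reads several more variables than the two shown here.
const os = require("os");

const {
  DISK_MEMOIZER_LOCK_STALE_MS,
  DISK_MEMOIZER_GC_INTERVAL
} = process.env;

module.exports = {
  CACHE_DIR: os.tmpdir() + "/disk-memoizer",
  // Unary + turns the env string (e.g. "60000") into a number; the
  // fallback keeps the numeric default when the variable is unset.
  LOCK_STALE_MS: +(DISK_MEMOIZER_LOCK_STALE_MS || 30 * 1000),
  GC_INTERVAL: +(DISK_MEMOIZER_GC_INTERVAL || 1000 * 60 * 5)
};
```

Note that a non-numeric override (say DISK_MEMOIZER_LOCK_STALE_MS=abc) coerces to NaN rather than falling back to the default, so the value should be a plain integer string.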
@@ -13,3 +13,3 @@ "use strict";
var fs = require("fs");
var fs = require("graceful-fs");
var config = require("./config");
@@ -22,5 +22,6 @@ var gcTmpFiles = require("./gc");
var LruCache = require("lru-cache");
var lockFile = require("lockfile");
// Used to convert md5 hashes into subfolder chunks
var RE_PATHIFY = /^([a-z0-9]{2})([a-z0-9]{2})([a-z0-9]{2})(.+)/;
var os = require("os");
var LOCK_TMP_DIR = os.tmpdir();
@@ -38,9 +39,13 @@ function diskMemoizer(unmemoizedFn) {
_ref$memoryCacheItems = _ref.memoryCacheItems,
memoryCacheItems = _ref$memoryCacheItems === undefined ? config.MEMORY_CACHE_ITEMS : _ref$memoryCacheItems;
var memoryCache = memoryCacheItems > 0 ? new LruCache({
memoryCacheItems = _ref$memoryCacheItems === undefined ? config.MEMORY_CACHE_ITEMS : _ref$memoryCacheItems,
_ref$lockStale = _ref.lockStale,
lockStale = _ref$lockStale === undefined ? config.LOCK_STALE_MS : _ref$lockStale,
_ref$lruCacheOptions = _ref.lruCacheOptions,
lruCacheOptions = _ref$lruCacheOptions === undefined ? {
max: memoryCacheItems,
maxAge: maxAge
}) : fakeLruCache();
} : _ref$lruCacheOptions;
var memoryCache = memoryCacheItems > 0 || lruCacheOptions.max > 0 ? new LruCache(lruCacheOptions) : fakeLruCache();
function diskMemoized() {
@@ -78,2 +83,3 @@ for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) {
memoryCache: memoryCache,
lockStale: lockStale,
type: type
@@ -87,2 +93,3 @@ }, callback);
marshaller: marshaller,
lockStale: lockStale,
memoryCache: memoryCache,
@@ -124,2 +131,5 @@ type: type
// Used to convert md5 hashes into subfolder chunks
var RE_PATHIFY = /^([a-z0-9]{2})([a-z0-9]{2})([a-z0-9]{2})(.+)/;
function getCachePath(key) {
@@ -131,2 +141,6 @@ var cacheDir = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : config.CACHE_DIR;
function getLockPath(key) {
return path.normalize(LOCK_TMP_DIR + "/" + createHash("md5").update(key).digest("hex") + ".lock");
}
function grabAndCache(_ref3, callback) {
@@ -141,5 +155,27 @@ var key = _ref3.key,
memoryCache = _ref3$memoryCache === undefined ? fakeLruCache() : _ref3$memoryCache,
lockStale = _ref3.lockStale,
type = _ref3.type;
var lockPath = getLockPath(cachePath);
var isLocked = lockFile.checkSync(lockPath, { stale: lockStale });
if (isLocked) {
// We'll wait until the lock is released
waitForLockRelease({
lockPath: lockPath,
key: key,
cachePath: cachePath,
unmemoizedFn: unmemoizedFn,
marshaller: marshaller,
lockStale: lockStale,
memoryCache: memoryCache,
type: type
}, callback);
return;
}
lockFile.lockSync(lockPath, { stale: lockStale });
unmemoizedFn.apply(undefined, _toConsumableArray(args.concat(grabAndCacheCallback)));
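In the hunk above, grabAndCache now guards the fetch with a lock file: lockFile.checkSync reports whether another caller is already fetching (locks older than lockStale milliseconds are treated as abandoned), waitForLockRelease queues the request if so, and lockFile.lockSync claims the lock before the uncached function runs. A simplified, self-contained sketch of that check/lock/unlock flow, with fetchFn standing in for the unmemoized function:

```js
// Simplified sketch of the stale-aware locking used above, built on the
// same "lockfile" package. The real module queues concurrent callers via
// waitForLockRelease instead of failing them as this sketch does.
const lockFile = require("lockfile");

function fetchOnce(lockPath, lockStale, fetchFn, callback) {
  // A lock file older than `stale` milliseconds is treated as abandoned.
  if (lockFile.checkSync(lockPath, {stale: lockStale})) {
    return callback(new Error("fetch already in progress"));
  }
  lockFile.lockSync(lockPath, {stale: lockStale});
  fetchFn((err, data) => {
    // Always release the lock, whether the fetch succeeded or not.
    lockFile.unlock(lockPath, (unlockErr) => callback(err || unlockErr, data));
  });
}
```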
@@ -169,10 +205,28 @@
fs.writeFile(cachePath, data, function (err) {
// We'll save the file on a temporary file to avoid in-flight files
// from being taken as complete
var tmpCachePath = cachePath + ".tmp";
fs.writeFile(tmpCachePath, data, function (err) {
if (err) {
debug("[error] Failed saving %s. Got error: %s", cachePath, err.message);
} else {
debug("[error] Failed saving %s. Got error: %s", tmpCachePath, err.message);
return callback(err);
}
// Rename the file once it's persisted in disk to make it available
// to any queued cache
fs.rename(tmpCachePath, cachePath, function (err) {
if (err) {
debug("[error] Failed saving %s. Got error: %s", cachePath, err.message);
return callback(err);
}
debug("[info] Saved cache for %s on %s", key, cachePath);
}
memoryCache.set(key, unmarshalledData);
callback(null, unmarshalledData);
memoryCache.set(key, unmarshalledData);
lockFile.unlock(lockPath, function (err) {
if (err) {
return callback(err);
}
callback(null, unmarshalledData);
});
});
});
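The old callback wrote straight to cachePath; the new one writes to cachePath + ".tmp", renames the finished file into place, and only then releases the lock and invokes the callback, so a half-written file can never be read back as a valid cache entry. The write-then-rename step in isolation, as a sketch that keeps the graceful-fs import the hunk uses:

```js
// Sketch of the write-then-rename step on its own. Readers see either the
// previous cache file or the fully written new one, never a partial write,
// because rename is atomic when both paths live on the same filesystem.
const fs = require("graceful-fs");

function writeCacheAtomically(cachePath, data, callback) {
  const tmpCachePath = cachePath + ".tmp";
  fs.writeFile(tmpCachePath, data, (err) => {
    if (err) {
      return callback(err);
    }
    fs.rename(tmpCachePath, cachePath, callback);
  });
}
```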
@@ -184,13 +238,66 @@ });
function useCachedFile(_ref4, callback) {
var key = _ref4.key,
var lockWatchers = {};
function waitForLockRelease(_ref4, callback) {
var lockPath = _ref4.lockPath,
key = _ref4.key,
cachePath = _ref4.cachePath,
unmemoizedFn = _ref4.unmemoizedFn,
_ref4$marshaller = _ref4.marshaller,
marshaller = _ref4$marshaller === undefined ? marshallers.none : _ref4$marshaller,
_ref4$memoryCache = _ref4.memoryCache,
memoryCache = _ref4$memoryCache === undefined ? fakeLruCache() : _ref4$memoryCache,
marshaller = _ref4.marshaller,
lockStale = _ref4.lockStale,
memoryCache = _ref4.memoryCache,
type = _ref4.type;
// We only want to keep one lock watcher per lock path
var hasWatcher = !!lockWatchers[lockPath];
lockWatchers[lockPath] = lockWatchers[lockPath] || [];
var currentWatchList = lockWatchers[lockPath];
if (hasWatcher) {
// If there's already a watcher there's no need to register a new
// one, we'll defer the execution of the task until the watcher notifies
// about changes on the lock
currentWatchList.push(runOnLockReleased);
} else {
// Register a singleton watcher for a particular lock file
var _watcherFn = null;
_watcherFn = function watcherFn() {
runOnLockReleased();
// Run all the watchers
currentWatchList.forEach(function (watcher) {
return watcher();
});
fs.unwatchFile(lockPath, _watcherFn);
currentWatchList.splice(0, currentWatchList.length);
};
fs.watch(lockPath, _watcherFn);
}
function runOnLockReleased() {
useCachedFile({
key: key,
cachePath: cachePath,
unmemoizedFn: unmemoizedFn,
marshaller: marshaller,
lockStale: lockStale,
memoryCache: memoryCache,
type: type
}, callback);
}
}
function useCachedFile(_ref5, callback) {
var key = _ref5.key,
cachePath = _ref5.cachePath,
unmemoizedFn = _ref5.unmemoizedFn,
_ref5$marshaller = _ref5.marshaller,
marshaller = _ref5$marshaller === undefined ? marshallers.none : _ref5$marshaller,
_ref5$memoryCache = _ref5.memoryCache,
memoryCache = _ref5$memoryCache === undefined ? fakeLruCache() : _ref5$memoryCache,
lockStale = _ref5.lockStale,
type = _ref5.type;
marshaller = getMarshaller({
@@ -209,2 +316,3 @@ type: type,
marshaller: marshaller,
lockStale: lockStale,
type: type
@@ -276,5 +384,5 @@ }, callback);
function getMarshaller(_ref5) {
var type = _ref5.type,
marshaller = _ref5.marshaller;
function getMarshaller(_ref6) {
var type = _ref6.type,
marshaller = _ref6.marshaller;
@@ -281,0 +389,0 @@ if (marshallers[type]) {
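waitForLockRelease is the piece that turns concurrent cache misses into a queue: the first caller to find a lock registers a single filesystem watcher for that lock path, later callers just push their continuation onto lockWatchers[lockPath], and when the lock changes every queued caller is replayed through useCachedFile. A condensed sketch of that one-watcher-many-callbacks idea; it uses fs.watchFile/unwatchFile for symmetry where the code above uses fs.watch, and it drops the useCachedFile replay:

```js
// Condensed sketch of "one watcher per lock path, many queued callbacks".
// The package itself keys the queue by lockPath and replays every queued
// caller through useCachedFile once the lock changes.
const fs = require("fs");

const lockWatchers = {};

function waitForLockRelease(lockPath, onReleased) {
  const alreadyWatching = Boolean(lockWatchers[lockPath]);
  lockWatchers[lockPath] = lockWatchers[lockPath] || [];
  lockWatchers[lockPath].push(onReleased);
  if (alreadyWatching) {
    return; // just queue behind the existing watcher
  }
  const listener = () => {
    fs.unwatchFile(lockPath, listener);
    const queued = lockWatchers[lockPath];
    delete lockWatchers[lockPath];
    // Wake up every caller that queued while the lock was held.
    queued.forEach((cb) => cb());
  };
  fs.watchFile(lockPath, {interval: 100}, listener);
}
```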
@@ -15,2 +15,6 @@ // Environment variables
// Time for a lock to be considered stale.
// Defaults to 30000 ms (30s)
DISK_MEMOIZER_LOCK_STALE_MS,
// Disables memoization garbage collection when set to false
@@ -40,5 +44,6 @@ // Garbage collection will not take place on cluster workers
`${os.tmpdir()}/disk-memoizer`,
LOCK_STALE_MS: +(DISK_MEMOIZER_LOCK_STALE_MS || 30 * 1000),
GC: cluster.isMaster && (DISK_MEMOIZER_GC !== "false"),
GC_INTERVAL: DISK_MEMOIZER_GC_INTERVAL || 1000 * 60 * 5,
GC_INTERVAL: +(DISK_MEMOIZER_GC_INTERVAL || 1000 * 60 * 5),
GC_LAST_ACCESS: DISK_MEMOIZER_GC_LAST_ACCESS || "1h"
};
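These are the untranspiled source counterparts of the compiled config hunks earlier. One practical consequence of destructuring process.env at the top of the module is that the variables are read once, when the config is first required, so any override has to be in place before that happens. Normally they come from the shell environment; setting them programmatically, as in this sketch, only works ahead of the first require (the variable names are the ones documented in the README hunk further down):

```js
// Overrides must exist before the config module is first required,
// because process.env is destructured at module load time.
process.env.DISK_MEMOIZER_LOCK_STALE_MS = "60000"; // 60s stale window
process.env.DISK_MEMOIZER_GC_INTERVAL = "600000";  // run GC every 10 min

const diskMemoizer = require("disk-memoizer"); // picks up the values above
```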
@@ -9,3 +9,3 @@ // Simple disk memoization and in memory LRU cache for high
const fs = require("fs");
const fs = require("graceful-fs");
const config = require("./config");
@@ -18,7 +18,7 @@ const gcTmpFiles = require("./gc");
const LruCache = require("lru-cache");
const lockFile = require("lockfile");
const os = require("os");
const LOCK_TMP_DIR = os.tmpdir();
// Used to convert md5 hashes into subfolder chunks
const RE_PATHIFY = /^([a-z0-9]{2})([a-z0-9]{2})([a-z0-9]{2})(.+)/;
function diskMemoizer(unmemoizedFn, {
@@ -63,11 +63,16 @@
// DISK_MEMOIZER_MEMORY_CACHE_ITEMS
memoryCacheItems = config.MEMORY_CACHE_ITEMS
memoryCacheItems = config.MEMORY_CACHE_ITEMS,
// How long before considering the lock stale?
lockStale = config.LOCK_STALE_MS,
lruCacheOptions = {
max: memoryCacheItems,
maxAge
}
} = {}) {
const memoryCache = memoryCacheItems > 0
? new LruCache({
max: memoryCacheItems,
maxAge
})
const memoryCache = memoryCacheItems > 0 || lruCacheOptions.max > 0
? new LruCache(lruCacheOptions)
: fakeLruCache();
@@ -107,2 +112,3 @@
memoryCache,
lockStale,
type
@@ -116,2 +122,3 @@ }, callback);
marshaller,
lockStale,
memoryCache,
@@ -152,2 +159,5 @@ type
// Used to convert md5 hashes into subfolder chunks
const RE_PATHIFY = /^([a-z0-9]{2})([a-z0-9]{2})([a-z0-9]{2})(.+)/;
function getCachePath(key, cacheDir = config.CACHE_DIR) {
@@ -162,2 +172,11 @@ return path.normalize(
function getLockPath(key) {
return path.normalize(
`${LOCK_TMP_DIR}/${createHash("md5").
update(key).
digest("hex")}.lock`
);
}
function grabAndCache({
@@ -170,5 +189,27 @@ key,
memoryCache = fakeLruCache(),
lockStale,
type
}, callback) {
const lockPath = getLockPath(cachePath);
const isLocked = lockFile.checkSync(lockPath, {stale: lockStale});
if (isLocked) {
// We'll wait until the lock is released
waitForLockRelease({
lockPath,
key,
cachePath,
unmemoizedFn,
marshaller,
lockStale,
memoryCache,
type
}, callback);
return;
}
lockFile.lockSync(lockPath, {stale: lockStale});
unmemoizedFn(...args.concat(grabAndCacheCallback));
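Note that grabAndCache locks on getLockPath(cachePath), so every caller interested in the same cache entry contends on exactly one lock file: the cache path is hashed with md5 and the digest becomes a .lock file directly in os.tmpdir(), next to (not inside) the disk-memoizer cache directory. The derivation on its own, as a sketch that inlines LOCK_TMP_DIR:

```js
// Sketch of the lock-path derivation: hashing keeps the lock name short
// and filesystem-safe, and identical cache paths always map to the same
// lock file, which is what makes the deduplication work.
const {createHash} = require("crypto");
const os = require("os");
const path = require("path");

function lockPathFor(cachePath) {
  const digest = createHash("md5").update(cachePath).digest("hex");
  return path.normalize(`${os.tmpdir()}/${digest}.lock`);
}

// e.g. lockPathFor("/tmp/disk-memoizer/ab/cd/ef/...json")
//      -> "/tmp/<32-char md5>.lock" (exact tmpdir varies by platform)
```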
@@ -198,19 +239,89 @@
fs.writeFile(cachePath, data, (err) => {
// We'll save the file on a temporary file to avoid in-flight files
// from being taken as complete
const tmpCachePath = `${cachePath}.tmp`;
fs.writeFile(tmpCachePath, data, (err) => {
if (err) {
debug("[error] Failed saving %s. Got error: %s",
cachePath, err.message);
} else {
tmpCachePath,
err.message
);
return callback(err);
}
// Rename the file once it's persisted in disk to make it available
// to any queued cache
fs.rename(tmpCachePath, cachePath, (err) => {
if (err) {
debug("[error] Failed saving %s. Got error: %s",
cachePath, err.message);
return callback(err);
}
debug("[info] Saved cache for %s on %s", key, cachePath);
}
memoryCache.set(key, unmarshalledData);
callback(null, unmarshalledData);
memoryCache.set(key, unmarshalledData);
lockFile.unlock(lockPath, (err) => {
if (err) {
return callback(err);
}
callback(null, unmarshalledData);
});
});
});
});
});
});
}
}
const lockWatchers = {};
function waitForLockRelease({
lockPath,
key,
cachePath,
unmemoizedFn,
marshaller,
lockStale,
memoryCache,
type
}, callback) {
// We only want to keep one lock watcher per lock path
const hasWatcher = !!lockWatchers[lockPath];
lockWatchers[lockPath] = lockWatchers[lockPath] || [];
const currentWatchList = lockWatchers[lockPath];
if (hasWatcher) {
// If there's already a watcher there's no need to register a new
// one, we'll defer the execution of the task until the watcher notifies
// about changes on the lock
currentWatchList.push(runOnLockReleased);
} else {
// Register a singleton watcher for a particular lock file
let watcherFn = null;
watcherFn = () => {
runOnLockReleased();
// Run all the watchers
currentWatchList.forEach((watcher) => watcher());
fs.unwatchFile(lockPath, watcherFn);
currentWatchList.splice(0, currentWatchList.length);
};
fs.watch(lockPath, watcherFn);
}
function runOnLockReleased() {
useCachedFile({
key,
cachePath,
unmemoizedFn,
marshaller,
lockStale,
memoryCache,
type
}, callback);
}
}
function useCachedFile({
@@ -222,2 +333,3 @@ key,
memoryCache = fakeLruCache(),
lockStale,
type
@@ -239,2 +351,3 @@ }, callback) {
marshaller,
lockStale,
type
@@ -241,0 +354,0 @@ }, callback);
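The source changes above add two options to diskMemoizer(fn, opts): lockStale, falling back to config.LOCK_STALE_MS, and lruCacheOptions, falling back to {max: memoryCacheItems, maxAge}. A hypothetical usage sketch with a fake high-latency fetch, using only option names that appear in the hunks:

```js
// Hypothetical usage of the 2.2.0 options; the fetch is faked with a
// setTimeout so the sketch stays self-contained.
const diskMemoizer = require("disk-memoizer");

const getJson = diskMemoizer((url, callback) => {
  // Stand-in for a slow HTTP or database fetch.
  setTimeout(() => callback(null, {fetched: url}), 100);
}, {
  type: "json",         // marshall/unmarshall the cached value as JSON
  lockStale: 60 * 1000  // treat locks older than 60s as abandoned
});

getJson("https://example.com/config.json", (err, doc) => {
  if (err) throw err;
  console.log(doc);
});
```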
{
"name": "disk-memoizer",
"version": "2.1.4",
"version": "2.2.0",
"description": "Simple disk memoization and in memory LRU cache for high latency IO responses",
@@ -14,3 +14,3 @@ "main": "dist/disk_memoizer.js",
"prepublish": "npm run build",
"prebuild": "npm run test",
"postbuild": "npm run test",
"pretest": "npm run lint"
@@ -44,3 +44,3 @@ },
"babel-preset-es2015": "^6.24.1",
"eslint": "^3.19.0",
"eslint": "^4.1.1",
"mocha": "^3.2.0"
@@ -52,2 +52,4 @@ },
"glob": "^7.1.1",
"graceful-fs": "^4.1.11",
"lockfile": "^1.0.3",
"lru-cache": "^4.0.2",
@@ -54,0 +56,0 @@ "mkdirp": "^0.5.1",
@@ -5,3 +5,5 @@ # disk-memoizer
Queues up concurrent requests for the same resource before it has been cached to avoid fetching it multiple times in parallel.
[![Build Status](https://api.travis-ci.org/bermi/disk-memoizer.svg)](http://travis-ci.org/bermi/disk-memoizer) [![Dependency Status](https://david-dm.org/bermi/disk-memoizer.svg)](https://david-dm.org/bermi/disk-memoizer) [![](http://img.shields.io/npm/v/disk-memoizer.svg) ![](http://img.shields.io/npm/dm/disk-memoizer.svg)](https://www.npmjs.org/package/disk-memoizer)
@@ -80,3 +82,9 @@
// DISK_MEMOIZER_MEMORY_CACHE_ITEMS
memoryCacheItems
memoryCacheItems,
// lru-cache options
lruCacheOptions = {
max: memoryCacheItems,
maxAge
}
}
@@ -98,5 +106,5 @@
| DISK_MEMOIZER_GC_LAST_ACCESS | 1h | When removing old files only those that have not been accessed for the specified time will be removed. |
| DISK_MEMOIZER_LOCK_STALE_MS | 30000 | Milliseconds for the cache lock to be considered stale. |
### Garbage collection
@@ -103,0 +111,0 @@
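The README hunks document the same two additions from the user's side: lruCacheOptions (defaulting to {max: memoryCacheItems, maxAge}) and the DISK_MEMOIZER_LOCK_STALE_MS variable. Since the memory layer is only created when memoryCacheItems > 0 or lruCacheOptions.max > 0, the option also gives a direct way to disable or resize the in-memory LRU; a small sketch (the loader function is made up):

```js
// Sketch of controlling the in-memory LRU layer via the new option.
const diskMemoizer = require("disk-memoizer");

// Hypothetical (url, callback) loader used by both memoizers below.
function loadDoc(url, callback) {
  setTimeout(() => callback(null, {url}), 50);
}

// Disk cache only: with memoryCacheItems at 0 the default lruCacheOptions
// becomes {max: 0, ...}, so the fake (no-op) memory cache is used.
const diskOnly = diskMemoizer(loadDoc, {memoryCacheItems: 0});

// Explicitly sized in-memory LRU, passed straight through to lru-cache.
const tuned = diskMemoizer(loadDoc, {
  lruCacheOptions: {max: 200, maxAge: 60 * 1000}
});
```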
@@ -1,6 +0,7 @@
/* eslint no-sync: 0, init-declarations: 0, max-lines: 0 */
/* eslint no-sync: 0, init-declarations: 0, max-lines: 0, max-statements: 0 */
const diskMemoizer = require("../");
const assert = require("assert");
const fs = require("fs");
const fs = require("graceful-fs");
const path = require("path");
@@ -169,2 +170,30 @@ const os = require("os");
const concurrentCalls = 1000;
it(`should allow ${concurrentCalls} concurrent requests`, (done) => {
fs.unlink(expectedCachePath, () => {
let fetchCount = 0;
const memoizedFn = diskMemoizer((url, callback) => {
fetchCount += 1;
assert.equal(fetchCount, 1, "Fetch only expected once");
callback(null, jsonDoc);
}, {type: "json"});
let callbackCount = 0;
[...Array(concurrentCalls)].map(() => memoizedFn(testingUrl,
(err, doc) => {
if (err) {
return done(err);
}
callbackCount += 1;
assert.deepEqual(doc, jsonDoc);
if (callbackCount === concurrentCalls) {
done();
}
}
));
});
});
});
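The new test above is the behavioural check for all of the locking machinery: the cache file is removed first, 1000 calls are issued at once, and the fetcher asserts it only ever runs a single time. The same guarantee from a consumer's point of view, promisified only to keep the sketch short (the library's own API is callback-based, and the single-fetch outcome assumes a cold cache):

```js
// Consumer-side view of the dedup guarantee: many concurrent callers,
// one underlying fetch (assuming the cache entry does not exist yet).
const {promisify} = require("util");
const diskMemoizer = require("disk-memoizer");

let fetchCount = 0;
const getDoc = promisify(diskMemoizer((url, callback) => {
  fetchCount += 1;
  setTimeout(() => callback(null, {url}), 100); // fake slow fetch
}, {type: "json"}));

Promise.all(Array.from({length: 1000}, () => getDoc("https://example.com/doc")))
  .then((docs) => console.log(docs.length, "results,", fetchCount, "fetch"))
  .catch((err) => console.error(err));
```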
@@ -272,3 +301,3 @@
const memoizedGetJson = diskMemoizer(({url}, callback) => {
const memoizedGetJson = diskMemoizer((url, callback) => {
firstResponse = firstResponse || getResponseJson();
@@ -275,0 +304,0 @@ callback(null, firstResponse);
Sorry, the diff of this file is not supported yet
+ Added graceful-fs@^4.1.11
+ Added lockfile@^1.0.3
+ Added graceful-fs@4.2.11 (transitive)
+ Added lockfile@1.0.4 (transitive)
+ Added signal-exit@3.0.7 (transitive)