Comparing version 1.1.1 to 2.0.0
72
index.js
'use strict'; | ||
var ASC = require( './lib/asc.js' ); | ||
var caches = {}; | ||
const ASC = require( './lib/asc.js' ); | ||
@@ -14,70 +13,1 @@ /** | ||
module.exports = ASC; | ||
/** | ||
* Clear Cache | ||
* | ||
* Removes a cache from the factory inventory. External references to the cache | ||
* will persist, but the cache will be emptied. | ||
* | ||
* @param {string} name The name of the cache to clear and remove from inventory | ||
*/ | ||
module.exports.clear = function( name ) { | ||
if ( caches[ name ] ) { | ||
caches[ name ].clearAll(); | ||
delete caches[ name ]; | ||
} | ||
}; | ||
/** | ||
* Get Cache | ||
* | ||
* Returns a cache instance identified by name. | ||
* | ||
* @param {string} name The name of the cache to return | ||
* @param {object} [options] Configuration for the cache | ||
* @param {number ? 300000} [options.ttl] The number of milliseconds each | ||
* key exist in the cache | ||
* @param {function} options.update A function used to update the cache one | ||
* key at a time. Prototype defined below | ||
* @param {function} [options.updateBatch] A function used to update the | ||
* cache in bulk. Prototype defined below | ||
* @param {function} [options.clear] A function called when a key is | ||
* cleared, by a timeout or otherwise. The key is passed to the | ||
* function | ||
* | ||
* options.update = function( key, callback ), where key is the lookup key | ||
* and callback is a function that should be passed a single parameter, | ||
* the value corresponding to key, or undefined if key has no | ||
* corresponding value or if value can not be determined for any reason. | ||
* | ||
* options.updateBatch = function( keys, callback ), where keys is an array | ||
* of keys to lookup in batch, and callback is a function that should be | ||
* passed a single array, containing one entry for each key in the | ||
* corresponding index in keys. Note: if this function is omitted, then | ||
* batch lookups against the cache will fall back to multiple update | ||
* calls in the background. | ||
* | ||
* @return {ASC} A cache instance | ||
*/ | ||
module.exports.getCache = function( name, options ) { | ||
// cache does not exist, create it | ||
if ( !caches[ name ] ) { | ||
if ( !options ) { | ||
options = {}; | ||
} | ||
caches[ name ] = new ASC( options ); | ||
} | ||
// cache exists and new options passed, update the options | ||
else if ( options !== undefined ) { | ||
caches[ name ].updateOptions( options ); | ||
} | ||
// return the cache | ||
return caches[ name ]; | ||
}; | ||
707
lib/asc.js
'use strict'; | ||
var async = require( 'async' ); | ||
const __ = require( 'doublescore' ); | ||
const async = require( 'async' ); | ||
const Memory = require( './memory' ); | ||
const util = require( './util' ); | ||
/** | ||
* The Cache Class for ASC | ||
* | ||
* The constructor for a single cache instance. | ||
* | ||
* @param {object} [options] Configuration for the cache | ||
* @param {number ? 300000} [options.ttl] The number of milliseconds each | ||
* key exist in the cache | ||
* @param {function} options.update A function used to update the cache one | ||
* key at a time. Prototype defined below | ||
* @param {function} [options.updateBatch] A function used to update the | ||
* cache in bulk. Prototype defined below | ||
* @param {function} [options.clear] A function called when a key is | ||
* cleared, by a timeout or otherwise. The key is passed to the | ||
* function | ||
* | ||
* options.update = function( key, callback ), where key is the lookup key | ||
* and callback is a function that should be passed a single parameter, | ||
* the value corresponding to key, or undefined if key has no | ||
* corresponding value or if value can not be determined for any reason. | ||
* | ||
* options.updateBatch = function( keys, callback ), where keys is an array | ||
* of keys to lookup in batch, and callback is a function that should be | ||
* passed a single array, containing one entry for each key in the | ||
* corresponding index in keys. Note: if this function is omitted, then | ||
* batch lookups against the cache will fall back to multiple update | ||
* calls in the background. | ||
* | ||
* @constructor | ||
*/ | ||
var ASC = function( options ) { | ||
class ASC { | ||
var self = this; | ||
/** | ||
* Constructor Data Types | ||
* | ||
* @typedef MemoryParams | ||
* @type {object} | ||
* @property {boolean} [disabled=false] If TRUE, the in-memory cache is disabled | ||
* @property {number} [ttl=60000] The TTL in milliseconds for the in-memory cache | ||
* | ||
* @callback DataCallback | ||
* @param {Error} err - An instance of Error if an error occurred, null otherwise | ||
* @param {any} [data] - Any data, MUST only be omitted if err is set. | ||
* | ||
* @callback ErrorOnlyCallback | ||
* @param {Error} [err] - An instance of Error if an error occurred, empty or null otherwise | ||
* | ||
* @callback GetCallback | ||
* @param {string} key - The key of the data to return | ||
* @param {DataCallback} - The done callback to pass an error or data to | ||
* | ||
* @callback SetCallback | ||
* @param {string} key - The key of the data to return | ||
* @param {any} data - The data to set for respective key | ||
* @param {ErrorOnlyCallback} - The done callback | ||
* | ||
* @callback ClearCallback | ||
* @param {string} key - The key of the data to return | ||
* @param {ErrorOnlyCallback} - The done callback | ||
* | ||
* @typedef CacheLayer | ||
* @type {object} | ||
* @property {DataCallback} get The get function | ||
* @property {SetCallback} [set] The set data function | ||
* @property {ClearCallback} [clear] The clear function | ||
* | ||
* @typedef ConstructorParams | ||
* @type {object} | ||
* @param {MemoryParams} [ConstructorParams.memory] | ||
* @param {[CacheLayer]} [ConstructorParams.layers] | ||
* @param {GetCallback} [ConstructorParams.get] | ||
* | ||
*/ | ||
self.storage = {}; | ||
self.updateCallbacks = {}; | ||
self.updateOptions( options ); | ||
/** | ||
* Return an instance of ASC class. | ||
* | ||
* @param {ConstructorParams|[CacheLayer]|GetCallback} params Can either be a configuration object, an array of just | ||
* layers, or just a single get function for handling in-memory misses. | ||
* | ||
*/ | ||
constructor( params ) { | ||
}; | ||
this._init( params ); | ||
this._setup(); | ||
/** | ||
* Update Options | ||
* | ||
* Updates the cache's options | ||
* | ||
* @param {object} [options] Configuration for the cache | ||
* @param {number ? 300000} [options.ttl] The number of milliseconds each | ||
* key exist in the cache | ||
* @param {function} options.update A function used to update the cache one | ||
* key at a time. Prototype defined below | ||
* @param {function} [options.updateBatch] A function used to update the | ||
* cache in bulk. Prototype defined below | ||
* @param {function} [options.clear] A function called when a key is | ||
* cleared, by a timeout or otherwise. The key is passed to the | ||
* function | ||
* | ||
* options.update = function( key, callback ), where key is the lookup key | ||
* and callback is a function that should be passed a single parameter, | ||
* the value corresponding to key, or undefined if key has no | ||
* corresponding value or if value can not be determined for any reason. | ||
* | ||
* options.updateBatch = function( keys, callback ), where keys is an array | ||
* of keys to lookup in batch, and callback is a function that should be | ||
* passed a single array, containing one entry for each key in the | ||
* corresponding index in keys. Note: if this function is omitted, then | ||
* batch lookups against the cache will fall back to multiple update | ||
* calls in the background. | ||
* | ||
*/ | ||
ASC.prototype.updateOptions = function( options ) { | ||
} | ||
this.options = { | ||
ttl: (options.ttl || 300000), | ||
update: (options.update || null), | ||
updateBatch: (options.updateBatch || null), | ||
clear: (options.clear || null) | ||
}; | ||
_init( params ) { | ||
if ( typeof this.options.ttl !== 'number' || this.options.ttl < 1000 ) { | ||
this.options.ttl = 1000; | ||
} | ||
// clamp to defaults | ||
params = typeof params === 'function' ? { // if params is just the get function, convert it to shortcut get | ||
get: params | ||
} : params; | ||
params = __.isArray( params ) ? { // if params is just the array of layers, convert it to a shortcut layers | ||
layers: params | ||
} : params; | ||
params = __.isObject( params ) ? params : {}; | ||
this._layers = __.isArray( params.layers ) ? params.layers : []; | ||
if ( typeof this.options.update !== 'function' ) { | ||
this.options.update = null; | ||
} | ||
// shortcut for cache with no middle layers | ||
if ( typeof params.get === 'function' ) { | ||
this._layers.push( { | ||
get: params.get | ||
} ); | ||
} | ||
if ( typeof this.options.clear !== 'function' ) { | ||
this.options.clear = null; | ||
} | ||
this._memoryParams = __( { | ||
disabled: false | ||
} ).mixin( params.memory || {} ); | ||
}; | ||
if ( this._layers.length < 1 ) { | ||
throw new Error( 'no caching layers provided' ); | ||
} | ||
/** | ||
* Get Batch | ||
* | ||
* Gets the values for an array of keys all at once. Values are passed to | ||
* the callback as an array, each entry corresponding to the respective | ||
* entry in keys. | ||
* | ||
* @param {(string|number|Array|object)[]} keys An Array of keys, each of any | ||
* primitive type and schema | ||
* @param {function(*[])} callback Will be passed the values that | ||
* correspond to the passed in keys. Values return in the same order | ||
* keys are passed | ||
* | ||
*/ | ||
ASC.prototype.getBatch = function( keys, callback ) { | ||
} | ||
if ( typeof callback !== 'function' ) { | ||
return; | ||
} | ||
_setup() { | ||
callback = immediateCallback( callback ); | ||
this._serviceQueues = {}; | ||
// handle edge case of no keys passed | ||
if ( !Array.isArray( keys ) || keys.length < 1 ) { | ||
this._getLayers = []; | ||
this._setLayers = []; | ||
this._clearLayers = []; | ||
callback( [] ); | ||
// if memory cache enabled, make it first | ||
if ( !this._memoryParams.disabled ) { | ||
return; | ||
} | ||
delete this._memoryParams.disabled; | ||
var self = this; | ||
var data = []; // this will contain all the values for each queried key, and gets passed to the call back once populated | ||
const memory = new Memory( this._memoryParams ); | ||
// these arrays all indexed by j | ||
var missedIndexes = []; // this keeps track of what indexes in data[] are a miss and are waiting to be serviced, corresponds to key at same array location in missedKeys | ||
var missedKeys = []; // this keeps track of the key that goes with the miss in missedIndex, such that the key stored at missedKeys[n] identifies the value that should populate data[missedIndexes[n]] | ||
var missedKeyStrings = []; // the serialized cache of respective entry in missedKeys | ||
// prefix memory handler to layers | ||
this._layers.unshift( { | ||
get: ( key, done ) => { | ||
return memory.get( key, done ); | ||
}, | ||
set: ( key, data, done ) => { | ||
return memory.set( key, data, done ); | ||
}, | ||
clear: ( key, done ) => { | ||
return memory.clear( key, done ); | ||
} | ||
} ); | ||
var i, j, k, keyString; | ||
} | ||
// loop through keys and see which we have data for and which need to be serviced | ||
for ( i = 0; i < keys.length; i++ ) { | ||
// this handler ignores errors and data returned by the layer | ||
// this is used for set calls | ||
const generateSetHandler = ( handler ) => { | ||
return ( key, data, done ) => { | ||
handler( key, data, () => { | ||
// ignore any errors or data the handler returns | ||
done(); | ||
} ); | ||
}; | ||
}; | ||
keyString = stringifyKey( keys[ i ] ); | ||
// this handler ignores errors and data returned by the layer | ||
// this is used for clear calls | ||
const generateClearHandler = ( handler ) => { | ||
return ( key, done ) => { | ||
handler( key, () => { | ||
// ignore any errors or data the handler returns | ||
done(); | ||
} ); | ||
}; | ||
}; | ||
// hit on key | ||
if ( this._exists( keyString ) ) { | ||
data[ i ] = this._get( keyString ); | ||
} | ||
this._layers.forEach( ( layer, i ) => { | ||
// miss on key | ||
else { | ||
data[ i ] = undefined; // for now | ||
if ( !__.isObject( layer ) ) { | ||
throw new Error( 'layer ' + i + ' is not an object' ); | ||
} | ||
missedIndexes.push( i ); | ||
missedKeys.push( keys[ i ] ); | ||
missedKeyStrings.push( keyString ); | ||
} | ||
// get function is required | ||
if ( typeof layer.get !== 'function' ) { | ||
throw new Error( 'layer ' + i + ' is missing get function' ) | ||
} | ||
} | ||
// get is required on each layer | ||
this._getLayers.push( layer.get ); | ||
// case of all keys had a cache hit | ||
if ( missedIndexes.length < 1 ) { | ||
// set is not required on any layer, but it makes sense to want to populate each layers cache on a miss. | ||
// due to other logic, we set a no-op method if the layer doesn't provide one | ||
let layerSet = layer.set; | ||
if ( typeof layerSet !== 'function' ) { | ||
layerSet = ( key, data, done ) => { | ||
// NO-OP | ||
done(); | ||
}; | ||
} | ||
this._setLayers.push( generateSetHandler( layerSet ) ); | ||
callback( data ); | ||
// clear is not required on any layer, but also makes sense to want to | ||
// be able to propagate clears to all layers | ||
if ( typeof layer.clear === 'function' ) { | ||
this._clearLayers.push( generateClearHandler( layer.clear ) ); | ||
} | ||
} | ||
} ); | ||
// case of not all keys had a hit, but there is a batch update | ||
else if ( typeof self.options.updateBatch === 'function' ) { | ||
} | ||
// these arrays all indexed by k | ||
var batchMissedKeys = []; // tracks the un-serialized keys to pass to user updateBatch() function | ||
var batchMissedKeyStrings = []; // the serialized cache of respective entry in batchMissedKeys | ||
/** | ||
* Gets the corresponding key from the first layer to have the data. | ||
* | ||
* @param { any } key Can be any object or scalar, but must be serializable as JSON. | ||
* @param { any } data Can be anything. The in-memory layer built in to ASC can store anything, including resource handles, but it is up to your layers to be able to handle storage of whatever can be passed here. | ||
* @param { function( | ||
* {Error} err?, | ||
* {any} data The first layer to return data will route to here. Any layers above the layer that returned data, will have their set() method called. It is up to your layers to handle storage of whatever data types lower layers return. | ||
* ) | ||
* } done Will call back with no arguments, or first argument will be instance of Error if any of the layers errors out. | ||
*/ | ||
get( key, done ) { | ||
// generate and track callbacks to update the cache misses | ||
var batchCallbacksOutstanding = 0; // keep track of how many of the keys are still waiting on an update | ||
var getBatchCallback = function( j ) { | ||
// only use the marshalled key for ASC callback queues | ||
// pass original key to all cache layer handlers | ||
const marshalledKey = util.marshallKey( key ); | ||
// generating another callback, so count it | ||
batchCallbacksOutstanding++; | ||
if ( !this._serviceQueues.hasOwnProperty( marshalledKey ) ) { | ||
this._serviceQueues[ marshalledKey ] = []; | ||
} | ||
return function( value ) { | ||
this._serviceQueues[ marshalledKey ].push( done ); | ||
data[ missedIndexes[ j ] ] = value; | ||
if ( this._serviceQueues[ marshalledKey ].length > 1 ) { | ||
return; | ||
} | ||
// callback is done, so decrement the count | ||
batchCallbacksOutstanding--; | ||
let returnData; | ||
let hasData = false; | ||
let returnErr = null; | ||
let currentIndex = 0; | ||
// if there are no more callbacks outstanding, we can return the data | ||
if ( batchCallbacksOutstanding < 1 ) { | ||
callback( data ); | ||
} | ||
async.whilst( | ||
() => !hasData && returnErr === null && currentIndex < this._getLayers.length, | ||
( done ) => { | ||
}; | ||
const handler = this._getLayers[ currentIndex ]; | ||
}; | ||
handler( key, ( err, data ) => { | ||
// iterate through all cache miss keys, creating callbacks for when those keys get populated | ||
for ( j = 0; j < missedKeys.length; j++ ) { | ||
// this layer failed to return data, either not found or any other issue | ||
if ( err instanceof Error ) { | ||
keyString = missedKeyStrings[ j ]; | ||
if ( currentIndex === this._getLayers.length - 1 ) { | ||
// this was the last layer, take the error | ||
returnErr = err; | ||
} else { | ||
// more layers to try | ||
currentIndex++; | ||
} | ||
// push a callback for the missed key, and if it does not already have an update underway, add it to the batch update call | ||
if ( self._pushUpdateCallback( keyString, getBatchCallback( j ) ) ) { | ||
} else if ( err ) { | ||
batchMissedKeys.push( missedKeys[ j ] ); | ||
batchMissedKeyStrings.push( keyString ); | ||
// this layer returned an invalid value for error | ||
returnErr = new Error( 'layer ' + currentIndex + ' failed to return an instance of Error, returned: ' + | ||
__.getType( err ) ); | ||
} | ||
} else { | ||
} | ||
// assume this layer had data | ||
returnData = data; | ||
hasData = true; | ||
process.nextTick( function() { | ||
} | ||
// of the cache misses, batchMissedKeys did not already have an update in progress, so we need to update them in batch here | ||
self.options.updateBatch( batchMissedKeys, function( data ) { | ||
done(); | ||
// if the update function does not provide an array, clamp | ||
if ( !Array.isArray( data ) ) { | ||
data = []; | ||
} | ||
} ); | ||
// set the data in the cache, then trigger the callbacks | ||
for ( k = 0; k < batchMissedKeyStrings.length; k++ ) { | ||
}, | ||
() => { | ||
// new data should have one entry per key | ||
if ( data.length - 1 < k ) { | ||
data.push( undefined ); | ||
} | ||
const finish = () => { | ||
self._set( batchMissedKeyStrings[ k ], data[ k ] ); | ||
// clear out the queue and store it in a local variable so that | ||
// the callbacks we are about to fire don't create an endless loop if | ||
// they trigger another lookup on the same key | ||
const callbacks = this._serviceQueues[ marshalledKey ]; | ||
delete this._serviceQueues[ marshalledKey ]; | ||
} | ||
// fire all callbacks synchronously, in series | ||
callbacks.forEach( ( callback ) => { | ||
} ); | ||
// wrap this in a try/cache in case the external code is buggy | ||
try { | ||
} ); | ||
} | ||
if ( hasData ) { | ||
callback( null, returnData ); | ||
} else { | ||
callback( returnErr ); | ||
} | ||
// case of not all keys had a hit, and there is NOT a batch update | ||
else { | ||
} catch ( e ) { | ||
// NO-OP | ||
} | ||
var oneByOneLookups = []; | ||
} ); | ||
var pushLookup = function( j ) { | ||
}; | ||
// queue up parallel call | ||
oneByOneLookups.push( function( callback ) { | ||
// if we have data, back-populate up the layers | ||
// otherwise just start the callbacks | ||
if ( hasData ) { | ||
this._populateMisses( key, returnData, currentIndex, finish ); | ||
} else { | ||
finish(); | ||
} | ||
// lookup missed key j | ||
self.get( missedKeys[ j ], function( value ) { | ||
} | ||
); | ||
// put the data where it needs to be | ||
data[ missedIndexes[ j ] ] = value; | ||
} | ||
// tell async this function is done | ||
callback( null, true ); | ||
_populateMisses( key, data, index, done ) { | ||
} ); | ||
async.timesSeries( index, ( i, done ) => { | ||
} ); | ||
this._setLayers[ i ]( key, data, done ); | ||
}; | ||
}, ( err ) => { | ||
for ( j = 0; j < missedIndexes.length; j++ ) { | ||
if ( err ) { | ||
return done( err ); | ||
} | ||
// push lookup for missed data index j | ||
pushLookup( j ); | ||
done(); | ||
} | ||
} ); | ||
async.parallel( oneByOneLookups, function() { | ||
callback( data ); | ||
} ); | ||
} | ||
}; | ||
} | ||
/** | ||
* Get | ||
* | ||
* Gets the value for a single key. | ||
* | ||
* @param {string|number|Array|object} key A key used to identify the value | ||
* @param {function(*)} callback Will be passed the value corresponding to | ||
* the key | ||
* | ||
*/ | ||
ASC.prototype.get = function( key, callback ) { | ||
/** | ||
* Sets the corresponding key to store the passed data. | ||
* | ||
* @param { any } key Can be any object or scalar, but must be serializable as JSON. | ||
* @param { any } data Can be anything. The in-memory layer built in to ASC can store anything, including resource handles, but it is up to your layers to be able to handle storage of whatever can be passed here. | ||
* @param { function({Error} err?) } done Will call back with no arguments, or first argument will be instance of Error if any of the layers errors out. | ||
*/ | ||
set( key, data, done ) { | ||
if ( typeof callback !== 'function' ) { | ||
return; | ||
} | ||
async.applyEachSeries( this._setLayers, key, data, ( err ) => { | ||
callback = immediateCallback( callback ); | ||
if ( err ) { | ||
return done( err ); | ||
} | ||
var self = this; | ||
var keyString = stringifyKey( key ); | ||
done(); | ||
// the desired value exists, fire! | ||
if ( this._exists( keyString ) ) { | ||
callback( this._get( keyString ) ); | ||
} | ||
} ); | ||
// store the callback, and if this is the first request to come in for this key then we kick off the update | ||
else if ( self._pushUpdateCallback( keyString, callback ) ) { | ||
} | ||
process.nextTick( function() { | ||
/** | ||
* Clears the corresponding key. | ||
* | ||
* @param { any } key Can be any object or scalar, but must be serializable as JSON. | ||
* @param { function({Error} err?) } done Will call back with no arguments, or first argument will be instance of Error if any of the layers errors out. | ||
*/ | ||
clear( key, done ) { | ||
// run the update function | ||
if ( typeof self.options.update === 'function' ) { | ||
async.applyEachSeries( this._clearLayers, key, ( err ) => { | ||
self.options.update( key, function( value ) { | ||
if ( err ) { | ||
return done( err ); | ||
} | ||
// when the value comes back, cache it, and trigger updates | ||
self._set( keyString, value ); | ||
done(); | ||
} ); | ||
} ); | ||
} | ||
} | ||
// no update function | ||
else { | ||
} | ||
self._set( keyString, undefined ); | ||
} | ||
} ); | ||
} | ||
}; | ||
/** | ||
* Clear Entry | ||
* | ||
* Clears the specified cache entry. If a clear() callback is configured in | ||
* the options, it will fire. | ||
* | ||
* @param {string|number|Array|object} key The key identifying the entry to | ||
* delete | ||
* | ||
*/ | ||
ASC.prototype.clear = function( key ) { | ||
return this._clear( stringifyKey( key ) ); | ||
}; | ||
/** | ||
* Clear All | ||
* | ||
* Clears all keys from cache. IF a clear() callback is configured in the | ||
* options, it will fire once for each key. | ||
* | ||
*/ | ||
ASC.prototype.clearAll = function() { | ||
for ( var keyString in this.storage ) { | ||
if ( this.storage.hasOwnProperty( keyString ) ) { | ||
this._clear( keyString ); | ||
} | ||
} | ||
}; | ||
/** | ||
* Set Entry | ||
* | ||
* Sets the specified value for the specified key | ||
* | ||
* @param {string|number|Array|object} key The key identifying the entry to set | ||
* @param {string|number|Array|object} value The value of the entry to set | ||
* | ||
*/ | ||
ASC.prototype.set = function( key, value ) { | ||
return this._set( stringifyKey( key ), value ); | ||
}; | ||
/** | ||
* Key Exists | ||
* | ||
* See if the specified key exists in the cache. | ||
* | ||
* @param {string|number|Array|object} key The key identifying the entry to set | ||
* @return {boolean} True if the key exists in the cache, false otherwise | ||
* | ||
*/ | ||
ASC.prototype.exists = function( key ) { | ||
return this._exists( stringifyKey( key ) ); | ||
}; | ||
/* --- private functions --- */ | ||
ASC.prototype._clear = function( keyString ) { | ||
if ( this._exists( keyString ) ) { | ||
// clear any existing timeout | ||
if ( this.storage[ keyString ].timeout ) { | ||
clearTimeout( this.storage[ keyString ].timeout ); | ||
} | ||
// if a clear event handler exists | ||
if ( typeof this.options.clear === 'function' ) { | ||
immediateCallback( this.options.clear )( parseKeyString( keyString ) ); | ||
} | ||
// delete the entire entry | ||
delete this.storage[ keyString ]; | ||
} | ||
}; | ||
ASC.prototype._exists = function( keyString ) { | ||
return this.storage.hasOwnProperty( keyString ); | ||
}; | ||
ASC.prototype._get = function( keyString ) { | ||
if ( this._exists( keyString ) ) { | ||
return this.storage[ keyString ].value; | ||
} | ||
return undefined; | ||
}; | ||
ASC.prototype._set = function( keyString, value ) { | ||
var self = this; | ||
// empty values are indicated by undefined, their TTL is the minimum | ||
var overrideTTL = value === undefined ? 1000 : false; | ||
// remove existing timeouts and data | ||
self._clear( keyString ); | ||
// set new value | ||
self.storage[ keyString ] = { | ||
value: value, | ||
timeout: setTimeout( function() { | ||
// timeout fired, clear this entry | ||
self._clear( keyString ); | ||
}, overrideTTL || self.options.ttl ) | ||
}; | ||
// let everyone know the value is here | ||
self._triggerCallbacks( keyString, value ); | ||
}; | ||
ASC.prototype._pushUpdateCallback = function( keyString, callback ) { | ||
// if no updated pending, we are the first request and the callback stack will not exist | ||
if ( !Array.isArray( this.updateCallbacks[ keyString ] ) ) { | ||
this.updateCallbacks[ keyString ] = []; | ||
} | ||
// we use a callback array, so that if multiple requests come in for the same key before the first callback is | ||
// serviced, all the callbacks can share the single response | ||
this.updateCallbacks[ keyString ].push( callback ); | ||
// if this is the first callback, then caller is responsible for starting update, so return TRUE, otherwise, return false | ||
return ( this.updateCallbacks[ keyString ].length === 1 ); | ||
}; | ||
ASC.prototype._triggerCallbacks = function( keyString, value ) { | ||
if ( !Array.isArray( this.updateCallbacks[ keyString ] ) ) { | ||
return false; | ||
} | ||
var callbacks = this.updateCallbacks[ keyString ]; | ||
// the callbacks array is no longer needed | ||
delete this.updateCallbacks[ keyString ]; | ||
// fire callbacks | ||
for ( var i = 0, j = callbacks.length; i < j; i++ ) { | ||
if ( typeof callbacks[ i ] === 'function' ) { | ||
setImmediate( callbacks[ i ], value ); | ||
} | ||
} | ||
return true; | ||
}; | ||
module.exports = ASC; | ||
/* --- static private methods (sorta) --- */ | ||
function immediateCallback( callback ) { | ||
return function( value ) { | ||
setImmediate( function() { | ||
callback( value ); | ||
} ); | ||
}; | ||
} | ||
function stringifyKey( key ) { | ||
return JSON.stringify( key ); | ||
} | ||
function parseKeyString( keyString ) { | ||
return JSON.parse( keyString ); | ||
} |
{ | ||
"name": "asc", | ||
"description": "A middleware layer between a service and a client. The service could be an in-process library, a file, an external web service, anything. Any time the results from a resource call can be cached, you can use ASC as a proxy to that resource.", | ||
"version": "1.1.1", | ||
"author": "Anthony Hildoer <anthony@bluerival.com>", | ||
"repository": { | ||
"type": "git", | ||
"url": "git://github.com/bluerival/asc.git" | ||
}, | ||
"dependencies": { | ||
"async": "1.4.2" | ||
}, | ||
"devDependencies": { | ||
"mocha": "2.2.5" | ||
}, | ||
"keywords": [ | ||
"cache", | ||
"service proxy", | ||
"ttl cache", | ||
"batch lookup" | ||
], | ||
"engines": { | ||
"node": ">=0.10.0" | ||
}, | ||
"license": "MIT" | ||
"name": "asc", | ||
"description": "A middleware layer between a service and a client. The service could be an in-process library, a file, an external web service, anything. Any time the results from a resource call can be cached, you can use ASC as a proxy to that resource.", | ||
"version": "2.0.0", | ||
"author": "Anthony Hildoer <anthony@bluerival.com>", | ||
"repository": { | ||
"type": "git", | ||
"url": "git://github.com/bluerival/asc.git" | ||
}, | ||
"scripts": { | ||
"test-watch": "./node_modules/mocha/bin/mocha -b -u bdd -w --exit test/*.test.js", | ||
"test": "./node_modules/mocha/bin/mocha -b -u bdd --exit test/*.test.js" | ||
}, | ||
"dependencies": { | ||
"async": "^2.6.2", | ||
"doublescore": "^0.3.7" | ||
}, | ||
"devDependencies": { | ||
"@types/mocha": "^5.2.6", | ||
"mocha": "^6.1.4" | ||
}, | ||
"keywords": [ | ||
"cache", | ||
"service proxy", | ||
"ttl cache", | ||
"batch lookup" | ||
], | ||
"engines": { | ||
"node": ">=0.10.0" | ||
}, | ||
"license": "MIT" | ||
} |
632
README.md
@@ -1,243 +0,509 @@ | ||
ASC (pronounced "ASK") | ||
======== | ||
# ASC (pronounced "ASK") | ||
A middleware layer between a service and a client. The service could be an | ||
in-process library, a file, an external web service, anything. Any time the | ||
results from a resource call can be cached, you can use ASC as a proxy to that | ||
resource. | ||
ASC is a middleware layer between a set of cacheable services and a client. It allows the developer to define caching tiers | ||
by declaring layers which ASC will asynchronously update and query as needed. Hence, ASC stands for Asynchronous | ||
Self-Updating Cache. | ||
The services defined in each layer could be an in-process library, a file, an external web service, anything. Any time | ||
the results from a resource call can be cached, you can use ASC as a proxy to that resource. | ||
Why use it? | ||
======== | ||
ASC is a good solution to cache data returned from a service if the following | ||
criteria can be met: | ||
# New 2.0.0 | ||
* The service being consumed can be proxied to serve up data in this design | ||
pattern: | ||
This is a major re-write to the previous version. The previous version is 1.x | ||
```js | ||
ascInstance.get( key, function( value ) { | ||
// handle the value here | ||
} ); | ||
``` | ||
Here are the changes to ASC since 1.x | ||
OR | ||
* Tiered cache support: Add multiple cache layers. In addition to the built-in, in-memory cache, add Redis, Memcached, | ||
DynamoDB, S3, whatever you want. Each caching layer you define has a get, set and clear methods. Do whatever you want | ||
with each layer. The first layer to return a hit will populate all cache layers above it. | ||
```js | ||
ascInstance.getBatch( keys, function( values ) { | ||
// handle the values here, in same order as respective keys | ||
} ); | ||
``` | ||
* Your returned values can be cached for some TTL >= 1000 ms. | ||
* In-memory cache is optional: If you don't want to use the in-memory cache, it can be disabled. | ||
* Dropped batch key lookup. (We may bring it back if there is demand) | ||
Features | ||
======== | ||
* Dropped the global factory. (We won't bring this back, it's an anti-pattern to create global storage at the process | ||
level) | ||
* Cache with a TTL (minimum 1000 milliseconds) | ||
* Update function used to re-populate on cache-miss | ||
* Batch update function used to re-populate a set of keys on cache-miss (batch | ||
operations must be supported by the service consumed) | ||
* Request de-duplication | ||
* Shared cache instances between modules/libraries/etc | ||
* Complex keys, including arrays and primitive objects | ||
# Why use it? | ||
Create Instance | ||
======== | ||
Tiered storage of data is a key solution for scale. Typically data is stored at many levels between user access and | ||
permanent cold storage. Also, these tiers of storage are assembled at an infrastructure level, not an application level. | ||
The impact is considerable overhead between developers and infrastructure admins to develop a solid system. | ||
The ASC module exports the cache class. This is useful for creating caches that | ||
you do not want to share system wide. | ||
Developers have to inform admins how to cache data: which fields to cache on, what data to cache, how to cache it, how | ||
long to cache it, etc. The aim of ASC is to move most of that conversation away from developers <-> admins, and just | ||
between developers <-> developers. | ||
ASC will handle: | ||
* Traversing cache layers until a tier returns a hit for a key get(). | ||
* Propagating a hit up the layers with a set(). | ||
* De-duplicating parallel get() requests for the same key. If a key is requested, and a request is already in flight for | ||
that key, the first request will return data to all new, incoming get() calls for the same key. | ||
* Being really fast and predictable. | ||
# When NOT to use it? | ||
* You can't have your data cached, need latest data every time. | ||
* Your data can't be serialized, because it's an object containing resources, and not just arrays, objects and scalars. | ||
* Your keys can't be serialized using JSON.stringify. | ||
# API | ||
The ASC module exports a single class. Just instantiate it with ```new```. This is the full JSDoc for the constructor | ||
function. | ||
```js | ||
/** | ||
* The Cache Class for ASC | ||
/** | ||
* Constructor Data Types | ||
* | ||
* The constructor for a single cache instance. | ||
* @typedef MemoryParams | ||
* @type {object} | ||
* @property {boolean} [disabled=false] If TRUE, the in-memory cache is disabled | ||
* @property {number} [ttl=60000] The TTL in milliseconds for the in-memory cache | ||
* | ||
* @param {object} [options] Configuration for the cache | ||
* @param {number ? 300000} [options.ttl] The number of milliseconds each | ||
* key exist in the cache | ||
* @param {function} options.update A function used to update the cache one | ||
* key at a time. Prototype defined below | ||
* @param {function} [options.updateBatch] A function used to update the | ||
* cache in bulk. Prototype defined below | ||
* @param {function} [options.clear] A function called when a key is | ||
* cleared, by a timeout or otherwise. The key is passed to the | ||
* function | ||
* @callback DataCallback | ||
* @param {Error} err - An instance of Error if an error occurred, null otherwise | ||
* @param {any} [data] - Any data, MUST only be omitted if err is set. | ||
* | ||
* options.update = function( key, callback ), where key is the lookup key | ||
* and callback is a function that should be passed a single parameter, | ||
* the value corresponding to key, or undefined if key has no | ||
* corresponding value or if value can not be determined for any reason. | ||
* @callback ErrorOnlyCallback | ||
* @param {Error} [err] - An instance of Error if an error occurred, empty or null otherwise | ||
* | ||
* options.updateBatch = function( keys, callback ), where keys is an array | ||
* of keys to lookup in batch, and callback is a function that should be | ||
* passed a single array, containing one entry for each key in the | ||
* corresponding index in keys. Note: if this function is omitted, then | ||
* batch lookups against the cache will fall back to multiple update | ||
* calls in the background. | ||
* @callback GetCallback | ||
* @param {string} key - The key of the data to return | ||
 * @param {DataCallback} done - The done callback to pass an error or data to | ||
* | ||
* @constructor | ||
* @callback SetCallback | ||
* @param {string} key - The key of the data to return | ||
* @param {any} data - The data to set for respective key | ||
 * @param {ErrorOnlyCallback} done - The done callback | ||
* | ||
* @callback ClearCallback | ||
* @param {string} key - The key of the data to return | ||
 * @param {ErrorOnlyCallback} done - The done callback | ||
* | ||
* @typedef CacheLayer | ||
* @type {object} | ||
 * @property {GetCallback} get The get function | ||
* @property {SetCallback} [set] The set data function | ||
* @property {ClearCallback} [clear] The clear function | ||
* | ||
* @typedef ConstructorParams | ||
* @type {object} | ||
* @param {MemoryParams} [ConstructorParams.memory] | ||
* @param {[CacheLayer]} [ConstructorParams.layers] | ||
* @param {GetCallback} [ConstructorParams.get] | ||
* | ||
*/ | ||
var ASC = require( 'asc' ); | ||
var profileCache = new ASC( [options] ); | ||
/** | ||
* Return an instance of ASC class. | ||
* | ||
* @param {ConstructorParams|[CacheLayer]|GetCallback} params Can either be a configuration object, an array of just | ||
* layers, or just a single get function for handling in-memory misses. | ||
* | ||
*/ | ||
const ASC = require( 'asc' ); | ||
const cache = new ASC( params ); | ||
``` | ||
Factory | ||
======== | ||
# Layers, Diggity-Layers and All That | ||
You can also use the ASC module as a factory and shared cache store, using the | ||
getCache() method on the asc module. | ||
The new tiering logic defines each storage tier in layers. The potential layers are: | ||
* The in-memory cache | ||
* Any user-defined layers | ||
* The user-defined get method (called if in-memory cache, and all user-defined layers miss on a key) | ||
Layers are prioritized in the order they are defined. The in-memory cache is always top of the list, then all the | ||
user-defined layers are next, in the same order they are passed to the ASC constructor. Let's look at the most expressive | ||
way to define several layers. | ||
Consider this code. | ||
```js | ||
/** | ||
* Get Cache | ||
* | ||
* Returns a cache instance identified by name. | ||
* | ||
* @param {string} name The name of the cache to return | ||
* @param {object} [options] Configuration for the cache | ||
* @param {number ? 300000} [options.ttl] The number of milliseconds each | ||
* key exist in the cache | ||
* @param {function} options.update A function used to update the cache one | ||
* key at a time. Prototype defined below | ||
* @param {function} [options.updateBatch] A function used to update the | ||
* cache in bulk. Prototype defined below | ||
* @param {function} [options.clear] A function called when a key is | ||
* cleared, by a timeout or otherwise. The key is passed to the | ||
* function | ||
* | ||
* options.update = function( key, callback ), where key is the lookup key | ||
* and callback is a function that should be passed a single parameter, | ||
* the value corresponding to key, or undefined if key has no | ||
* corresponding value or if value can not be determined for any reason. | ||
* | ||
* options.updateBatch = function( keys, callback ), where keys is an array | ||
* of keys to lookup in batch, and callback is a function that should be | ||
* passed a single array, containing one entry for each key in the | ||
* corresponding index in keys. Note: if this function is omitted, then | ||
* batch lookups against the cache will fall back to multiple update | ||
* calls in the background. | ||
* | ||
* @return {ASC} An instance of the ASC cache | ||
*/ | ||
var profileCache = require( 'asc' ).getCache( name, [options] ); | ||
const ASC = require( 'asc' ); | ||
const fancyCache = new ASC( { | ||
memory: { // in-memory layer | ||
disabled: false, // this is the default, but we set it explicitly for illustration purpose here | ||
ttl: 60000 // 60 seconds | ||
}, | ||
layers: [ | ||
{ // layer 1 | ||
get: ( key, done ) => { | ||
// call some service, maybe Redis, and get the data for key, then return it | ||
done( null, dataFromRedis ); // success | ||
// if there is no data, or any other error occurs, return an instance of Error | ||
done( new Error( 'not found' ) ); | ||
}, | ||
set: ( key, dataForRedis, done ) => { // optional, but its a nice way to propagate data up the chain on misses | ||
// call some service, maybe Redis, and store the data for key | ||
done(); // success | ||
// if there is an error, or for any reason that data can not be stored, return an instance of Error. | ||
done( new Error( 'some API failure' ) ); | ||
}, | ||
      clear: ( key, data, done ) => { // also optional, but required if you want to be able to force this layer to delete a key | ||
// call some service, maybe Redis, and clear the data for key | ||
done(); | ||
// if there is an error, return an instance of Error. If the key doesn't exist in this layer, that is not | ||
// considered an error. Anything that prevents a key from clearing when it does exist is an error. | ||
done( new Error( 'some API failure' ) ); | ||
} | ||
}, | ||
{ // layer 2 | ||
get: ( key, done ) => { | ||
// call some service, maybe DynamoDB, and get the data for key | ||
done( null, dataFromDynamoDB ); | ||
}, | ||
set: ( key, dataForDynamoDB, done ) => { | ||
done(); // success | ||
}, | ||
clear: ( key, data, done ) => { | ||
done(); // success | ||
} | ||
}, | ||
{ // layer 3 | ||
get: ( key, done ) => { | ||
// query some heavy process, maybe a SQL report, maybe a 3rd party API across the inter-webs, maybe do some | ||
// heavy number crunching, etc. | ||
done( null, someExpensiveDataToGenerate ); | ||
} | ||
      // no set or clear, because this layer is ground truth. If it can't return data, it doesn't exist, or there | ||
// is an error. | ||
} | ||
] | ||
} ); | ||
``` | ||
Example | ||
======== | ||
Here, the priority of the layers are: | ||
Lets pretend you have a server that handles profiles, and it has a web service | ||
for looking up profiles which returns JSON. Lets also say that profile server | ||
takes 250ms to service a request. Lets also assume profiles can be up to 5 | ||
minutes old in your system to meet business requirements. The following code is | ||
one way you could cache the profile lookups. | ||
* In-Memory | ||
* Layer 1 | ||
* Layer 2 | ||
* Layer 3 | ||
When ```fancyCache.get( key... )``` is called, ASC will first check memory, then call get() on layer 1, then on layer 2, | ||
finally on layer 3. As soon as one of the layers fires the callback with ```null``` as the error argument, the | ||
waterfall ends. Each layer above the layer that finally returned data will have its set() method called, to populate that | ||
layer. If you don't want a layer to have data set() after a cache miss, simply don't provide a set() method. Or, provide | ||
a set() method and then ignore calls depending on the key or any other state your layer cares about. If you do provide | ||
a set() method, you MUST call done to prevent hanging. | ||
Here is a more concrete example relative to the code above. Let's say a call for data on a key is made. That key is | ||
missing from memory layer and layer 1. But, the key does have data in layer 2. The following methods will be called: | ||
fancyCache.get() // called | ||
memory.get() // miss | ||
layer1.get() // miss | ||
layer2.get() // hit | ||
layer1.set() // propagate data | ||
memory.set() // propagate data | ||
fancyCache.get() // returned data to callback | ||
The advantage to this approach is that it allows the application to determine how to migrate data between cache layers, | ||
with priority tiering. | ||
In this example we suggested that layer 1 is a Redis cluster, and layer 2 is DynamoDB global tables cluster, and layer 3 | ||
is ground truth/source for the data. In a geographically distributed deployment this logic allows for seamless data | ||
migration and caching throughout your architecture with little thought as to where your code is running. With this setup | ||
you could easily have a global deployment like this: | ||
```text | ||
- Ground Truth of Expensive Data Origination // Infinite cache, ground truth, never expires, etc | ||
- DynamoDB Global Table // Set the longest possible TTL on DynamoDB records that makes sense for your data | ||
- Redis Cluster in Tokyo DataCenter // Set a TTL that is much shorter than the DynamoDB TTL, probably minutes | ||
- Your application instances in Tokyo // Set a TTL that is much shorter than the Redis TTL, probably seconds | ||
- Redis Cluster in US East DataCenter | ||
- Your application instances in US East | ||
- Redis Cluster in US West DataCenter | ||
- Your application instances in US West | ||
- Redis Cluster in Ireland DataCenter | ||
- Your application instances in Ireland | ||
``` | ||
With this globally deployed architecture your entire infrastructure has the same kind of caching logic and layers you find | ||
within a modern CPU and motherboard. CPUs have multiple layers of very fast, but very short TTL caches, then system | ||
memory which is larger, but slower, followed by disk (and sometimes network storage) which is very slow but is permanent | ||
ground truth. | ||
There are of course other benefits to ASC which apply to other use cases that are much simpler than a globally deployed | ||
application. See the section "Common Use Cases with Examples" for additional examples. | ||
# Warning on the In-Memory Cache | ||
The built-in memory cache is not a magical box of unlimited storage. This cache will consume the primary memory in your | ||
process. Node.js running on 32-bit systems has a maximum heap of 512MB and on 64-bit systems it is 1GB. | ||
It is up to you to determine a good TTL to prevent your process from running out of memory. If you don't want to use | ||
any memory, no worries, just disable it. | ||
```js | ||
// first, create your cache instance. if the cache was already created | ||
// elsewhere, and you simply need access to it, replace this block with: | ||
// var profileCache = require( 'asc' ).getCache( 'user.profile' ); | ||
var profileCache = require( 'asc' ).getCache( 'user.profile', { | ||
const ASC = require( 'asc' ); | ||
// milliseconds to cache data ( 5 minutes ). minimum is 1000ms | ||
ttl: 300000, | ||
const fancyCache = new ASC( { | ||
memory: { | ||
disabled: true | ||
}, ... | ||
} ); | ||
// when a .get( key, callback ) call results in a cache-miss, this function | ||
// is called to update that cache entry | ||
update: function( username, callback ) { | ||
``` | ||
request( { | ||
url: "http://profile-server.mydomain.com/user/" + username | ||
}, | ||
function( err, result ) { | ||
if ( err ) { | ||
## Common Use Cases with Examples | ||
// if a cache entry can not be populated, undefined should be | ||
// returned | ||
callback( undefined ); | ||
} | ||
else { | ||
callback( JSON.parse( result.body ) ); | ||
} | ||
} ); | ||
### Simple In Memory Caching Only | ||
}, | ||
This is the base case. It just uses the built-in memory cache, which stores data in memory. The default TTL is used | ||
here and it is 60,000ms. | ||
// when a .getBatch( keys, callback ) call results in cache-misses for any | ||
// of the keys, this function is called to to update those cache entries | ||
updateBatch: function ( usernames, callback ) { | ||
```js | ||
request( { | ||
url: "http://profile-server.mydomain.com/users/" + | ||
usernames.join( ',' ); | ||
}, | ||
function( err, result ) { | ||
if ( err ) { | ||
callback( undefined ); | ||
} | ||
else { | ||
const ASC = require( 'asc' ); | ||
// assumes service returns profiles in same order usernames | ||
// passed to request | ||
callback( JSON.parse( result.body ) ); | ||
const cache = new ASC( ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
} ); | ||
} | ||
} ); | ||
// If the key is in memory, data will be returned from memory, otherwise the handler function will be called | ||
cache.get( 'some key', ( err, data ) => { | ||
// do something with the data | ||
} ); | ||
} | ||
``` | ||
#### Config shortcuts | ||
The above example is shorthand for defining an in-memory cache with a single layer. Here are 3 other ways to create the | ||
exact same cache. | ||
One | ||
```js | ||
const ASC = require( 'asc' ); | ||
const cache = new ASC( { | ||
get: ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
} | ||
} ); | ||
``` | ||
// now do all the profile lookups you want | ||
Two | ||
```js | ||
const ASC = require( 'asc' ); | ||
// cache miss, triggers call to user update function for next three gets | ||
profileCache.get( 'anthony', function( profile ) { | ||
console.log(value); | ||
const cache = new ASC( { | ||
layers: [ | ||
{ | ||
get: ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
} | ||
} | ||
] | ||
} ); | ||
profileCache.get( 'suzanne', function( value ) { | ||
console.log(value); | ||
``` | ||
Three | ||
```js | ||
const ASC = require( 'asc' ); | ||
const cache = new ASC( { | ||
layers: [ | ||
{ | ||
get: ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
}, | ||
set: ( key, data, done ) => { | ||
// NO-OP | ||
done(); | ||
}, | ||
clear: ( key, done ) => { | ||
// NO-OP | ||
done(); | ||
} | ||
} | ||
] | ||
} ); | ||
profileCache.get( 'jackson', function( value ) { | ||
console.log(value); | ||
``` | ||
You can use most of the shortcuts with multiple layers too. These two examples are also equivalent caches. | ||
One | ||
```js | ||
const ASC = require( 'asc' ); | ||
const cache = new ASC( { | ||
layers: [ | ||
{ | ||
get: ( key, done ) => { | ||
done( null, 'layer 1 cache data' ); | ||
} | ||
} | ||
], | ||
get: ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
} | ||
} ); | ||
``` | ||
// good chance the first request for suzanne is not yet serviced, but this get | ||
// does NOT trigger a second call to the profile server. This call to get will | ||
// queue up and receive the same data that is passed to the first get for | ||
// suzanne | ||
profileCache.get( 'suzanne', function( value ) { | ||
console.log( value ); | ||
Two | ||
```js | ||
const ASC = require( 'asc' ); | ||
const cache = new ASC( { | ||
layers: [ | ||
{ | ||
get: ( key, done ) => { | ||
done( null, 'layer 1 cache data' ); | ||
} | ||
}, | ||
{ | ||
get: ( key, done ) => { | ||
// call service, or load file, or do whatever, then return data | ||
done( null, 'some data based on key' ); | ||
} | ||
} | ||
] | ||
} ); | ||
``` | ||
// the cache will already have anthony and jackson in the cache (or at least an | ||
// outstanding update) which can be used to service those keys, but lilly and | ||
// landon have no existing entry or outstanding update. The updateBatch() | ||
// function will get called with keys [ 'lilly', 'landon' ] instead of all four. | ||
// This prevents the service provider from having to search for as much data, | ||
// and typically this will reduce the servicing time. | ||
profileCache.getBatch( [ 'anthony', 'jackson', 'lilly', 'landon' ], | ||
function ( values ) { | ||
console.log( values ); | ||
} ); | ||
### Redis Shared Cache | ||
// this will not trigger a call to update, instead it will get data when the | ||
// previous batch call finishes its update on [ 'lilly', 'landon' ]. | ||
profileCache.get( 'lilly', function( value ) { | ||
console.log( value ); | ||
Let's say you use a beefy, well-tuned Redis cluster and prefer to use that for caching. Also, let's say you don't want to | ||
use in-memory caching because you are deploying code on low-memory containers. This is a good way to use Redis to cache | ||
data and make it available to all containers in your cluster. | ||
```js | ||
const ASC = require( 'asc' ); | ||
const async = require( 'async' ); | ||
const redis = require( 'redis' ); | ||
const redisClient = redis.createClient(); | ||
const fancyCache = new ASC( { | ||
memory: { // disable in-memory layer | ||
disabled: true | ||
}, | ||
layers: [ | ||
{ // Redis tier | ||
get: ( key, done ) => { // see if redis has the key | ||
async.waterfall([ | ||
( done ) => { | ||
// Redis doesn't support fancy objects as keys, so we just JSON encode the key object | ||
redisClient.get( JSON.stringify( key ), done ); | ||
}, | ||
( strData, done ) => { | ||
let data = null; | ||
// wrap in try catch in case invalid JSON comes back and causes JSON.parse() to throw an Error. | ||
try { | ||
data = JSON.parse( strData ); // Redis doesn't understand JSON objects, so we store data as a string. | ||
} catch (e) { | ||
return done( e ); | ||
} | ||
done( null, data ); | ||
} | ||
], done); | ||
}, | ||
set: ( key, dataForRedis, done ) => { | ||
// store data in redis cache for 300 seconds | ||
async.waterfall( [ | ||
( done ) => { | ||
// store the data with a timeout of 300 seconds | ||
redisClient.set( JSON.stringify(key), JSON.stringify(dataForRedis), 'EX', 300, done ); | ||
}, | ||
( OK, done ) => { | ||
if ( OK !== 'OK' ) { | ||
return done( new Error('unknown error' ) ); | ||
} | ||
done(); | ||
} | ||
], done ); | ||
}, | ||
clear: ( key, data, done ) => { | ||
// delete the key from redis even if the TTL has not expired | ||
redisClient.del( JSON.stringify( key ), done ); | ||
} | ||
}, | ||
{ // Final tier | ||
get: ( key, done ) => { | ||
// query some heavy process, maybe a SQL report, maybe a 3rd party API across the inter-webs, maybe do some | ||
// heavy number crunching, etc. | ||
done( null, someExpensiveDataToGenerate ); | ||
} | ||
      // no set or clear, because this layer is ground truth. If it can't return data, it doesn't exist, or there | ||
// is an error. | ||
} | ||
] | ||
} ); | ||
``` | ||
Some things to notice from the example. The name of the cache we returned from | ||
the cache factory, user.profile, is a singleton. If any other module does a | ||
getCache on that cache name, they will get the same cache, thereby allowing | ||
different modules to benefit from the same data cache for that resource. Also | ||
note that passing a configuration object is optional. On your first getCache, | ||
you must pass the options with an update function, but subsequent calls can | ||
simply be require( 'asc' ).getCache( 'user.profile' ); | ||
# Contributing | ||
If you would like to help with this project, please do the following: | ||
* Fork the project on GitHub. | ||
* Branch your changes from develop to a new branch called `feature/<some unique name for your change>`. | ||
* Ensure you validate your code using the .eslintrc file in this repo. | ||
* Ensure your changes are covered by at least one test in test/. | ||
* Ensure `npm test` passes. | ||
* Issue a pull request from `feature/<some unique name for your change>` back to develop in the main repo. | ||
* Eat a sandwich. | ||
License | ||
@@ -248,3 +514,3 @@ ======== | ||
Copyright (c) 2015 BlueRival Software <anthony@bluerival.com> | ||
Copyright (c) 2019 BlueRival Software <support@bluerival.com> | ||
@@ -251,0 +517,0 @@ Permission is hereby granted, free of charge, to any person obtaining a copy of |
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain riskPackage has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
96653
14
2175
533
1
2
2
1
+ Addeddoublescore@^0.3.7
+ Addedasync@2.6.4(transitive)
+ Addeddoublescore@0.3.7(transitive)
+ Addedlodash@4.17.21(transitive)
- Removedasync@1.4.2(transitive)
Updatedasync@^2.6.2