Introduction
The project goal is to provide an eventstore implementation for node.js:
- load and store events via an EventStream object
- event dispatching to your publisher (optional)
- supported Dbs (inmemory, mongodb, redis, tingodb, elasticsearch, azuretable, dynamodb)
- snapshot support
- query your events
Installation
npm install eventstore
Usage
Require the module and init the eventstore:
var eventstore = require('eventstore');
var es = eventstore();
By default the eventstore will use an in-memory storage.
Logging
For logging and debugging you can use debug by TJ Holowaychuk.
Simply run your process with:
DEBUG=eventstore* node app.js
Provide implementation for storage
example with mongodb:
var es = require('eventstore')({
type: 'mongodb',
host: 'localhost',
port: 27017,
dbName: 'eventstore',
eventsCollectionName: 'events',
snapshotsCollectionName: 'snapshots',
transactionsCollectionName: 'transactions',
timeout: 10000,
});
example with redis:
var es = require('eventstore')({
type: 'redis',
host: 'localhost',
port: 6379,
db: 0,
prefix: 'eventstore',
eventsCollectionName: 'events',
snapshotsCollectionName: 'snapshots',
timeout: 10000
});
example with tingodb:
var es = require('eventstore')({
type: 'tingodb',
dbPath: '/path/to/my/db/file',
eventsCollectionName: 'events',
snapshotsCollectionName: 'snapshots',
transactionsCollectionName: 'transactions',
timeout: 10000,
});
example with elasticsearch:
var es = require('eventstore')({
type: 'elasticsearch',
host: 'localhost:9200',
indexName: 'eventstore',
eventsTypeName: 'events',
snapshotsTypeName: 'snapshots',
log: 'warning',
maxSearchResults: 10000,
});
example with a custom elasticsearch client (e.g. an AWS Elasticsearch client; note the http-aws-es package used in this example):
var elasticsearch = require('elasticsearch');
var esClient = new elasticsearch.Client({
hosts: 'SOMETHING.es.amazonaws.com',
connectionClass: require('http-aws-es'),
amazonES: {
region: 'us-east-1',
accessKey: 'REPLACE_AWS_accessKey',
secretKey: 'REPLACE_AWS_secretKey'
}
});
var es = require('eventstore')({
type: 'elasticsearch',
client: esClient,
indexName: 'eventstore',
eventsTypeName: 'events',
snapshotsTypeName: 'snapshots',
log: 'warning',
maxSearchResults: 10000
});
example with azuretable:
var es = require('eventstore')({
type: 'azuretable',
storageAccount: 'nodeeventstore',
storageAccessKey: 'aXJaod96t980AbNwG9Vh6T3ewPQnvMWAn289Wft9RTv+heXQBxLsY3Z4w66CI7NN12+1HUnHM8S3sUbcI5zctg==',
storageTableHost: 'https://nodeeventstore.table.core.windows.net/',
eventsTableName: 'events',
snapshotsTableName: 'snapshots',
timeout: 10000,
emitStoreEvents: true
});
example with dynamodb:
var es = require('eventstore')({
type: 'dynamodb',
eventsTableName: 'events',
snapshotsTableName: 'snapshots',
undispatchedEventsTableName: 'undispatched',
EventsReadCapacityUnits: 1,
EventsWriteCapacityUnits: 3,
SnapshotReadCapacityUnits: 1,
SnapshotWriteCapacityUnits: 3,
UndispatchedEventsReadCapacityUnits: 1,
UndispatchedEventsWriteCapacityUnits: 1,
useUndispatchedEventsTable: true,
eventsTableStreamEnabled: false,
eventsTableStreamViewType: 'NEW_IMAGE',
emitStoreEvents: true
});
DynamoDB credentials are obtained by eventstore either from environment variables or from the credentials file. For setup see the AWS JavaScript SDK.
The DynamoDB provider supports DynamoDB Local for local development via the AWS SDK endpoint option. Just set the $AWS_DYNAMODB_ENDPOINT (or %AWS_DYNAMODB_ENDPOINT% on Windows) environment variable to point to your running instance of DynamoDB Local like this:
$ export AWS_DYNAMODB_ENDPOINT=http://localhost:8000
Or on Windows:
> set AWS_DYNAMODB_ENDPOINT=http://localhost:8000
The useUndispatchedEventsTable option is available for those who prefer to use DynamoDB Streams to pull events from the store instead of the UndispatchedEvents table. The default is true. Setting this option to false will result in the UndispatchedEvents table not being created at all, the getUndispatchedEvents method will always return an empty array, and the setEventToDispatched method will effectively do nothing.
Refer to StreamViewType for a description of the eventsTableStreamViewType option.
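For example, a minimal sketch of a DynamoDB store that relies on DynamoDB Streams instead of the UndispatchedEvents table (table names are placeholders):
var es = require('eventstore')({
type: 'dynamodb',
eventsTableName: 'events',
snapshotsTableName: 'snapshots',
useUndispatchedEventsTable: false, // no UndispatchedEvents table will be created
eventsTableStreamEnabled: true, // enable a stream on the events table
eventsTableStreamViewType: 'NEW_IMAGE'
});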
Built-in event publisher (optional)
If defined, the eventstore will try to publish AND set the event to dispatched on its own...
sync interface
es.useEventPublisher(function(evt) {
// synchronously publish the event to your subscribers here
});
async interface
es.useEventPublisher(function(evt, callback) {
// asynchronously publish the event to your subscribers here
callback(); // signal that publishing is done
});
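As a minimal sketch, an async publisher that forwards committed events to a hypothetical message bus (the bus object is your own and not part of eventstore):
var bus = require('./my-bus'); // hypothetical module exposing emit()
es.useEventPublisher(function(evt, callback) {
bus.emit('event', evt); // forward the committed event to your subscribers
callback(); // signal that the event can be marked as dispatched
});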
catch connect and disconnect events
es.on('connect', function() {
console.log('storage connected');
});
es.on('disconnect', function() {
console.log('connection to storage is gone');
});
define event mappings [optional]
Define which values should be mapped/copied to the payload event.
es.defineEventMappings({
id: 'id',
commitId: 'commitId',
commitSequence: 'commitSequence',
commitStamp: 'commitStamp',
streamRevision: 'streamRevision'
});
initialize
es.init(function (err) {
// this callback is called when the storage is connected and ready
});
// or
es.init(); // the callback is optional
working with the eventstore
get the eventhistory (of an aggregate)
es.getEventStream('streamId', function(err, stream) {
var history = stream.events;
});
or
es.getEventStream({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr'
}, function(err, stream) {
var history = stream.events;
});
'streamId' and 'aggregateId' are the same...
In DDD terms, aggregate and context are just there to be more precise in language.
For example you can have a 'person' aggregate in the context 'human resources' and a 'person' aggregate in the context of 'business contracts'...
So you can have two completely different aggregate instances of two completely different aggregates (but perhaps with the same name) in two completely different contexts.
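For illustration, a sketch that loads the same aggregate id from two different contexts; the id and the 'contracts' context name are made up:
es.getEventStream({
aggregateId: '4711',
aggregate: 'person',
context: 'hr'
}, function(err, hrStream) {
// events of the 'person' aggregate in the 'hr' context
});
es.getEventStream({
aggregateId: '4711',
aggregate: 'person',
context: 'contracts'
}, function(err, contractsStream) {
// a completely independent stream, although aggregate name and id are the same
});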
You can also request an eventstream by limiting the query with a 'minimum revision number' and a 'maximum revision number':
var revMin = 5,
revMax = 8;
es.getEventStream('streamId' || {}, revMin, revMax, function(err, stream) {
var history = stream.events;
});
store a new event and commit it to store
es.getEventStream('streamId', function(err, stream) {
stream.addEvent({ my: 'event' });
stream.addEvents([{ my: 'event2' }]);
stream.commit();
// or with a callback
stream.commit(function(err, stream) {
console.log(stream.eventsToDispatch);
});
});
If you defined an event publisher function, the committed events will be dispatched to the provided publisher.
If you just want to load the last event as a stream, you can call getLastEventAsStream instead of getEventStream.
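For example (a sketch, the query values are placeholders):
es.getLastEventAsStream({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr'
}, function(err, stream) {
// stream.events contains at most the last event
stream.addEvent({ my: 'event' });
stream.commit();
});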
working with snapshotting
get snapshot and eventhistory from the snapshot point
es.getFromSnapshot('streamId', function(err, snapshot, stream) {
var snap = snapshot.data;
var history = stream.events;
});
or
es.getFromSnapshot({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr'
}, function(err, snapshot, stream) {
var snap = snapshot.data;
var history = stream.events;
});
You can also request a snapshot and an eventstream by limiting the query with a 'maximum revision number':
var revMax = 8;
es.getFromSnapshot('streamId' || {}, revMax, function(err, snapshot, stream) {
var snap = snapshot.data;
var history = stream.events;
});
create a snapshot point
es.getFromSnapshot('streamId', function(err, snapshot, stream) {
var snap = snapshot.data;
var history = stream.events;
if (history.length > myLimit) {
es.createSnapshot({
streamId: 'streamId',
data: myAggregate.getSnap(),
revision: stream.lastRevision,
version: 1
}, function(err) {
});
// or
es.createSnapshot({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr',
data: myAggregate.getSnap(),
revision: stream.lastRevision,
version: 1
}, function(err) {
});
}
});
You can automatically clean older snapshots by configuring the number of snapshots to keep with the maxSnapshotsCount option in the eventstore options.
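For example (a sketch, the value 3 is just an illustration):
var es = require('eventstore')({
maxSnapshotsCount: 3 // keep only the 3 newest snapshots
});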
own event dispatching (no event publisher function defined)
es.getUndispatchedEvents(function(err, evts) {
console.log(evts);
for (var e in evts) {
var evt = evts[e];
es.setEventToDispatched(evt, function(err) {});
// or by id
es.setEventToDispatched(evt.id, function(err) {});
}
});
query your events
for replaying your events or for rebuilding a viewmodel or just for fun...
skip, limit always optional
var skip = 0,
limit = 100;
es.getEvents(skip, limit, function(err, evts) {
});
// or only for a specific stream
es.getEvents('streamId', skip, limit, function(err, evts) {
});
// or by query
es.getEvents({
context: 'hr',
aggregate: 'person',
aggregateId: 'uuid'
}, skip, limit, function(err, evts) {
});
by revision
revMin, revMax always optional
var revMin = 5,
revMax = 8;
es.getEventsByRevision('streamId', revMin, revMax, function(err, evts) {});
es.getEventsByRevision({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr'
}, revMin, revMax, function(err, evts) {});
by commitStamp
skip, limit always optional
var skip = 0,
limit = 100;
es.getEventsSince(new Date(2015, 5, 23), skip, limit, function(err, evts) {
});
// or without skip
es.getEventsSince(new Date(2015, 5, 23), limit, function(err, evts) {
});
// or without skip and limit
es.getEventsSince(new Date(2015, 5, 23), function(err, evts) {
});
streaming your events
Some databases support streaming your events; the API is similar to the query API.
skip, limit always optional
var skip = 0,
limit = 100;
var stream = es.streamEvents(skip, limit);
var stream = es.streamEvents('streamId', skip, limit);
var stream = es.streamEventsSince(new Date(2015, 5, 23), skip, limit);
var stream = es.streamEventsByRevision({
aggregateId: 'myAggregateId',
aggregate: 'person',
context: 'hr',
});
stream.on('data', function(e) {
doSomethingWithEvent(e);
});
stream.on('end', function() {
console.log('no more events');
});
stream.pipe(myWritableStream);
currently supported by:
- mongodb
get the last event
for example to obtain the last revision number:
es.getLastEvent('streamId', function(err, evt) {
});
es.getLastEvent({
context: 'hr',
aggregate: 'person',
aggregateId: 'uuid'
}, function(err, evt) {
});
obtain a new id
es.getNewId(function(err, newId) {
if(err) {
console.log('ohhh :-(');
return;
}
console.log('the new id is: ' + newId);
});
position of event in store
Some db implementations support writing the position of the event in the whole store in addition to the streamRevision.
Currently these implementations support it:
- inmemory (by setting the trackPosition option)
- mongodb (by setting the positionsCollectionName option)
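For example, a sketch of both configurations (the collection name is a placeholder):
var es = require('eventstore')({
type: 'inmemory',
trackPosition: true
});
// or
var es = require('eventstore')({
type: 'mongodb',
positionsCollectionName: 'positions'
});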
special scaling handling with mongodb
Inserting multiple events (documents) in mongodb is not atomic.
For this reason the eventstore tries to repair itself when calling getEventsByRevision.
But if you want you can trigger this from outside:
es.store.getPendingTransactions(function(err, txs) {
if(err) {
console.log('ohhh :-(');
return;
}
es.store.getLastEvent({
aggregateId: txs[0].aggregateId,
aggregate: txs[0].aggregate,
context: txs[0].context
}, function (err, lastEvent) {
if(err) {
console.log('ohhh :-(');
return;
}
es.store.repairFailedTransaction(lastEvent, function (err) {
if(err) {
console.log('ohhh :-(');
return;
}
console.log('everything is fine');
});
});
});
Catch before and after eventstore events
Optionally the eventstore can emit before and after events. To enable this feature, set the emitStoreEvents option to true.
var eventstore = require('eventstore');
var es = eventstore({
emitStoreEvents: true,
});
es.on('before-clear', function({milliseconds}) {});
es.on('after-clear', function({milliseconds}) {});
es.on('before-get-next-positions', function({milliseconds, arguments: [positions]}) {});
es.on('after-get-next-positions', function({milliseconds, arguments: [positions]}) {});
es.on('before-add-events', function({milliseconds, arguments: [events]}) {});
es.on('after-add-events', function({milliseconds, arguments: [events]}) {});
es.on('before-get-events', function({milliseconds, arguments: [query, skip, limit]}) {});
es.on('after-get-events', function({milliseconds, arguments: [query, skip, limit]}) {});
es.on('before-get-events-since', function({milliseconds, arguments: [date, skip, limit]}) {});
es.on('after-get-events-since', function({milliseconds, arguments: [date, skip, limit]}) {});
es.on('before-get-events-by-revision', function({milliseconds, arguments: [query, revMin, revMax]}) {});
es.on('after-get-events-by-revision', function({milliseconds, arguments: [query, revMin, revMax]}) {});
es.on('before-get-last-event', function({milliseconds, arguments: [query]}) {});
es.on('after-get-last-event', function({milliseconds, arguments: [query]}) {});
es.on('before-get-undispatched-events', function({milliseconds, arguments: [query]}) {});
es.on('after-get-undispatched-events', function({milliseconds, arguments: [query]}) {});
es.on('before-set-event-to-dispatched', function({milliseconds, arguments: [id]}) {});
es.on('after-set-event-to-dispatched', function({milliseconds, arguments: [id]}) {});
es.on('before-add-snapshot', function({milliseconds, arguments: [snap]}) {});
es.on('after-add-snapshot', function({milliseconds, arguments: [snap]}) {});
es.on('before-clean-snapshots', function({milliseconds, arguments: [query]}) {});
es.on('after-clean-snapshots', function({milliseconds, arguments: [query]}) {});
es.on('before-get-snapshot', function({milliseconds, arguments: [query, revMax]}) {});
es.on('after-get-snapshot', function({milliseconds, arguments: [query, revMax]}) {});
es.on('before-remove-transactions', function({milliseconds, arguments: [event]}) {});
es.on('after-remove-transactions', function({milliseconds, arguments: [event]}) {});
es.on('before-get-pending-transactions', function({milliseconds}) {});
es.on('after-get-pending-transactions', function({milliseconds}) {});
es.on('before-repair-failed-transactions', function({milliseconds, arguments: [lastEvt]}) {});
es.on('after-repair-failed-transactions', function({milliseconds, arguments: [lastEvt]}) {});
es.on('before-remove-tables', function({milliseconds}) {});
es.on('after-remove-tables', function({milliseconds}) {});
es.on('before-stream-events', function({milliseconds, arguments: [query, skip, limit]}) {});
es.on('after-stream-events', function({milliseconds, arguments: [query, skip, limit]}) {});
es.on('before-stream-events-since', function({milliseconds, arguments: [date, skip, limit]}) {});
es.on('after-stream-events-since', function({milliseconds, arguments: [date, skip, limit]}) {});
es.on('before-get-event-stream', function({milliseconds, arguments: [query, revMin, revMax]}) {});
es.on('after-get-event-stream', function({milliseconds, arguments: [query, revMin, revMax]}) {});
es.on('before-get-from-snapshot', function({milliseconds, arguments: [query, revMax]}) {});
es.on('after-get-from-snapshot', function({milliseconds, arguments: [query, revMax]}) {});
es.on('before-create-snapshot', function({milliseconds, arguments: [obj]}) {});
es.on('after-create-snapshot', function({milliseconds, arguments: [obj]}) {});
es.on('before-commit', function({milliseconds, arguments: [eventstream]}) {});
es.on('after-commit', function({milliseconds, arguments: [eventstream]}) {});
es.on('before-get-last-event-as-stream', function({milliseconds, arguments: [query]}) {});
es.on('after-get-last-event-as-stream', function({milliseconds, arguments: [query]}) {});
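For example, a small sketch that uses these events to measure how long commits take (the listener shape follows the signatures above):
es.on('after-commit', function({milliseconds}) {
console.log('commit took ' + milliseconds + ' ms');
});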
Sample Integration
- nodeCQRS: a CQRS sample integrating eventstore
Database Support
Currently these databases are supported:
- inmemory
- mongodb (node-mongodb-native)
- redis (redis)
- tingodb (tingodb)
- elasticsearch (elasticsearch)
- azuretable (azure-storage)
- dynamodb (aws-sdk)
own db implementation
You can use your own db implementation by extending this...
var Store = require('eventstore').Store,
util = require('util'),
_ = require('lodash');
function MyDB(options) {
options = options || {};
Store.call(this, options);
}
util.inherits(MyDB, Store);
_.extend(MyDB.prototype, {
// implement the store interface here (e.g. addEvents, getEvents, getSnapshot, addSnapshot, ...)
});
module.exports = MyDB;
and you can use it like this:
var es = require('eventstore')({
type: MyDB
});
License
Copyright (c) 2018 Adriano Raiano, Jan Muehlemann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.