Comparing version 0.0.4 to 0.0.5
@@ -5,20 +5,20 @@ // **N3Lexer** tokenizes N3 documents. | ||
// These token expressions were taken from the [context-free grammar in N3](http://www.w3.org/2000/10/swap/grammar/n3.n3). | ||
_explicituri: /<([^>]*)>/g, | ||
_string: /"[^"\\]*(?:\\.[^"\\]*)*"(?=[^"\\])/g, | ||
_tripleQuotedString: /""("[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*")""/g, | ||
_langcode: /@([a-z]+(?:-[a-z0-9]+)*)(?=[^a-z0-9\-])/g, | ||
_prefix: /([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:(?=\s)/g, | ||
_qname: /([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:((?:[A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?)(?=[\s\.;,])/g, | ||
_number: /(?:(-)|\+)?(\d+\.\d*|\.\d+|\d+)([eE](?:[\-\+])?\d+)?(?=\s*[,\.])/g, | ||
_boolean: /(?:true|false)(?=\s+)/g, | ||
_explicituri: /^<([^>]*)>/, | ||
_string: /^"[^"\\]*(?:\\.[^"\\]*)*"(?=[^"\\])/, | ||
_tripleQuotedString: /^""("[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*")""/, | ||
_langcode: /^@([a-z]+(?:-[a-z0-9]+)*)(?=[^a-z0-9\-])/, | ||
_prefix: /^([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:(?=\s)/, | ||
_qname: /^([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:((?:[A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?)(?=[\s\.;,])/, | ||
_number: /^(?:(-)|\+)?(\d+\.\d*|\.\d+|\d+)([eE](?:[\-\+])?\d+)?(?=\s*[,\.])/, | ||
_boolean: /^(?:true|false)(?=\s+)/, | ||
// The lexer needs these other token expressions as well. | ||
_punctuation: /\.(?!\d)|;|,|\[|\]|\(|\)/g, // If a digit follows a dot, it is a number, not punctuation. | ||
_fastString: /"[^"\\]+"(?=[^"\\])/g, | ||
_keyword: /@[a-z]+(?=\s)/g, | ||
_type: /\^\^(?:<([^>]*)>|([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)(?=[\s\.:,]))/g, | ||
_shortPredicates: /a(?=\s+|<)/g, | ||
_punctuation: /^\.(?!\d)|^;|^,|^\[|^\]|^\(|^\)/, // If a digit follows a dot, it is a number, not punctuation. | ||
_fastString: /^"[^"\\]+"(?=[^"\\])/, | ||
_keyword: /^@[a-z]+(?=\s)/, | ||
_type: /^\^\^(?:<([^>]*)>|([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)(?=[\s\.:,]))/, | ||
_shortPredicates: /^a(?=\s+|<)/, | ||
// Whitespace, newlines, and comments are actually not specified yet in the current N3 grammar. | ||
_newline: /[ \t]*(?:#[^\n\r]*)?(?:\r\n|\n|\r)[ \t]*/g, | ||
_whitespace: /[ \t]+|#[^\n\r]*/g, | ||
_nonwhitespace: /\S*/g, | ||
_newline: /^[ \t]*(?:#[^\n\r]*)?(?:\r\n|\n|\r)[ \t]*/, | ||
_whitespace: /^[ \t]+|^#[^\n\r]*/, | ||
_nonwhitespace: /^\S*/, | ||
}; | ||
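Every token expression above drops its `g` flag and gains a leading `^` anchor. In 0.0.4 a global regex was executed at a tracked position and the lexer then had to check where the match really started; in 0.0.5 a match can only occur at the very start of the remaining input, which is subsequently sliced off. A minimal sketch of the difference (illustrative only, not the library's code):

```js
// 0.0.4 style: global regex, the match has to be verified to start at `pos`.
var uriGlobal = /<([^>]*)>/g;
uriGlobal.lastIndex = 4;                     // position to try
var m = uriGlobal.exec('PRE <http://a> .');
var ok = m && m.index === 4;                 // extra positional check required

// 0.0.5 style: anchored regex, it can only match at the start of the string,
// so no positional check is needed; the matched text is simply consumed.
var uriAnchored = /^<([^>]*)>/;
var input = '<http://a> .';
var match = uriAnchored.exec(input);         // ['<http://a>', 'http://a']
if (match) input = input.substr(match[0].length);
```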
@@ -41,9 +41,9 @@ | ||
Constructor.prototype = N3Lexer.prototype; | ||
// Initialize the new `N3Lexer`. | ||
var n3Lexer = new Constructor(); | ||
// Create local instances of the regular expressions. | ||
// Local copies of the patterns perform slightly better. | ||
for (var name in patterns) | ||
n3Lexer[name] = new CachedRegExp(patterns[name].source, 'g'); | ||
n3Lexer[name] = patterns[name]; | ||
// Return the new `N3Lexer`. | ||
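The lexer, parser, and store all initialize themselves through the same dummy-constructor trick, so `N3Lexer(...)` and `new N3Lexer(...)` behave identically. Roughly, as a simplified sketch of the pattern rather than the exact library code:

```js
function N3Lexer() {
  // Throwaway constructor whose prototype is N3Lexer's prototype,
  // so the returned object still passes `instanceof N3Lexer`.
  function Constructor() {}
  Constructor.prototype = N3Lexer.prototype;

  var lexer = new Constructor();
  // ...copy patterns and options onto `lexer` here...
  return lexer;                      // explicit return works with or without `new`
}

console.log(N3Lexer() instanceof N3Lexer);      // true
console.log(new N3Lexer() instanceof N3Lexer);  // true
```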
@@ -55,5 +55,5 @@ return n3Lexer; | ||
constructor: N3Lexer, | ||
// ## Private methods | ||
// ### `_next` fires the callback with the next token. | ||
@@ -65,15 +65,15 @@ // Returns a boolean indicating whether a token has been emitted. | ||
return false; | ||
// Count and skip newlines. | ||
var match; | ||
while (match = this._newline.execAtIndex(this._input, this._pos)) { | ||
while (match = this._newline.exec(this._input)) { | ||
this._line++; | ||
this._pos = this._newline.lastIndex; | ||
this._input = this._input.substr(match[0].length); | ||
} | ||
// Skip whitespace. | ||
if (match = this._whitespace.execAtIndex(this._input, this._pos)) { | ||
this._pos = this._whitespace.lastIndex; | ||
if (match = this._whitespace.exec(this._input)) { | ||
this._input = this._input.substr(match[0].length); | ||
} | ||
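With the position counter gone, the loop above advances by slicing each newline match off the front of `_input`; the line counter is the only state carried along. A small standalone rendition of that idea (an approximation, not the exact method body):

```js
var newline = /^[ \t]*(?:#[^\n\r]*)?(?:\r\n|\n|\r)[ \t]*/;

function skipNewlines(state) {
  var match;
  // Each match (optional comment plus one line break) is removed from the
  // front of the input and bumps the line counter.
  while (match = newline.exec(state.input)) {
    state.line++;
    state.input = state.input.substr(match[0].length);
  }
  return state;
}

var state = skipNewlines({ input: '  # comment\n\r\n<a> <b> <c>.', line: 1 });
console.log(state.line, state.input);   // 3 '<a> <b> <c>.'
```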
// Create token skeleton. | ||
@@ -86,5 +86,5 @@ // We initialize all possible properties as strings, so the engine uses one runtime type for all tokens. | ||
}; | ||
// Emit the EOF token if we're at the end and reading is complete. | ||
if (this._pos >= this._input.length) { | ||
if (!this._input.length) { | ||
// If we're streaming, don't emit EOF yet. | ||
@@ -102,3 +102,3 @@ if (!this._inputComplete) | ||
// Try to find an `explicituri`. | ||
if (match = this._explicituri.execAtIndex(this._input, this._pos)) { | ||
if (match = this._explicituri.exec(this._input)) { | ||
token.type = 'explicituri'; | ||
@@ -108,7 +108,7 @@ token.value = match[1]; | ||
// Try to find a dot. | ||
else if (match = this._punctuation.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._punctuation.exec(this._input)) { | ||
token.type = punctuationTypes[match[0]]; | ||
} | ||
// Try to find a language code. | ||
else if (this._prevTokenType === 'literal' && (match = this._langcode.execAtIndex(this._input, this._pos))) { | ||
else if (this._prevTokenType === 'literal' && (match = this._langcode.exec(this._input))) { | ||
token.type = 'langcode'; | ||
@@ -120,3 +120,3 @@ token.value = match[1]; | ||
// If streaming, make sure the input is long enough so we don't miss language codes or string types. | ||
else if (match = this._fastString.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._fastString.exec(this._input)) { | ||
token.type = 'literal'; | ||
@@ -126,3 +126,3 @@ token.value = match[0]; | ||
// Try to find any other string literal wrapped in a pair of quotes. | ||
else if (match = this._string.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._string.exec(this._input)) { | ||
token.type = 'literal'; | ||
@@ -132,3 +132,3 @@ token.value = this._unescapeString(match[0]); | ||
// Try to find a string literal wrapped in a pair of triple quotes. | ||
else if (match = this._tripleQuotedString.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._tripleQuotedString.exec(this._input)) { | ||
token.type = 'literal'; | ||
@@ -140,3 +140,3 @@ // Count the newlines and advance line counter. | ||
// Try to find a number. | ||
else if (match = this._number.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._number.exec(this._input)) { | ||
token.type = 'literal'; | ||
@@ -156,3 +156,3 @@ var value = (match[1] === '-' ? '-' + match[2] : match[2]); | ||
// Try to match a boolean. | ||
else if (match = this._boolean.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._boolean.exec(this._input)) { | ||
token.type = 'literal'; | ||
@@ -162,3 +162,3 @@ token.value = '"' + match[0] + '"^^<http://www.w3.org/2001/XMLSchema#boolean>'; | ||
// Try to find a type. | ||
else if (this._prevTokenType === 'literal' && (match = this._type.execAtIndex(this._input, this._pos))) { | ||
else if (this._prevTokenType === 'literal' && (match = this._type.exec(this._input))) { | ||
token.type = 'type'; | ||
@@ -174,7 +174,7 @@ if (!match[2]) { | ||
// Try to find a keyword. | ||
else if (match = this._keyword.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._keyword.exec(this._input)) { | ||
token.type = match[0]; | ||
} | ||
// Try to find a prefix. | ||
else if (this._prevTokenType === '@prefix' && (match = this._prefix.execAtIndex(this._input, this._pos))) { | ||
else if (this._prevTokenType === '@prefix' && (match = this._prefix.exec(this._input))) { | ||
token.type = 'prefix'; | ||
@@ -184,3 +184,3 @@ token.value = match[1] || ''; | ||
// Try to find a qname. | ||
else if (match = this._qname.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._qname.exec(this._input)) { | ||
token.type = 'qname'; | ||
@@ -191,3 +191,3 @@ token.prefix = match[1] || ''; | ||
// Try to find an abbreviated predicate. | ||
else if (match = this._shortPredicates.execAtIndex(this._input, this._pos)) { | ||
else if (match = this._shortPredicates.exec(this._input)) { | ||
token.type = 'abbreviation'; | ||
@@ -201,3 +201,2 @@ token.value = fullPredicates[match[0]]; | ||
if (this._inputComplete) { | ||
this._nonwhitespace.lastIndex = this._pos; | ||
match = this._nonwhitespace.exec(this._input); | ||
@@ -209,9 +208,9 @@ delete this._input; | ||
} | ||
// Save the token type for the next iteration. | ||
this._prevTokenType = token.type; | ||
// Advance to next part to tokenize. | ||
this._pos += match[0].length; | ||
this._input = this._input.substr(match[0].length); | ||
// Emit the parsed token. | ||
@@ -221,3 +220,3 @@ callback(null, token); | ||
}, | ||
// ### `_unescapeString` replaces escape codes in N3 strings with the corresponding characters. | ||
@@ -229,5 +228,5 @@ _unescapeString: function (string) { | ||
}, | ||
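The body of `_unescapeString` falls outside this hunk; per the tests further down it turns sequences such as `\\`, `\n`, `\t` and `\ua1b2` back into characters. One possible minimal implementation, offered purely as an assumption about the approach:

```js
// Hypothetical stand-in for the lexer's _unescapeString helper.
function unescapeString(string) {
  var escapes = { '\\\\': '\\', "\\'": "'", '\\"': '"',
                  '\\n': '\n', '\\r': '\r', '\\t': '\t' };
  return string.replace(/\\u([0-9a-fA-F]{4})|\\[\\'"nrt]/g, function (sequence, hex) {
    return hex ? String.fromCharCode(parseInt(hex, 16)) : escapes[sequence];
  });
}

// The \t and \u0041 sequences become a tab and "A"; the surrounding quotes remain.
console.log(unescapeString('"a\\tb\\u0041"'));
```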
// ## Public methods | ||
// ### `tokenize` starts the transformation of an N3 document into an array of tokens. | ||
@@ -237,5 +236,4 @@ // The input can be a string or a stream. | ||
var self = this; | ||
this._pos = 0; | ||
this._line = 1; | ||
// If the input is a string, continuously emit tokens through callback until the end. | ||
@@ -253,21 +251,11 @@ if (typeof(input) === 'string') { | ||
this._inputComplete = false; | ||
// Read strings, not buffers. | ||
input.setEncoding('utf8'); | ||
// If new data arrives… | ||
input.on('data', function (data) { | ||
// …discard already parsed data and add the new data to the buffer. | ||
if (self._pos === self._input.length) | ||
self._input = data; | ||
else | ||
self._input = self._input.substr(self._pos) + data; | ||
// Reset the position to the beginning of the new input buffer. | ||
self._pos = 0; | ||
// Clear the RegExp caches, as they were created with the old position. | ||
for (var name in patterns) | ||
self[name].clearMatchCache(); | ||
// Parse as far as we can. | ||
// …add the new data to the buffer | ||
self._input += data; | ||
// …and parse as far as we can. | ||
while (self._next(callback)) ; | ||
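The streaming path is now simply: append the chunk, then call `_next` until it stops producing tokens, with no position reset or regex-cache invalidation left over. Under that assumption, using the public `tokenize` entry point with a string looks roughly like this (require path taken from the tests; token fields as the tests expect):

```js
var N3Lexer = require('../lib/n3lexer.js');   // path as used by the test suite

new N3Lexer().tokenize('<a> <b> "c"@en.', function (error, token) {
  if (error) return console.error(error);
  // Roughly: two explicituri tokens, a literal, a langcode,
  // a punctuation token for the dot, and finally an eof token.
  console.log(token.line, token.type, token.value);
});
```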
@@ -286,42 +274,2 @@ }); | ||
// ## CachedRegExp | ||
// `CachedRegExp` is a cached regular expression that allows exact position matching, | ||
// providing an alternative to Mozilla's RegExp sticky flag. | ||
function CachedRegExp(pattern, flags) { | ||
var regExp = new RegExp(pattern, flags); | ||
for (var name in CachedRegExp.prototype) | ||
regExp[name] = CachedRegExp.prototype[name]; | ||
return regExp; | ||
} | ||
CachedRegExp.prototype = { | ||
// ### `execAtIndex` executes a position-bound regular expression | ||
// It executes the regular expression against `input` | ||
// and returns a match only if it occurs at position `index`. | ||
// If a match occurs at a higher position, this match is cached in `lastMatch` | ||
// for reuse in subsequent calls; clearing this cache is possible with `clearMatchCache`. | ||
execAtIndex: function (input, index) { | ||
// If there is no cache, or if the position has advanced past the cached match… | ||
if (!this.lastMatch || index > this.lastMatch.index) { | ||
// …invalidate the match… | ||
this.lastMatch = null; | ||
// …and execute the regex at the specified position. | ||
this.lastIndex = index; | ||
this.lastMatch = this.exec(input); | ||
// Return the match if it is successful and starts at the specified position. | ||
if (this.lastMatch && this.lastMatch.index === index) | ||
return this.lastMatch; | ||
} | ||
// Return the cached match if it starts at the specified position. | ||
else if (this.lastMatch.index === index) { | ||
return this.lastMatch; | ||
} | ||
}, | ||
// ### `clearMatchCache` removes a possibly cached match. | ||
clearMatchCache: function () { | ||
this.lastMatch = null; | ||
} | ||
}; | ||
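The removed `CachedRegExp` helper emulated position-bound matching (what its comment calls Mozilla's RegExp sticky flag) on top of a global regex plus a cached match. Since 0.0.5 consumes the input instead, the helper became unnecessary. For comparison, the behaviour it imitated looks roughly like this in engines that support the sticky `y` flag (an aside, not part of the package):

```js
// A sticky regex matches only at lastIndex, with no cache or index check,
// which is exactly what CachedRegExp.execAtIndex emulated.
var sticky = new RegExp('<([^>]*)>', 'y');
var input = 'PRE <http://a> .';

sticky.lastIndex = 0;
console.log(sticky.exec(input));     // null: nothing starts at position 0

sticky.lastIndex = 4;
console.log(sticky.exec(input)[1]);  // 'http://a': the match is bound to position 4
```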
// ## Exports | ||
@@ -328,0 +276,0 @@ |
@@ -19,7 +19,7 @@ // **N3Parser** parses N3 documents. | ||
config = config || {}; | ||
// We use a dummy constructor to enable construction without `new`. | ||
function Constructor() {} | ||
Constructor.prototype = N3Parser.prototype; | ||
// Initialize the new `N3Parser`. | ||
@@ -39,3 +39,3 @@ var n3Parser = new Constructor(); | ||
} | ||
// Return the new `N3Parser`. | ||
@@ -47,5 +47,5 @@ return n3Parser; | ||
constructor: N3Parser, | ||
// ## Private methods | ||
// ### `_readInTopContext` reads a token when in the top context. | ||
@@ -68,3 +68,3 @@ _readInTopContext: function (token) { | ||
}, | ||
// ### `_readSubject` reads a triple's subject. | ||
@@ -107,3 +107,3 @@ _readSubject: function (token) { | ||
}, | ||
// ### `_readPredicate` reads a triple's predicate. | ||
@@ -140,3 +140,3 @@ _readPredicate: function (token) { | ||
}, | ||
// ### `_readObject` reads a triple's object. | ||
@@ -195,3 +195,3 @@ _readObject: function (token) { | ||
return this._readPunctuation(token); | ||
// Store blank node triple. | ||
@@ -203,3 +203,3 @@ if (empty !== true) | ||
context: 'n3/contexts#default' }); | ||
// Restore parent triple that contains the blank node. | ||
@@ -233,3 +233,3 @@ var triple = this._tripleStack.pop(); | ||
} | ||
this._object += '^^' + value; | ||
this._object += '^^<' + value + '>'; | ||
return this._readPunctuation; | ||
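This is a behavioural fix rather than a cleanup: datatype URIs on literals are now wrapped in angle brackets, which is the `'"string"^^<type>'` form the updated parser tests below expect. A hedged usage sketch (require path and triple fields as used in the tests):

```js
var N3Parser = require('../lib/n3parser.js');  // path as used by the test suite

new N3Parser().parse('<a> <b> "string"^^<type>.', function (error, triple) {
  if (triple)
    // 0.0.4 emitted the object as '"string"^^type'; 0.0.5 emits '"string"^^<type>'.
    console.log(triple.subject, triple.predicate, triple.object);
});
```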
@@ -252,3 +252,3 @@ case 'langcode': | ||
next = this._readListItem; // The next function to execute. | ||
switch (token.type) { | ||
@@ -353,3 +353,3 @@ case 'explicituri': | ||
}, | ||
// ### `_readPunctuation` reads punctuation between triples or triple parts. | ||
@@ -381,3 +381,3 @@ _readPunctuation: function (token) { | ||
}, | ||
// ### `_readPrefix` reads the prefix of a prefix declaration. | ||
@@ -390,3 +390,3 @@ _readPrefix: function (token) { | ||
}, | ||
// ### `_readPrefixURI` reads the URI of a prefix declaration. | ||
@@ -404,3 +404,3 @@ _readPrefixURI: function (token) { | ||
}, | ||
// ### `_readBaseURI` reads the URI of a base declaration. | ||
@@ -417,3 +417,3 @@ _readBaseURI: function (token) { | ||
}, | ||
// ### `_readDeclarationPunctuation` reads the punctuation of a declaration. | ||
@@ -425,3 +425,3 @@ _readDeclarationPunctuation: function (token) { | ||
}, | ||
// ### `_getNextReader` gets the next reader function at the end of a triple. | ||
@@ -432,3 +432,3 @@ _getNextReader: function () { | ||
return this._readPunctuation; | ||
switch (stack[stack.length - 1].type) { | ||
@@ -441,3 +441,3 @@ case 'blank': | ||
}, | ||
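The parser is organised as a small state machine: each `_readX` method handles one token and returns the function that should handle the next token, and `_getNextReader` consults the context stack (blank nodes, lists) to decide where to resume once a triple ends. A stripped-down illustration of the pattern, not the library's actual code:

```js
// Each reader consumes one token and returns the reader for the next token.
function Machine() { this._read = readSubject; }
Machine.prototype.step = function (token) { this._read = this._read.call(this, token); };

function readSubject(token)   { this.subject = token.value;   return readPredicate; }
function readPredicate(token) { this.predicate = token.value; return readObject; }
function readObject(token) {
  console.log(this.subject, this.predicate, token.value);
  return readSubject;              // simplification: always expect a fresh triple next
}

var machine = new Machine();
[{ value: 'a' }, { value: 'b' }, { value: 'c' }].forEach(function (t) { machine.step(t); });
// logs: a b c
```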
// ### `_error` emits an error message through the callback. | ||
@@ -447,5 +447,5 @@ _error: function (message, token) { | ||
}, | ||
// ## Public methods | ||
// ### `parse` parses the N3 input and emits each parsed triple through the callback. | ||
@@ -452,0 +452,0 @@ parse: function (input, callback) { |
// **N3Store** objects store N3 triples with an associated context in memory. | ||
// ## Constructor | ||
function N3Store() { | ||
function N3Store(triples) { | ||
// We use a dummy constructor to enable construction without `new`. | ||
function Constructor() {} | ||
Constructor.prototype = N3Store.prototype; | ||
// Initialize the new `N3Store`. | ||
@@ -21,3 +21,7 @@ var n3Store = new Constructor(); | ||
n3Store._blankNodeIndex = 0; | ||
// Add triples if passed | ||
if (triples) | ||
n3Store.addTriples(triples); | ||
// Return the new `N3Store`. | ||
@@ -29,5 +33,5 @@ return n3Store; | ||
constructor: N3Store, | ||
// ## Public properties | ||
// `defaultContext` is the default context wherein triples are stored. | ||
@@ -37,3 +41,3 @@ get defaultContext() { | ||
}, | ||
// ### `size` returns the number of triples in the store. | ||
@@ -45,3 +49,3 @@ get size() { | ||
return size; | ||
// Calculate the number of triples by counting to the deepest level. | ||
@@ -95,3 +99,3 @@ var contexts = this._contexts, subjects, subject; | ||
key2 in index2 ? (index2 = {})[key2] = tmp[key2] : index2 = {}; | ||
// Create triples for all items found in index 2. | ||
@@ -109,5 +113,5 @@ results.push.apply(results, Object.keys(index2).map(function (value2) { | ||
}, | ||
// ## Public methods | ||
// ### `add` adds a new N3 triple to the store. | ||
@@ -129,3 +133,3 @@ add: function (subject, predicate, object, context) { | ||
} | ||
// Since entities can often be long URIs, we avoid storing them in every index. | ||
@@ -135,3 +139,3 @@ // Instead, we have a separate index that maps entities to numbers, | ||
var entities = this._entities; | ||
subject = entities[subject] || (entities[subject] = ++this._entityCount); | ||
@@ -144,10 +148,22 @@ predicate = entities[predicate] || (entities[predicate] = ++this._entityCount); | ||
this._addToIndex(contextItem.objects, object, subject, predicate); | ||
// The cached triple count is now invalid. | ||
this._size = null; | ||
// Enable method chaining. | ||
return this; | ||
}, | ||
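The hunk above hints at how the store is laid out internally: every URI is interned once into a small number via the `_entities` map, and each context keeps nested `subjects`, `predicates` and `objects` indexes so a lookup can start from whichever key is most selective. A rough sketch of the interning plus one nested index (a simplified model, not the package's internals):

```js
// Intern long URI strings as small integer keys, once per distinct entity.
var entities = Object.create(null), entityCount = 0;
function intern(uri) {
  return entities[uri] || (entities[uri] = ++entityCount);
}

// One nested index: subject key -> predicate key -> set of object keys.
var subjects = Object.create(null);
function addToIndex(subject, predicate, object) {
  var predicates = subjects[subject] || (subjects[subject] = {});
  var objects = predicates[predicate] || (predicates[predicate] = {});
  objects[object] = true;
}

addToIndex(intern('http://ex.org/s1'), intern('http://ex.org/p1'), intern('http://ex.org/o1'));
console.log(subjects);   // { '1': { '2': { '3': true } } }
```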
// ### `addTriple` adds a triple to the store. | ||
addTriple: function (triple) { | ||
return this.add(triple.subject, triple.predicate, triple.object, triple.context); | ||
}, | ||
// ### `addTriples` adds multiple N3 triples to the store. | ||
addTriples: function (triples) { | ||
for (var i = triples.length - 1; i >= 0; i--) | ||
this.addTriple(triples[i]); | ||
return this; | ||
}, | ||
// ### `find` finds a set of triples matching a pattern. | ||
@@ -160,3 +176,3 @@ // Setting `subject`, `predicate`, or `object` to `null` means an _anything_ wildcard. | ||
entities = this._entities; | ||
// Translate URIs to internal index keys. | ||
@@ -167,7 +183,7 @@ // Optimization: if the entity doesn't exist, no triples with it exist. | ||
if (object && !(object = entities[object])) return []; | ||
// If the specified context contains no triples, there are no results. | ||
if (!contextItem) | ||
return []; | ||
// Choose the optimal index, based on which fields are present. | ||
@@ -195,3 +211,3 @@ if (subject) { | ||
}, | ||
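Taken together, the new constructor argument and the `addTriple`/`addTriples` helpers make it easy to seed a store and query it; the sketch below follows the store tests further down (require path as in the tests):

```js
var N3Store = require('../lib/n3store.js');   // path as used by the test suite

// Seed the store at construction time (new in 0.0.5)...
var store = new N3Store([
  { subject: 's1', predicate: 'p1', object: 'o1' },
  { subject: 's1', predicate: 'p1', object: 'o2' },
]);

// ...then keep adding triples one at a time or in batches.
store.addTriple({ subject: 's1', predicate: 'p2', object: 'o2' });
store.addTriples([{ subject: 's2', predicate: 'p1', object: 'o1' }]);

console.log(store.size);                   // 4
console.log(store.find('s1', null, null)); // the three triples with subject s1
```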
// ### `createBlankNode` creates a new blank node, returning its name. | ||
@@ -198,0 +214,0 @@ createBlankNode: function (suggestedName) { |
{ | ||
"name": "n3", | ||
"version": "0.0.4", | ||
"version": "0.0.5", | ||
"description": "Notation3 (N3) and RDF library.", | ||
@@ -13,3 +13,4 @@ "author": "Ruben Verborgh <ruben.verborgh@gmail.com>", | ||
"vows": "~0.6.0", | ||
"should": "~0.5.1", | ||
"chai": "~1.4.2", | ||
"chai-things": "~0.1.1", | ||
"docco": "~0.3.0", | ||
@@ -25,7 +26,7 @@ "request": "~2.9.203", | ||
"type": "git", | ||
"url": "http://github.com/RubenVerborgh/node-n3.git" | ||
"url": "https://github.com/RubenVerborgh/node-n3.git" | ||
}, | ||
"bugs": { | ||
"url": "http://github.com/RubenVerborgh/node-n3/issues" | ||
"url": "https://github.com/RubenVerborgh/node-n3/issues" | ||
} | ||
} |
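The devDependency switch from `should` to `chai` plus `chai-things` drives most of the test churn below: existence checks become `expect(...).to.exist`, and the hand-rolled `eql` membership loop becomes chai-things' `something.that.deep.equals`. A short sketch of the assertion idioms only (the surrounding vows structure is unchanged):

```js
var chai = require('chai');
var expect = chai.expect;
chai.should();
chai.use(require('chai-things'));

var error = null;
var result = [{ subject: 'a', predicate: 'b', object: 'c' }];

// should 0.5 style:  should.not.exist(error);
// chai style:
expect(error).not.to.exist;

// should + manual eql loop:  should(result.some(function (x) { return eql(item, x); }), ...);
// chai-things style:
result.should.contain.something.that.deep.equals({ subject: 'a', predicate: 'b', object: 'c' });
```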
# Node-n3 is a Notation3 and RDF library for node.js. | ||
[**Notation3 or N3**](http://www.w3.org/TeamSubmission/n3/) is a superset of [RDF](http://www.w3.org/TR/rdf-primer/), the [Semantic Web](http://www.w3.org/2001/sw/) language. | ||
[**Notation3 or N3**](http://www.w3.org/TeamSubmission/n3/) is a superset of [RDF](http://www.w3.org/TR/rdf-primer/), the [Semantic Web](http://www.w3.org/2001/sw/) language. | ||
This library will provide a high-performance N3 store, parser, and generator (when finished). | ||
@@ -23,3 +23,3 @@ | ||
In this example below, we create a new store and add the triples `:Pluto a :Dog.` and `:Mickey a :Mouse`. | ||
In this example below, we create a new store and add the triples `:Pluto a :Dog.` and `:Mickey a :Mouse`. | ||
Then, we find a triple with `:Mickey` as subject. | ||
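The README code these lines refer to is not reproduced in this diff; based on the store API shown above it presumably resembles the following (the export name and the prefix handling are assumptions, written as plain strings purely for illustration):

```js
var N3Store = require('n3').Store;   // exact export name is an assumption

var store = new N3Store();
store.add(':Pluto',  'rdf:type', ':Dog');
store.add(':Mickey', 'rdf:type', ':Mouse');

// Find the triple that has :Mickey as its subject.
var mickey = store.find(':Mickey', null, null)[0];
console.log(mickey.subject + ' is a ' + mickey.object);   // :Mickey is a :Mouse
```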
@@ -26,0 +26,0 @@ |
var N3Lexer = require('../lib/n3lexer.js'); | ||
var vows = require('vows'), | ||
should = require('should'), | ||
chai = require('chai'), | ||
expect = chai.expect, | ||
events = require('events'); | ||
chai.should(); | ||
chai.use(require('chai-things')); | ||
@@ -9,7 +12,7 @@ vows.describe('N3Lexer').addBatch({ | ||
topic: function () { return N3Lexer; }, | ||
'should be a function': function (N3Lexer) { | ||
N3Lexer.should.be.a('function'); | ||
}, | ||
'should make N3Lexer objects': function (N3Lexer) { | ||
@@ -19,3 +22,3 @@ N3Lexer().constructor.should.eql(N3Lexer); | ||
}, | ||
'should be an N3Lexer constructor': function (N3Lexer) { | ||
@@ -26,3 +29,3 @@ new N3Lexer().constructor.should.eql(N3Lexer); | ||
}, | ||
'An N3Lexer instance': { | ||
@@ -32,7 +35,7 @@ 'should tokenize the empty string': | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a whitespace string': | ||
shouldTokenize(' \t \n ', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize an explicituri': | ||
@@ -42,3 +45,3 @@ shouldTokenize('<http://ex.org/?bla#foo>', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize two explicituris separated by whitespace': | ||
@@ -49,3 +52,3 @@ shouldTokenize(' \n\t<http://ex.org/?bla#foo> \n\t<http://ex.org/?bla#bar> \n\t', | ||
{ type: 'eof', line: 4 }), | ||
'should tokenize a statement with explicituris': | ||
@@ -58,3 +61,3 @@ shouldTokenize(' \n\t<http://ex.org/?bla#foo> \n\t<http://ex.org/?bla#bar> \n\t<http://ex.org/?bla#boo> .', | ||
{ type: 'eof', line: 4 }), | ||
'should correctly recognize different types of newlines': | ||
@@ -67,3 +70,3 @@ shouldTokenize('<a>\r<b>\n<c>\r\n.', | ||
{ type: 'eof', line: 4 }), | ||
'should ignore comments': | ||
@@ -75,3 +78,3 @@ shouldTokenize('<#foo> #comment\n <#foo> #comment \r# comment\n\n<#bla>#', | ||
{ type: 'eof', line: 5 }), | ||
'should tokenize a quoted string literal': | ||
@@ -81,3 +84,3 @@ shouldTokenize('"string" ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a triple quoted string literal': | ||
@@ -87,3 +90,3 @@ shouldTokenize('"""string"""', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a triple quoted string literal with quotes and newlines inside': | ||
@@ -93,3 +96,3 @@ shouldTokenize('"""st"r\ni""ng"""', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize a string with escape characters': | ||
@@ -100,3 +103,3 @@ shouldTokenize('"\\\\ \\\' \\" \\n \\r \\t \\ua1b2" \n """\\\\ \\\' \\" \\n \\r \\t \\ua1b2"""', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize a quoted string literal with language code': | ||
@@ -109,3 +112,3 @@ shouldTokenize('"string"@en "string"@nl-be ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a quoted string literal with type': | ||
@@ -118,3 +121,3 @@ shouldTokenize('"string"^^<type> "string"^^ns:mytype ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize an integer literal': | ||
@@ -131,3 +134,3 @@ shouldTokenize('10, +20. -30, 40. ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a decimal literal': | ||
@@ -146,3 +149,3 @@ shouldTokenize('1.. 2.0, .3. -0.4, -.5. ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a double literal': | ||
@@ -157,3 +160,3 @@ shouldTokenize('10e20, +30.40E+50. -60.70e-80. ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize booleans': | ||
@@ -164,3 +167,3 @@ shouldTokenize('true false ', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize statements with shared subjects': | ||
@@ -176,3 +179,3 @@ shouldTokenize('<a> <b> <c>;\n<d> <e>.', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize statements with shared subjects and predicates': | ||
@@ -187,3 +190,3 @@ shouldTokenize('<a> <b> <c>,\n<d>.', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize statements with shared subjects and predicates and qnames': | ||
@@ -201,3 +204,3 @@ shouldTokenize('a:a b:b c:c;d:d e:e,f:f.', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize the colon qname': | ||
@@ -210,3 +213,3 @@ shouldTokenize(': : :.', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a stream': | ||
@@ -230,3 +233,3 @@ shouldTokenize(streamOf('<a>\n<b', '> ', '"""', 'c\n', '"""', '.', | ||
{ type: 'eof', line: 3 }), | ||
'should tokenize prefix declarations': | ||
@@ -243,3 +246,3 @@ shouldTokenize('@prefix : <http://uri.org/#>.\n@prefix abc: <http://uri.org/#>.', | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize qnames': | ||
@@ -252,3 +255,3 @@ shouldTokenize(':a b:c d-dd:e-ee.', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize blank nodes': | ||
@@ -263,3 +266,3 @@ shouldTokenize('[] [<a> <b>]', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize lists': | ||
@@ -277,3 +280,3 @@ shouldTokenize('() (<a>) (<a> <b>)', | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize the "a" predicate': | ||
@@ -286,3 +289,3 @@ shouldTokenize('<x> a <y>.', | ||
{ type: 'eof', line: 1 }), | ||
'should not tokenize an invalid document': | ||
@@ -297,6 +300,6 @@ shouldNotTokenize(' \n @!', 'Syntax error: unexpected "@!" on line 2.') | ||
expected = Array.prototype.slice.call(arguments, 1); | ||
function tokenCallback(error, token) { | ||
should.not.exist(error); | ||
should.exist(token); | ||
expect(error).not.to.exist; | ||
expect(token).to.exist; | ||
var expectedItem = expected[result.length]; | ||
@@ -311,3 +314,3 @@ if (expectedItem) | ||
} | ||
return { | ||
@@ -318,3 +321,3 @@ topic: function () { | ||
}, | ||
'should equal the expected value': function (result) { | ||
@@ -328,3 +331,3 @@ result.should.eql(expected); | ||
var endCallback; | ||
function tokenCallback(error, token) { | ||
@@ -336,3 +339,3 @@ if (error) | ||
} | ||
return { | ||
@@ -343,5 +346,5 @@ topic: function () { | ||
}, | ||
'should equal the expected message': function (error, token) { | ||
should.not.exist(token); | ||
expect(token).not.to.exist; | ||
error.should.eql(expectedError); | ||
@@ -355,3 +358,3 @@ } | ||
stream = new events.EventEmitter(); | ||
stream.setEncoding = function (encoding) { | ||
@@ -361,3 +364,3 @@ if (encoding === 'utf8') | ||
}; | ||
function next() { | ||
@@ -372,4 +375,4 @@ if (elements.length) { | ||
} | ||
return stream; | ||
} |
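`streamOf`, only partly visible above, builds a fake readable stream so the streaming code path can be tested without any I/O: an `EventEmitter` whose `setEncoding` stub starts emitting the supplied chunks as `data` events, followed by `end`. A self-contained approximation of the elided parts (an assumption, not the verbatim helper):

```js
var events = require('events');

function streamOf() {
  var elements = Array.prototype.slice.call(arguments);
  var stream = new events.EventEmitter();
  // The lexer calls setEncoding('utf8') before it starts listening for data,
  // so chunk emission begins only after that call, one chunk per tick.
  stream.setEncoding = function (encoding) {
    if (encoding === 'utf8')
      process.nextTick(function next() {
        if (elements.length) {
          stream.emit('data', elements.shift());
          process.nextTick(next);
        }
        else {
          stream.emit('end');
        }
      });
  };
  return stream;
}

// Usage: deliver '<a> <b> <c>.' to the lexer in three chunks.
// new N3Lexer().tokenize(streamOf('<a> <b', '> <c>', '.'), callback);
```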
var N3Parser = require('../lib/n3parser.js'); | ||
var vows = require('vows'), | ||
should = require('should'), | ||
eql = require('../node_modules/should/lib/eql.js'), | ||
chai = require('chai'), | ||
expect = chai.expect, | ||
util = require('util'); | ||
chai.should(); | ||
chai.use(require('chai-things')); | ||
@@ -10,7 +12,7 @@ vows.describe('N3Parser').addBatch({ | ||
topic: function () { return N3Parser; }, | ||
'should be a function': function (N3Parser) { | ||
N3Parser.should.be.a('function'); | ||
}, | ||
'should make N3Parser objects': function (N3Parser) { | ||
@@ -20,3 +22,3 @@ N3Parser().constructor.should.eql(N3Parser); | ||
}, | ||
'should be an N3Parser constructor': function (N3Parser) { | ||
@@ -27,18 +29,18 @@ new N3Parser().constructor.should.eql(N3Parser); | ||
}, | ||
'An N3Parser instance': { | ||
topic: function () { return function () { return new N3Parser(); }; }, | ||
'should parse the empty string': | ||
shouldParse('' | ||
/* no triples */), | ||
'should parse a whitespace string': | ||
shouldParse(' \t \n ' | ||
/* no triples */), | ||
'should parse a single triple': | ||
shouldParse('<a> <b> <c>.', | ||
['a', 'b', 'c']), | ||
'should parse three triples': | ||
@@ -49,27 +51,27 @@ shouldParse('<a> <b> <c>.\n<d> <e> <f>.\n<g> <h> <i>.', | ||
['g', 'h', 'i']), | ||
'should parse a triple with a literal': | ||
shouldParse('<a> <b> "string".', | ||
['a', 'b', '"string"']), | ||
'should parse a triple with a numeric literal': | ||
shouldParse('<a> <b> 3.0.', | ||
['a', 'b', '"3.0"^^<http://www.w3.org/2001/XMLSchema#decimal>']), | ||
'should parse a triple with a literal and a language code': | ||
shouldParse('<a> <b> "string"@en.', | ||
['a', 'b', '"string"@en']), | ||
'should parse a triple with a literal and a URI type': | ||
shouldParse('<a> <b> "string"^^<type>.', | ||
['a', 'b', '"string"^^type']), | ||
['a', 'b', '"string"^^<type>']), | ||
'should parse a triple with a literal and a qname type': | ||
shouldParse('@prefix x: <y#>. <a> <b> "string"^^x:z.', | ||
['a', 'b', '"string"^^y#z']), | ||
['a', 'b', '"string"^^<y#z>']), | ||
'should not parse a triple with a literal and a qname type with an inexistent prefix': | ||
shouldNotParse('<a> <b> "string"^^x:z.', | ||
'Undefined prefix "x:" at line 1.'), | ||
'should parse triples with prefixes': | ||
@@ -80,3 +82,3 @@ shouldParse('@prefix : <#>.\n' + | ||
['#x', 'a#a', 'a#b']), | ||
'should parse triples with prefixes and different punctuation': | ||
@@ -89,15 +91,15 @@ shouldParse('@prefix : <#>.\n' + | ||
['#x', 'a#c', 'a#e']), | ||
'should not parse undefined prefix in subject': | ||
shouldNotParse('a:a ', | ||
'Undefined prefix "a:" at line 1.'), | ||
'should not parse undefined prefix in predicate': | ||
shouldNotParse('<a> b:c ', | ||
'Undefined prefix "b:" at line 1.'), | ||
'should not parse undefined prefix in object': | ||
shouldNotParse('<a> <b> c:d ', | ||
'Undefined prefix "c:" at line 1.'), | ||
'should parse statements with shared subjects': | ||
@@ -107,3 +109,3 @@ shouldParse('<a> <b> <c>;\n<d> <e>.', | ||
['a', 'd', 'e']), | ||
'should parse statements with shared subjects and predicates': | ||
@@ -113,11 +115,11 @@ shouldParse('<a> <b> <c>, <d>.', | ||
['a', 'b', 'd']), | ||
'should parse statements with named blank nodes': | ||
shouldParse('_:a _:b _:c.', | ||
['_:b0', '_:b1', '_:b2']), | ||
'should parse statements with empty blank nodes': | ||
shouldParse('[] _:b [].', | ||
['_:b0', '_:b1', '_:b2']), | ||
'should parse statements with unnamed blank nodes in the subject': | ||
@@ -127,3 +129,3 @@ shouldParse('[<a> <b>] <c> <d>.', | ||
['_:b0', 'a', 'b']), | ||
'should parse statements with unnamed blank nodes in the object': | ||
@@ -133,3 +135,3 @@ shouldParse('<a> <b> [<c> <d>].', | ||
['_:b0', 'c', 'd']), | ||
'should parse statements with unnamed blank nodes with a string object': | ||
@@ -139,7 +141,7 @@ shouldParse('<a> <b> [<c> "x"].', | ||
['_:b0', 'c', '"x"']), | ||
'should not parse a blank node with missing subject': | ||
shouldNotParse('<a> <b> [<c>].', | ||
'Expected object to follow "c" at line 1.'), | ||
'should parse a multi-statement blank node': | ||
@@ -150,3 +152,3 @@ shouldParse('<a> <b> [ <u> <v>; <w> <z> ].', | ||
['_:b0', 'w', 'z']), | ||
'should parse a multi-statement blank node with trailing semicolon': | ||
@@ -157,3 +159,3 @@ shouldParse('<a> <b> [ <u> <v>; <w> <z>; ].', | ||
['_:b0', 'w', 'z']), | ||
'should parse statements with nested blank nodes in the subject': | ||
@@ -164,3 +166,3 @@ shouldParse('[<a> [<x> <y>]] <c> <d>.', | ||
['_:b1', 'x', 'y']), | ||
'should parse statements with nested blank nodes in the object': | ||
@@ -171,11 +173,11 @@ shouldParse('<a> <b> [<c> [<d> <e>]].', | ||
['_:b1', 'd', 'e']), | ||
'should parse statements with an empty list in the subject': | ||
shouldParse('() <a> <b>.', | ||
['http://www.w3.org/1999/02/22-rdf-syntax-ns#nil', 'a', 'b']), | ||
'should parse statements with an empty list in the object': | ||
shouldParse('<a> <b> ().', | ||
['a', 'b', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a single-element list in the subject': | ||
@@ -187,3 +189,3 @@ shouldParse('(<x>) <a> <b>.', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a single-element list in the object': | ||
@@ -195,3 +197,3 @@ shouldParse('<a> <b> (<x>).', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a multi-element list in the subject': | ||
@@ -205,3 +207,3 @@ shouldParse('(<x> <y>) <a> <b>.', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a multi-element list in the object': | ||
@@ -215,3 +217,3 @@ shouldParse('<a> <b> (<x> <y>).', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a list containing strings': | ||
@@ -223,3 +225,3 @@ shouldParse('("y") <a> <b>.', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a nested empty list': | ||
@@ -234,3 +236,3 @@ shouldParse('<a> <b> (<x> ()).', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with non-empty nested lists': | ||
@@ -247,3 +249,3 @@ shouldParse('<a> <b> (<x> (<y>)).', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a list containing a blank node': | ||
@@ -255,3 +257,3 @@ shouldParse('([]) <a> <b>.', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should parse statements with a list containing multiple blank nodes': | ||
@@ -266,3 +268,3 @@ shouldParse('([] [<x> <y>]) <a> <b>.', | ||
['_:b3', 'x', 'y']), | ||
'should parse statements with a blank node containing a list': | ||
@@ -275,3 +277,3 @@ shouldParse('[<a> (<b>)] <c> <d>.', | ||
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil']), | ||
'should resolve the colon prefix against the hash URI': | ||
@@ -283,3 +285,3 @@ shouldParse('@base <base/>.\n' + | ||
['#x', '#y', '#z']), | ||
'should resolve URIs against @base': | ||
@@ -292,15 +294,15 @@ shouldParse('@base <http://ex.org/>.\n' + | ||
['http://ex.org/d/e', 'http://ex.org/d/f', 'http://ex.org/d/g']), | ||
'should not parse improperly nested square brackets': | ||
shouldNotParse('<a> <b> [<c> <d>]].', | ||
'Expected punctuation to follow "_:b0" at line 1.'), | ||
'should error when a predicate is not there': | ||
shouldNotParse('<a>.', | ||
'Expected predicate to follow "a" at line 1.'), | ||
'should error when an object is not there': | ||
shouldNotParse('<a> <b>.', | ||
'Expected object to follow "b" at line 1.'), | ||
'should error when a dot is not there': | ||
@@ -312,3 +314,3 @@ shouldNotParse('<a> <b> <c>', | ||
topic: function () { return function () { return new N3Parser({ documentURI: 'doc/file.ttl' }); }; }, | ||
'should resolve URIs against the document URI': | ||
@@ -320,3 +322,3 @@ shouldParse('@prefix : <#>.\n' + | ||
['doc/file.ttl#e', 'doc/file.ttl#f', 'doc/file.ttl#g']), | ||
'should respect @base statements': | ||
@@ -331,3 +333,3 @@ shouldParse('<a> <b> <c>.\n' + | ||
['http://ex.org/d/h', 'http://ex.org/d/i', 'http://ex.org/d/j']), | ||
'should resolve the colon prefix against the document URI': | ||
@@ -350,5 +352,5 @@ shouldParse('@base <base/>.\n' + | ||
}); | ||
function tripleCallback(error, triple) { | ||
should.not.exist(error); | ||
expect(error).not.to.exist; | ||
if (triple) | ||
@@ -359,3 +361,3 @@ result.push(triple); | ||
} | ||
return { | ||
@@ -366,8 +368,7 @@ topic: function (n3parserFactory) { | ||
}, | ||
'should equal the expected value': function (result) { | ||
result.should.have.lengthOf(expected.length); | ||
for (var i = 0; i < items.length; i++) | ||
should(result.some(function (x) { return eql(items[i], x); }), | ||
util.inspect(result) + ' should contain ' + util.inspect(items[i])); | ||
result.should.contain.something.that.deep.equals(items[i]); | ||
} | ||
@@ -379,3 +380,3 @@ }; | ||
var endCallback; | ||
function tripleCallback(error, triple) { | ||
@@ -387,3 +388,3 @@ if (error) | ||
} | ||
return { | ||
@@ -394,5 +395,5 @@ topic: function (n3parserFactory) { | ||
}, | ||
'should equal the expected message': function (error, triple) { | ||
should.not.exist(triple); | ||
expect(triple).not.to.exist; | ||
error.should.eql(expectedError); | ||
@@ -399,0 +400,0 @@ } |
var N3Store = require('../lib/n3store.js'); | ||
var vows = require('vows'), | ||
should = require('should'), | ||
eql = require('../node_modules/should/lib/eql.js'), | ||
chai = require('chai'), | ||
util = require('util'); | ||
chai.should(); | ||
chai.use(require('chai-things')); | ||
@@ -10,7 +11,7 @@ vows.describe('N3Store').addBatch({ | ||
topic: function () { return N3Store; }, | ||
'should be a function': function (N3Store) { | ||
N3Store.should.be.a('function'); | ||
}, | ||
'should make N3Store objects': function (N3Store) { | ||
@@ -20,3 +21,3 @@ N3Store().constructor.should.eql(N3Store); | ||
}, | ||
'should be an N3Store constructor': function (N3Store) { | ||
@@ -27,18 +28,18 @@ new N3Store().constructor.should.eql(N3Store); | ||
}, | ||
'An empty N3Store': { | ||
topic: new N3Store(), | ||
'should have size 0': function (n3Store) { | ||
n3Store.size.should.eql(0); | ||
}, | ||
'should be empty': function (n3Store) { | ||
n3Store.find().should.be.empty; | ||
}, | ||
'should have a default context': function (n3Store) { | ||
n3Store.defaultContext.should.eql('n3/contexts#default'); | ||
}, | ||
'should be able to create unnamed blank nodes': function (n3Store) { | ||
@@ -51,3 +52,3 @@ n3Store.createBlankNode().should.eql('_:b0'); | ||
}, | ||
'should be able to create named blank nodes': function (n3Store) { | ||
@@ -59,3 +60,18 @@ n3Store.createBlankNode('blank').should.eql('_:blank'); | ||
}, | ||
'An N3Store initialized with 3 elements': { | ||
topic: function () { | ||
var n3Store = new N3Store([ | ||
{ subject: 's1', predicate: 'p1', object: 'o1'}, | ||
{ subject: 's1', predicate: 'p1', object: 'o2'}, | ||
{ subject: 's1', predicate: 'p1', object: 'o3'}, | ||
]); | ||
return n3Store; | ||
}, | ||
'should have size 3': function (n3Store) { | ||
n3Store.size.should.eql(3); | ||
}, | ||
}, | ||
'An N3Store with 5 elements': { | ||
@@ -65,135 +81,137 @@ topic: function () { | ||
n3Store.add('s1', 'p1', 'o1'); | ||
n3Store.add('s1', 'p1', 'o2'); | ||
n3Store.add('s1', 'p2', 'o2'); | ||
n3Store.add('s2', 'p1', 'o1'); | ||
n3Store.addTriple({ subject: 's1', predicate: 'p1', object: 'o2'}); | ||
n3Store.addTriples([ | ||
{ subject: 's1', predicate: 'p2', object: 'o2'}, | ||
{ subject: 's2', predicate: 'p1', object: 'o1'}, | ||
]); | ||
n3Store.add('s1', 'p2', 'o3', 'c4'); | ||
return n3Store; | ||
}, | ||
'should have size 5': function (n3Store) { | ||
n3Store.size.should.eql(5); | ||
}, | ||
'when searched without parameters': { | ||
topic: function (n3Store) { return n3Store.find(); }, | ||
'should return all items in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s1', 'p1', 'o2'], ['s1', 'p2', 'o2'], ['s2', 'p1', 'o1']) | ||
}, | ||
'when searched with an existing subject parameter': { | ||
topic: function (n3Store) { return n3Store.find('s1', null, null); }, | ||
'should return all items with this subject in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s1', 'p1', 'o2'], ['s1', 'p2', 'o2']) | ||
}, | ||
'when searched with a non-existing subject parameter': { | ||
topic: function (n3Store) { return n3Store.find('s3', null, null); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with an existing predicate parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, 'p1', null); }, | ||
'should return all items with this predicate in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s1', 'p1', 'o2'], ['s2', 'p1', 'o1']) | ||
}, | ||
'when searched with a non-existing predicate parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, 'p3', null); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with an existing object parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, null, 'o1'); }, | ||
'should return all items with this object in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s2', 'p1', 'o1']) | ||
}, | ||
'when searched with a non-existing object parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, null, 'o4'); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with existing subject and predicate parameters': { | ||
topic: function (n3Store) { return n3Store.find('s1', 'p1', null); }, | ||
'should return all items with this subject and predicate in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s1', 'p1', 'o2']) | ||
}, | ||
'when searched with non-existing subject and predicate parameters': { | ||
topic: function (n3Store) { return n3Store.find('s2', 'p2', null); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with existing subject and object parameters': { | ||
topic: function (n3Store) { return n3Store.find('s1', null, 'o2'); }, | ||
'should return all items with this subject and object in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o2'], ['s1', 'p2', 'o2']) | ||
}, | ||
'when searched with non-existing subject and object parameters': { | ||
topic: function (n3Store) { return n3Store.find('s2', 'p2', null); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with existing predicate and object parameters': { | ||
topic: function (n3Store) { return n3Store.find(null, 'p1', 'o1'); }, | ||
'should return all items with this predicate and object in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s2', 'p1', 'o1']) | ||
}, | ||
'when searched with non-existing predicate and object parameters': { | ||
topic: function (n3Store) { return n3Store.find(null, 'p2', 'o3'); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with existing subject, predicate, and object parameters': { | ||
topic: function (n3Store) { return n3Store.find('s1', 'p1', 'o1'); }, | ||
'should return all items with this subject, predicate, and object in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1']) | ||
}, | ||
'when searched with non-existing subject, predicate, and object parameters': { | ||
topic: function (n3Store) { return n3Store.find('s2', 'p2', 'o2'); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
'when searched with the default context parameter': { | ||
topic: function (n3Store) { return n3Store.find(); }, | ||
'should return all items in the default context': | ||
shouldIncludeAll(['s1', 'p1', 'o1'], ['s1', 'p1', 'o2'], ['s1', 'p2', 'o2'], ['s2', 'p1', 'o1']) | ||
}, | ||
'when searched with an existing non-default context parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, null, null, 'c4'); }, | ||
'should return all items in that context': | ||
shouldIncludeAll(['s1', 'p2', 'o3', 'c4']) | ||
}, | ||
'when searched with a non-existing non-default context parameter': { | ||
topic: function (n3Store) { return n3Store.find(null, null, null, 'c5'); }, | ||
'should return no items': shouldBeEmpty() | ||
}, | ||
}, | ||
'An N3Store': { | ||
topic: new N3Store(), | ||
// Test inspired by http://www.devthought.com/2012/01/18/an-object-is-not-a-hash/. | ||
@@ -206,3 +224,3 @@ // The value `__proto__` is not supported however – fixing it introduces too much overhead. | ||
}, | ||
'should be able to contain entities named "null"': function (n3Store) { | ||
@@ -230,5 +248,4 @@ n3Store.add('null', 'null', 'null', 'null'); | ||
for (var i = 0; i < items.length; i++) | ||
should(result.some(function (x) { return eql(items[i], x); }), | ||
util.inspect(result) + ' should contain ' + util.inspect(items[i])); | ||
result.should.include.something.that.deep.equals(items[i]); | ||
}; | ||
} |