Comparing version 0.0.6 to 0.1.0
// **N3Lexer** tokenizes N3 documents. | ||
// ## Regular expressions | ||
var patterns = { | ||
// These token expressions were taken from the [context-free grammar in N3](http://www.w3.org/2000/10/swap/grammar/n3.n3). | ||
_explicituri: /^<([^>]*)>/, | ||
_string: /^"[^"\\]*(?:\\.[^"\\]*)*"(?=[^"\\])/, | ||
_tripleQuotedString: /^""("[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*")""/, | ||
_explicituri: /^<((?:[^> \\]|\\[uU])*)>/, | ||
_string: /^"[^"\\]*(?:\\.[^"\\]*)*"(?=[^"\\])|^'[^'\\]*(?:\\.[^'\\]*)*'(?=[^'\\])/, | ||
_tripleQuotedString: /^""("[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*")""|^''('[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*')''/, | ||
_langcode: /^@([a-z]+(?:-[a-z0-9]+)*)(?=[^a-z0-9\-])/i, | ||
_prefix: /^([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:(?=\s)/, | ||
_qname: /^([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:((?:[A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?)(?=[\s\.;,])/, | ||
_number: /^(?:(-)|\+)?(\d+\.\d*|\.\d+|\d+)([eE](?:[\-\+])?\d+)?(?=\s*[,\.])/, | ||
_prefix: /^((?:[A-Za-zÀ-ÖØ-öø-˿Ͱ-ͽ\u037f-\u1fff\u200c-\u200d⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff])(?:[\.\-0-9A-Z_a-z·À-ÖØ-öø-ͽ\u037f-\u1fff\u200c-\u200d‿-⁀⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff])*)?:(?=\s)/, | ||
_qname: /^((?:[A-Z_a-zÀ-ÖØ-öø-˿Ͱ-ͽ\u037f-\u1fff\u200c-\u200d⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff])(?:[\.\-0-9A-Z_a-z·À-ÖØ-öø-ͽ\u037f-\u1fff\u200c-\u200d‿-⁀⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff])*)?:((?:(?:[0-:A-Z_a-z·À-ÖØ-öø-ͽ\u037f-\u1fff\u200c-\u200d‿-⁀⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff]|%[0-9a-fA-F]{2}|\\[!#-\/;=?\-@_~])(?:(?:[\.\-0-:A-Z_a-z·À-ÖØ-öø-ͽ\u037f-\u1fff\u200c-\u200d‿-⁀⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff]|%[0-9a-fA-F]{2}|\\[!#-\/;=?\-@_~])*(?:[\-0-:A-Z_a-z·À-ÖØ-öø-ͽ\u037f-\u1fff\u200c-\u200d‿-⁀⁰-\u218fⰀ-\u2fef、-\ud7ff豈-\ufdcfﷰ-�]|[\ud800-\udb7f][\udc00-\udfff]|%[0-9a-fA-F]{2}|\\[!#-\/;=?\-@_~]))?)?)(?=[\s\.;,)])/, | ||
_number: /^[\-+]?(?:\d+\.?\d*([eE](?:[\-\+])?\d+)|\d+\.\d+|\.\d+|\d+)(?=\s*[\s\.;,)])/, | ||
_boolean: /^(?:true|false)(?=\s+)/, | ||
// The lexer needs these other token expressions as well. | ||
_punctuation: /^\.(?!\d)|^;|^,|^\[|^\]|^\(|^\)/, // If a digit follows a dot, it is a number, not punctuation. | ||
_fastString: /^"[^"\\]+"(?=[^"\\])/, | ||
_keyword: /^@[a-z]+(?=\s)/, | ||
_type: /^\^\^(?:<([^>]*)>|([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)(?=[\s\.:,]))/, | ||
_keyword: /^(?:@[a-z]+|[Pp][Rr][Ee][Ff][Ii][Xx]|[Bb][Aa][Ss][Ee])(?=\s)/, | ||
_type: /^\^\^(?:<([^>]*)>|([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)?:([A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd][\-0-9A-Z_a-z\u00b7\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u037d\u037f-\u1fff\u200c-\u200d\u203f-\u2040\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd]*)(?=[\s\.;,)]))/, | ||
_shortPredicates: /^a(?=\s+|<)/, | ||
// Whitespace, newlines, and comments are actually not specified yet in the current N3 grammar. | ||
_newline: /^[ \t]*(?:#[^\n\r]*)?(?:\r\n|\n|\r)[ \t]*/, | ||
@@ -26,4 +23,9 @@ _whitespace: /^[ \t]+|^#[^\n\r]*/, | ||
// Regular expression and replacement string to escape N3 strings. | ||
var escapeSequence = /\\(\\|'|"|n|r|t|u([0-9abcdefABCDEF]{4}))/g; | ||
var escapeReplacements = { '\\': '\\', "'": "'", '"': '"', 'n': '\n', 'r': '\r', 't': '\t' }; | ||
// Note how we catch invalid unicode sequences separately (they will trigger an error). | ||
var escapeSequence = /\\u([a-fA-F0-9]{4})|\\U([a-fA-F0-9]{8})|\\[uU]|\\(.)/g; | ||
var escapeReplacements = { '\\': '\\', "'": "'", '"': '"', | ||
'n': '\n', 'r': '\r', 't': '\t', 'f': '\f', 'b': '\b', | ||
'_': '_', '~': '~', '.': '.', '-': '-', '!': '!', '$': '$', '&': '&', | ||
'(': '(', ')': ')', '*': '*', '+': '+', ',': ',', ';': ';', '=': '=', | ||
'/': '/', '?': '?', '#': '#', '@': '@', '%': '%' }; | ||
@@ -83,2 +85,3 @@ // Different punctuation types. | ||
}; | ||
var unescaped; | ||
@@ -100,4 +103,7 @@ // Emit the EOF token if we're at the end and reading is complete. | ||
if (match = this._explicituri.exec(this._input)) { | ||
unescaped = this._unescape(match[1]); | ||
if (unescaped === null) | ||
return reportSyntaxError(this); | ||
token.type = 'explicituri'; | ||
token.value = match[1]; | ||
token.value = unescaped; | ||
} | ||
@@ -122,11 +128,18 @@ // Try to find a dot. | ||
else if (match = this._string.exec(this._input)) { | ||
unescaped = this._unescape(match[0]); | ||
if (unescaped === null) | ||
return reportSyntaxError(this); | ||
token.type = 'literal'; | ||
token.value = this._unescapeString(match[0]); | ||
token.value = unescaped.replace(/^'|'$/g, '"'); | ||
} | ||
// Try to find a string literal wrapped in a pair of triple quotes. | ||
else if (match = this._tripleQuotedString.exec(this._input)) { | ||
unescaped = match[1] || match[2]; | ||
// Count the newlines and advance line counter. | ||
this._line += unescaped.split(/\r\n|\r|\n/).length - 1; | ||
unescaped = this._unescape(unescaped); | ||
if (unescaped === null) | ||
return reportSyntaxError(this); | ||
token.type = 'literal'; | ||
// Count the newlines and advance line counter. | ||
this._line += match[1].split(/\r\n|\r|\n/).length - 1; | ||
token.value = this._unescapeString(match[1]); | ||
token.value = unescaped.replace(/^'|'$/g, '"'); | ||
} | ||
@@ -136,13 +149,4 @@ // Try to find a number. | ||
token.type = 'literal'; | ||
var value = (match[1] === '-' ? '-' + match[2] : match[2]); | ||
if (match[3]) { | ||
token.value = '"' + value + match[3].replace('+', '').replace('E', 'e') + | ||
'"^^<http://www.w3.org/2001/XMLSchema#double>'; | ||
} | ||
else { | ||
if (value.match(/^-?\d+$/)) | ||
token.value = '"' + parseInt(value, 10) + '"^^<http://www.w3.org/2001/XMLSchema#integer>'; | ||
else | ||
token.value = '"' + value + '"^^<http://www.w3.org/2001/XMLSchema#decimal>'; | ||
} | ||
token.value = '"' + match[0] + '"^^<http://www.w3.org/2001/XMLSchema#' + | ||
(match[1] ? 'double>' : (/^[+-]?\d+$/.test(match[0]) ? 'integer>' : 'decimal>')); | ||
} | ||
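For readers skimming the diff: the replacement above folds the three earlier datatype branches into one expression. The sketch below is an illustration only (the `classify` helper is hypothetical, and a simplified form of the new `_number` pattern is reused without its trailing lookahead so the samples can be matched on their own); it shows which XML Schema datatype each kind of lexeme receives.

``` js
// Hedged illustration of the new number-to-literal mapping; not part of n3lexer.js.
var number = /^[\-+]?(?:\d+\.?\d*([eE][\-\+]?\d+)|\d+\.\d+|\.\d+|\d+)$/;

function classify(lexeme) {
  var match = number.exec(lexeme);
  return '"' + match[0] + '"^^<http://www.w3.org/2001/XMLSchema#' +
         (match[1] ? 'double>' : (/^[+-]?\d+$/.test(match[0]) ? 'integer>' : 'decimal>'));
}

console.log(classify('42'));         // "42"^^<http://www.w3.org/2001/XMLSchema#integer>
console.log(classify('2.0'));        // "2.0"^^<http://www.w3.org/2001/XMLSchema#decimal>
console.log(classify('-.5'));        // "-.5"^^<http://www.w3.org/2001/XMLSchema#decimal>
console.log(classify('+30.40E+50')); // "+30.40E+50"^^<http://www.w3.org/2001/XMLSchema#double>
```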
@@ -167,6 +171,8 @@ // Try to match a boolean. | ||
else if (match = this._keyword.exec(this._input)) { | ||
token.type = match[0]; | ||
var keyword = match[0]; | ||
token.type = keyword[0] === '@' ? keyword : keyword.toUpperCase(); | ||
} | ||
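Spelled out (illustration only, not library code): `@`-prefixed keywords keep their exact spelling as the token type, while the case-insensitive SPARQL keywords matched by the new `_keyword` pattern are normalized to upper case, which is what the `PREFIX`/`BASE` test cases further down expect.

``` js
// Illustration of the keyword normalization above.
['@prefix', '@base', 'PREFIX', 'preFiX', 'bAsE'].forEach(function (keyword) {
  var type = keyword[0] === '@' ? keyword : keyword.toUpperCase();
  console.log(keyword, '->', type); // e.g. 'preFiX' -> 'PREFIX', '@base' -> '@base'
});
```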
// Try to find a prefix. | ||
else if (this._prevTokenType === '@prefix' && (match = this._prefix.exec(this._input))) { | ||
else if ((this._prevTokenType === '@prefix' || this._prevTokenType === 'PREFIX') && | ||
(match = this._prefix.exec(this._input))) { | ||
token.type = 'prefix'; | ||
@@ -177,5 +183,8 @@ token.value = match[1] || ''; | ||
else if (match = this._qname.exec(this._input)) { | ||
unescaped = this._unescape(match[2]); | ||
if (unescaped === null) | ||
return reportSyntaxError(this); | ||
token.type = 'qname'; | ||
token.prefix = match[1] || ''; | ||
token.value = match[2]; | ||
token.value = unescaped; | ||
} | ||
@@ -191,7 +200,4 @@ // Try to find an abbreviated predicate. | ||
// Otherwise, a syntax error has occurred in the input. | ||
if (this._inputComplete) { | ||
match = this._nonwhitespace.exec(this._input); | ||
delete this._input; | ||
callback('Syntax error: unexpected "' + match[0] + '" on line ' + this._line + '.'); | ||
} | ||
if (this._inputComplete) | ||
reportSyntaxError(this); | ||
return false; | ||
@@ -209,9 +215,42 @@ } | ||
return true; | ||
function reportSyntaxError(self) { | ||
match = self._nonwhitespace.exec(self._input); | ||
delete self._input; | ||
callback('Syntax error: unexpected "' + match[0] + '" on line ' + self._line + '.'); | ||
return false; | ||
} | ||
}, | ||
// ### `unescapeString` replaces escape codes in N3 strings by the corresponding characters. | ||
_unescapeString: function (string) { | ||
return string.replace(escapeSequence, function (sequence, code, hexCode) { | ||
return hexCode ? String.fromCharCode(parseInt(hexCode, 16)) : escapeReplacements[code]; | ||
}); | ||
// ### `unescape` replaces N3 escape codes by their corresponding characters. | ||
_unescape: function (item) { | ||
try { | ||
return item.replace(escapeSequence, function (sequence, unicode4, unicode8, escapedChar) { | ||
var charCode; | ||
if (unicode4) { | ||
charCode = parseInt(unicode4, 16); | ||
if (isNaN(charCode)) | ||
throw "invalid character code"; | ||
return String.fromCharCode(charCode); | ||
} | ||
else if (unicode8) { | ||
charCode = parseInt(unicode8, 16); | ||
if (isNaN(charCode)) | ||
throw "invalid character code"; | ||
if (charCode < 0xFFFF) | ||
return String.fromCharCode(charCode); | ||
return String.fromCharCode(Math.floor((charCode - 0x10000) / 0x400) + 0xD800) + | ||
String.fromCharCode((charCode - 0x10000) % 0x400 + 0xDC00); | ||
} | ||
else { | ||
var replacement = escapeReplacements[escapedChar]; | ||
if (!replacement) | ||
throw "invalid escape sequence"; | ||
return replacement; | ||
} | ||
}); | ||
} | ||
catch (error) { | ||
return null; | ||
} | ||
}, | ||
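The eight-digit `\U` branch above has to step outside single UTF-16 code units: code points beyond the Basic Multilingual Plane cannot come from one `String.fromCharCode` call and are emitted as a surrogate pair. Below is a minimal standalone sketch of that arithmetic; the `toUtf16` helper is hypothetical and is not the library function itself.

``` js
// Standalone sketch of the UTF-16 conversion used for \U escapes.
function toUtf16(codePoint) {
  // Code points up to U+FFFF fit in a single UTF-16 code unit.
  if (codePoint <= 0xFFFF)
    return String.fromCharCode(codePoint);
  // Higher code points are split into a high and a low surrogate.
  return String.fromCharCode(Math.floor((codePoint - 0x10000) / 0x400) + 0xD800) +
         String.fromCharCode((codePoint - 0x10000) % 0x400 + 0xDC00);
}

console.log(toUtf16(0x0073));                     // 's', as in the \u0073 lexer test below
console.log(toUtf16(0x1F600) === '\uD83D\uDE00'); // true: U+1F600 becomes a surrogate pair
```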
@@ -218,0 +257,0 @@ |
@@ -14,3 +14,3 @@ // **N3Parser** parses N3 documents. | ||
var undefined; | ||
var _undefined; | ||
@@ -57,6 +57,14 @@ // ## Constructor | ||
case '@prefix': | ||
this._sparqlStyle = false; | ||
return this._readPrefix; | ||
case 'PREFIX': | ||
this._sparqlStyle = true; | ||
return this._readPrefix; | ||
// It could be a base declaration. | ||
case '@base': | ||
this._sparqlStyle = false; | ||
return this._readBaseURI; | ||
case 'BASE': | ||
this._sparqlStyle = true; | ||
return this._readBaseURI; | ||
// Otherwise, the next token must be a subject. | ||
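Together with the lexer changes, these new cases let a document mix Turtle-style and SPARQL-style declarations. Here is a usage sketch modeled on the README example shown later in this diff; the `require('n3')` line is an assumption about how the package is loaded, while the `Parser` API is the one the README documents.

``` js
// Usage sketch: SPARQL-style declarations have no '@' and no terminating dot.
var n3 = require('n3'); // assumption: the main export exposes Parser as in the README
var parser = new n3.Parser();

parser.parse('PREFIX c: <http://example.org/cartoons#>\n' +
             'BASE <http://example.org/>\n' +
             'c:Tom a c:Cat.',
  function (error, triple) {
    if (triple)
      console.log(triple.subject, triple.predicate, triple.object, '.');
  });
```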
@@ -84,3 +92,3 @@ default: | ||
var prefix = this._prefixes[token.prefix]; | ||
if (prefix === undefined) | ||
if (prefix === _undefined) | ||
return this._error('Undefined prefix "' + token.prefix + ':"', token); | ||
@@ -103,2 +111,3 @@ this._subject = prefix + token.value; | ||
} | ||
this._subjectHasPredicate = false; | ||
// The next token must be a predicate. | ||
@@ -120,8 +129,7 @@ return this._readPredicate; | ||
if (token.prefix === '_') { | ||
this._predicate = this._blankNodes[token.value] || | ||
(this._blankNodes[token.value] = '_:b' + this._blankNodeCount++); | ||
return this._error('Disallowed blank node as predicate', token); | ||
} | ||
else { | ||
var prefix = this._prefixes[token.prefix]; | ||
if (prefix === undefined) | ||
if (prefix === _undefined) | ||
return this._error('Undefined prefix "' + token.prefix + ':"', token); | ||
@@ -132,7 +140,17 @@ this._predicate = prefix + token.value; | ||
case 'bracketclose': | ||
// Expected punctuation didn't come, must have been trailing semicolon. | ||
// Expected predicate didn't come, must have been trailing semicolon. | ||
return this._readBlankNodeTail(token, true); | ||
case 'dot': | ||
// A dot is not allowed if the subject did not have a predicate yet | ||
if (!this._subjectHasPredicate) | ||
return this._error('Unexpected dot', token); | ||
// Expected predicate didn't come, must have been trailing semicolon. | ||
return this._readPunctuation(token, true); | ||
case 'semicolon': | ||
// Extra semicolons can be safely ignored | ||
return this._readPredicate; | ||
default: | ||
return this._error('Expected predicate to follow "' + this._subject + '"', token); | ||
} | ||
this._subjectHasPredicate = true; | ||
// The next token must be an object. | ||
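In plain terms (mirroring the new parser tests near the end of this diff), the revised predicate handling tolerates a trailing or repeated semicolon but still rejects a subject that never receives a predicate:

``` js
// Sketch of inputs the updated predicate handling accepts or rejects.
var trailingSemicolon  = '<a> <b> <c>;\n<d> <e>;\n.'; // accepted: yields two triples
var repeatedSemicolons = '<a> <b> <c>;;\n<d> <e>.';   // accepted: extra ';' is ignored
var bareSubject        = '<a>.';                      // rejected with "Unexpected dot"
```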
@@ -158,3 +176,3 @@ return this._readObject; | ||
var prefix = this._prefixes[token.prefix]; | ||
if (prefix === undefined) | ||
if (prefix === _undefined) | ||
return this._error('Undefined prefix "' + token.prefix + ':"', token); | ||
@@ -228,3 +246,3 @@ this._object = prefix + token.value; | ||
var prefix = this._prefixes[token.prefix]; | ||
if (prefix === undefined) | ||
if (prefix === _undefined) | ||
return this._error('Undefined prefix "' + token.prefix + ':"', token); | ||
@@ -263,3 +281,3 @@ value = prefix + token.value; | ||
var prefix = this._prefixes[token.prefix]; | ||
if (prefix === undefined) | ||
if (prefix === _undefined) | ||
return this._error('Undefined prefix "' + token.prefix + ':"', token); | ||
@@ -354,3 +372,3 @@ item = prefix + token.value; | ||
// ### `_readPunctuation` reads punctuation between triples or triple parts. | ||
_readPunctuation: function (token) { | ||
_readPunctuation: function (token, empty) { | ||
var next; | ||
@@ -374,6 +392,7 @@ switch (token.type) { | ||
// A triple has been completed now, so return it. | ||
this._callback(null, { subject: this._subject, | ||
predicate: this._predicate, | ||
object: this._object, | ||
context: 'n3/contexts#default' }); | ||
if (!empty) | ||
this._callback(null, { subject: this._subject, | ||
predicate: this._predicate, | ||
object: this._object, | ||
context: 'n3/contexts#default' }); | ||
return next; | ||
@@ -417,2 +436,6 @@ }, | ||
_readDeclarationPunctuation: function (token) { | ||
// SPARQL-style declarations don't have punctuation. | ||
if (this._sparqlStyle) | ||
return this._readInTopContext(token); | ||
if (token.type !== 'dot') | ||
@@ -449,3 +472,2 @@ return this._error('Expected declaration of to end with a dot', token); | ||
this._prefixes = Object.create(null); | ||
this._prefixes[''] = this._documentURI ? this._documentURI + '#' : '#'; | ||
// Set the triple callback. | ||
@@ -458,3 +480,3 @@ this._callback = callback; | ||
this._lexer.tokenize(input, function (error, token) { | ||
if (self._readCallback !== undefined) { | ||
if (self._readCallback !== _undefined) { | ||
if (error !== null) | ||
@@ -461,0 +483,0 @@ self._callback(error); |
{ | ||
"name": "n3", | ||
"version": "0.0.6", | ||
"description": "Notation3 (N3) and RDF library.", | ||
"version": "0.1.0", | ||
"description": "Lightning fast, asynchronous, streaming Turtle / N3 / RDF library.", | ||
"author": "Ruben Verborgh <ruben.verborgh@gmail.com>", | ||
"keywords": ["n3", "notation3", "turtle", "rdf"], | ||
"keywords": ["turtle", "rdf", "n3", "streaming", "asynchronous"], | ||
"main": "./n3.js", | ||
@@ -12,9 +12,10 @@ "engines": { | ||
"devDependencies": { | ||
"vows": "~0.6.0", | ||
"async": "~0.1.22", | ||
"chai": "~1.4.2", | ||
"chai-things": "~0.1.1", | ||
"colors": "~0.6.0", | ||
"docco": "~0.3.0", | ||
"jshint": "1.1.x", | ||
"request": "~2.9.203", | ||
"colors": "~0.6.0", | ||
"async": "~0.1.22" | ||
"vows": "~0.6.0" | ||
}, | ||
@@ -21,0 +22,0 @@ "scripts": { |
@@ -1,2 +0,2 @@ | ||
# Node-n3 is a Notation3 and RDF library for node.js. | ||
# Lightning fast, asynchronous, streaming Turtle / N3 / RDF | ||
@@ -7,14 +7,22 @@ [**Notation3 or N3**](http://www.w3.org/TeamSubmission/n3/) is a superset of [RDF](http://www.w3.org/TR/rdf-primer/), the [Semantic Web](http://www.w3.org/2001/sw/) language. | ||
Currently implemented: | ||
- streaming Turtle parser, [fully compliant](https://github.com/RubenVerborgh/node-n3/tree/master/spec) with the [latest candidate recommendation](http://www.w3.org/TR/turtle/) | ||
- high-performance N3 store | ||
- streaming Turtle parser | ||
[**Bringing reasoning to the Web**](http://reasoning.restdesc.org/) is an initiative comprising several open source projects (such as this one) that make N3 reasoning accessible. | ||
## Parsing Turtle | ||
# Use the node-n3 library. | ||
The node-n3 library features a streaming Turtle parser, | ||
processing Turtle documents as they grow. | ||
## Installation | ||
You can install the n3 library as an [npm](http://npmjs.org/) package. | ||
``` bash | ||
$ npm install n3 | ||
``` js | ||
var parser = new n3.Parser(); | ||
parser.parse('@prefix c: <http://example.org/cartoons#>.\n' + | ||
'c:Tom a c:Cat.\n' + | ||
'c:Jerry a c:Mouse;\n' + | ||
' c:smarterThan c:Tom.', | ||
function (error, triple) { | ||
if (triple) | ||
console.log(triple.subject, triple.predicate, triple.object, '.'); | ||
else | ||
console.log("# That's it, folks!") | ||
}); | ||
``` | ||
@@ -39,23 +47,11 @@ | ||
# Parsing Turtle | ||
## Installation | ||
You can install the n3 library as an [npm](http://npmjs.org/) package. | ||
The node-n3 library features a streaming Turtle parser, | ||
processing Turtle documents as they grow. | ||
``` js | ||
var parser = new n3.Parser(); | ||
parser.parse('@prefix c: <http://example.org/cartoons#>.\n' + | ||
'c:Tom a c:Cat.\n' + | ||
'c:Jerry a c:Mouse;\n' + | ||
' c:smarterThan c:Tom.', | ||
function (error, triple) { | ||
if (triple) | ||
console.log(triple.subject, triple.predicate, triple.object, '.'); | ||
else | ||
console.log("# That's it, folks!") | ||
}); | ||
``` bash | ||
$ npm install n3 | ||
``` | ||
# Learn more. | ||
# Learn more | ||
The [Bringing reasoning to the Web](http://reasoning.restdesc.org/) page explains the origins of this project and provides pointers to related resources. |
@@ -42,2 +42,12 @@ var N3Lexer = require('../lib/n3lexer.js'); | ||
'should tokenize an explicituri with four-digit unicode characters': | ||
shouldTokenize('<http://a.example/\\u0073>', | ||
{ type: 'explicituri', value: 'http://a.example/s', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize an explicituri with eight-digit unicode characters': | ||
shouldTokenize('<http://a.example/\\U00000073>', | ||
{ type: 'explicituri', value: 'http://a.example/s', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize two explicituris separated by whitespace': | ||
@@ -88,3 +98,3 @@ shouldTokenize(' \n\t<http://ex.org/?bla#foo> \n\t<http://ex.org/?bla#bar> \n\t', | ||
'should tokenize a string with escape characters': | ||
shouldTokenize('"\\\\ \\\' \\" \\n \\r \\t \\ua1b2" \n """\\\\ \\\' \\" \\n \\r \\t \\ua1b2"""', | ||
shouldTokenize('"\\\\ \\\' \\" \\n \\r \\t \\ua1b2" \n """\\\\ \\\' \\" \\n \\r \\t \\U0000a1b2"""', | ||
{ type: 'literal', value: '"\\ \' " \n \r \t \ua1b2"', line: 1 }, | ||
@@ -105,6 +115,45 @@ { type: 'literal', value: '"\\ \' " \n \r \t \ua1b2"', line: 2 }, | ||
'should tokenize a quoted string literal with type': | ||
shouldTokenize('"string"^^<type> "string"^^ns:mytype ', | ||
shouldTokenize('"stringA"^^<type> "stringB"^^ns:mytype ', | ||
{ type: 'literal', value: '"stringA"', line: 1 }, | ||
{ type: 'type', value: 'type', line: 1 }, | ||
{ type: 'literal', value: '"stringB"', line: 1 }, | ||
{ type: 'type', value: 'mytype', prefix: 'ns', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a single-quoted string literal': | ||
shouldTokenize("'string' ", | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a triple single-quoted string literal': | ||
shouldTokenize("'''string'''", | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a triple single-quoted string literal with quotes and newlines inside': | ||
shouldTokenize("'''st'r\ni''ng'''", | ||
{ type: 'literal', value: '"st\'r\ni\'\'ng"', line: 1 }, | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize a single-quoted string with escape characters': | ||
shouldTokenize("'\\\\ \\\" \\' \\n \\r \\t \\ua1b2' \n '''\\\\ \\\" \\' \\n \\r \\t \\U0000a1b2'''", | ||
{ type: 'literal', value: '"\\ " \' \n \r \t \ua1b2"', line: 1 }, | ||
{ type: 'literal', value: '"\\ " \' \n \r \t \ua1b2"', line: 2 }, | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize a single-quoted string literal with language code': | ||
shouldTokenize("'string'@en 'string'@nl-be 'string'@EN ", | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'langcode', value: 'en', line: 1 }, | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'langcode', value: 'nl-be', line: 1 }, | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'langcode', value: 'EN', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize a single-quoted string literal with type': | ||
shouldTokenize("'stringA'^^<type> 'stringB'^^ns:mytype ", | ||
{ type: 'literal', value: '"stringA"', line: 1 }, | ||
{ type: 'type', value: 'type', line: 1 }, | ||
{ type: 'literal', value: '"string"', line: 1 }, | ||
{ type: 'literal', value: '"stringB"', line: 1 }, | ||
{ type: 'type', value: 'mytype', prefix: 'ns', line: 1 }, | ||
@@ -116,7 +165,7 @@ { type: 'eof', line: 1 }), | ||
{ type: 'literal', value: '"10"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'comma', line: 1}, | ||
{ type: 'literal', value: '"20"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'dot', line: 1}, | ||
{ type: 'comma', line: 1 }, | ||
{ type: 'literal', value: '"+20"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'dot', line: 1 }, | ||
{ type: 'literal', value: '"-30"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'comma', line: 1}, | ||
{ type: 'comma', line: 1 }, | ||
{ type: 'literal', value: '"40"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
@@ -127,11 +176,11 @@ { type: 'dot', line: 1 }, | ||
'should tokenize a decimal literal': | ||
shouldTokenize('1.. 2.0, .3. -0.4, -.5. ', | ||
{ type: 'literal', value: '"1."^^<http://www.w3.org/2001/XMLSchema#decimal>', line: 1 }, | ||
{ type: 'dot', line: 1}, | ||
shouldTokenize('1. 2.0, .3. -0.4, -.5. ', | ||
{ type: 'literal', value: '"1"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'dot', line: 1 }, | ||
{ type: 'literal', value: '"2.0"^^<http://www.w3.org/2001/XMLSchema#decimal>', line: 1 }, | ||
{ type: 'comma', line: 1}, | ||
{ type: 'comma', line: 1 }, | ||
{ type: 'literal', value: '".3"^^<http://www.w3.org/2001/XMLSchema#decimal>', line: 1 }, | ||
{ type: 'dot', line: 1}, | ||
{ type: 'dot', line: 1 }, | ||
{ type: 'literal', value: '"-0.4"^^<http://www.w3.org/2001/XMLSchema#decimal>', line: 1 }, | ||
{ type: 'comma', line: 1}, | ||
{ type: 'comma', line: 1 }, | ||
{ type: 'literal', value: '"-.5"^^<http://www.w3.org/2001/XMLSchema#decimal>', line: 1 }, | ||
@@ -145,3 +194,3 @@ { type: 'dot', line: 1 }, | ||
{ type: 'comma', line: 1}, | ||
{ type: 'literal', value: '"30.40e50"^^<http://www.w3.org/2001/XMLSchema#double>', line: 1 }, | ||
{ type: 'literal', value: '"+30.40E+50"^^<http://www.w3.org/2001/XMLSchema#double>', line: 1 }, | ||
{ type: 'dot', line: 1}, | ||
@@ -219,3 +268,3 @@ { type: 'literal', value: '"-60.70e-80"^^<http://www.w3.org/2001/XMLSchema#double>', line: 1 }, | ||
'should tokenize prefix declarations': | ||
'should tokenize @prefix declarations': | ||
shouldTokenize('@prefix : <http://uri.org/#>.\n@prefix abc: <http://uri.org/#>.', | ||
@@ -232,2 +281,30 @@ { type: '@prefix', line: 1 }, | ||
'should tokenize @base declarations': | ||
shouldTokenize('@base <http://uri.org/#>.\n@base <http://uri.org/#>.', | ||
{ type: '@base', line: 1 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 1 }, | ||
{ type: 'dot', line: 1 }, | ||
{ type: '@base', line: 2 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 2 }, | ||
{ type: 'dot', line: 2 }, | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize PREFIX declarations': | ||
shouldTokenize('PREFIX : <http://uri.org/#>\npreFiX abc: <http://uri.org/#>', | ||
{ type: 'PREFIX', line: 1 }, | ||
{ type: 'prefix', value: '', line: 1 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 1 }, | ||
{ type: 'PREFIX', line: 2 }, | ||
{ type: 'prefix', value: 'abc', line: 2 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 2 }, | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize BASE declarations': | ||
shouldTokenize('BASE <http://uri.org/#>\nbAsE <http://uri.org/#>', | ||
{ type: 'BASE', line: 1 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 1 }, | ||
{ type: 'BASE', line: 2 }, | ||
{ type: 'explicituri', value: 'http://uri.org/#', line: 2 }, | ||
{ type: 'eof', line: 2 }), | ||
'should tokenize qnames': | ||
@@ -264,2 +341,13 @@ shouldTokenize(':a b:c d-dd:e-ee.', | ||
'should tokenize mixed lists': | ||
shouldTokenize('<a> <b> (1 "2" :o)', | ||
{ type: 'explicituri', value: 'a', line: 1 }, | ||
{ type: 'explicituri', value: 'b', line: 1 }, | ||
{ type: 'liststart', line: 1 }, | ||
{ type: 'literal', value: '"1"^^<http://www.w3.org/2001/XMLSchema#integer>', line: 1 }, | ||
{ type: 'literal', value: '"2"', line: 1 }, | ||
{ type: 'qname', value: 'o', line: 1 }, | ||
{ type: 'listend', line: 1 }, | ||
{ type: 'eof', line: 1 }), | ||
'should tokenize the "a" predicate': | ||
@@ -266,0 +354,0 @@ shouldTokenize('<x> a <y>.', |
@@ -91,2 +91,6 @@ var N3Parser = require('../lib/n3parser.js'); | ||
'should not parse undefined empty prefix in subject': | ||
shouldNotParse(':a ', | ||
'Undefined prefix ":" at line 1.'), | ||
'should not parse undefined prefix in subject': | ||
@@ -104,2 +108,8 @@ shouldNotParse('a:a ', | ||
'should parse triples with SPARQL prefixes': | ||
shouldParse('PREFIX : <#>\n' + | ||
'PrEfIX a: <a#> ' + | ||
':x a:a a:b.', | ||
['#x', 'a#a', 'a#b']), | ||
'should parse statements with shared subjects': | ||
@@ -110,2 +120,12 @@ shouldParse('<a> <b> <c>;\n<d> <e>.', | ||
'should parse statements with shared subjects and trailing semicolon': | ||
shouldParse('<a> <b> <c>;\n<d> <e>;\n.', | ||
['a', 'b', 'c'], | ||
['a', 'd', 'e']), | ||
'should parse statements with shared subjects and multiple semicolons': | ||
shouldParse('<a> <b> <c>;;\n<d> <e>.', | ||
['a', 'b', 'c'], | ||
['a', 'd', 'e']), | ||
'should parse statements with shared subjects and predicates': | ||
@@ -117,8 +137,8 @@ shouldParse('<a> <b> <c>, <d>.', | ||
'should parse statements with named blank nodes': | ||
shouldParse('_:a _:b _:c.', | ||
['_:b0', '_:b1', '_:b2']), | ||
shouldParse('_:a <b> _:c.', | ||
['_:b0', 'b', '_:b1']), | ||
'should parse statements with empty blank nodes': | ||
shouldParse('[] _:b [].', | ||
['_:b0', '_:b1', '_:b2']), | ||
shouldParse('[] <b> [].', | ||
['_:b0', 'b', '_:b1']), | ||
@@ -262,9 +282,2 @@ 'should parse statements with unnamed blank nodes in the subject': | ||
'should resolve the colon prefix against the hash URI': | ||
shouldParse('@base <base/>.\n' + | ||
'<a> <b> <c>.\n' + | ||
':x :y :z.', | ||
['base/a', 'base/b', 'base/c'], | ||
['#x', '#y', '#z']), | ||
'should resolve URIs against @base': | ||
@@ -278,2 +291,10 @@ shouldParse('@base <http://ex.org/>.\n' + | ||
'should resolve URIs against SPARQL base': | ||
shouldParse('BASE <http://ex.org/>\n' + | ||
'<a> <b> <c>. ' + | ||
'BASE <d/> ' + | ||
'<e> <f> <g>.', | ||
['http://ex.org/a', 'http://ex.org/b', 'http://ex.org/c'], | ||
['http://ex.org/d/e', 'http://ex.org/d/f', 'http://ex.org/d/g']), | ||
'should not parse improperly nested square brackets': | ||
@@ -283,6 +304,2 @@ shouldNotParse('<a> <b> [<c> <d>]].', | ||
'should error when a predicate is not there': | ||
shouldNotParse('<a>.', | ||
'Expected predicate to follow "a" at line 1.'), | ||
'should error when an object is not there': | ||
@@ -315,9 +332,2 @@ shouldNotParse('<a> <b>.', | ||
['http://ex.org/d/h', 'http://ex.org/d/i', 'http://ex.org/d/j']), | ||
'should resolve the colon prefix against the document URI': | ||
shouldParse('@base <base/>.\n' + | ||
'<a> <b> <c>.\n' + | ||
':x :y :z.', | ||
['doc/base/a', 'doc/base/b', 'doc/base/c'], | ||
['doc/file.ttl#x', 'doc/file.ttl#y', 'doc/file.ttl#z']), | ||
} | ||
@@ -324,0 +334,0 @@ }).export(module); |
(The diffs of three additional files in this comparison are not shown by the viewer.)