@graphy/content.nq.read
Advanced tools
Comparing version 4.0.0 to 4.0.1
1094
main.js
@@ -7,46 +7,160 @@ | ||
const RT_ABSOLUTE_IRI_VALID = /^[a-z][a-z0-9+\-.]*:(?:[^\0-\x20<>"{}|^`\\]|\\u[A-Fa-f0-9]{4}|\\U[A-Fa-f0-9]{8})*$/; | ||
const RT_ABSOLUTE_IRI_ESCAPELESS_VALID = /^[a-z][a-z0-9+\-.]*:[^\0-\x20<>"{}|^`]*$/; | ||
const RT_NAMED_NODE_VALID = /^([^\0-\x20<>"{}|^`\\]|\\u[A-Fa-f0-9]{4}|\\U[A-Fa-f0-9]{8})*$/; | ||
const RT_NAMED_NODE_ESCAPELESS_VALID = /^([^\0-\x20<>"{}|^`])*$/; | ||
const H_ESCAPES_JSON = { | ||
'\t': '\\t', | ||
'\u0008': '\\b', | ||
'\n': '\\n', | ||
'\r': '\\r', | ||
'\f': '\\f', | ||
'"': '\\"', | ||
}; | ||
const R_ESCAPES = /(\\[\\])|\\([^tbnrfuU\\])/g; | ||
const R_UNICODE_8 = /\\U([0-9A-Fa-f]{8})/g; | ||
const R_UNICODE_ANY = /\\u([0-9A-Fa-f]{4})|\\U([0-9A-Fa-f]{8})/g; | ||
const F_REPLACE_UNICODE_ANY = (s_, s_8, s_4) => String.fromCodePoint(parseInt(s_8 || s_4, 16)); | ||
const F_REPLACE_UNICODE_ANY = (s_, s_4, s_8) => String.fromCodePoint(parseInt(s_4 || s_8, 16)); | ||
const unescape_literal = s => JSON.parse('"' | ||
+s | ||
.replace(R_UNICODE_8, F_REPLACE_UNICODE_ANY) | ||
.replace(R_ESCAPES, '$1$2') // no need to escape anything other than reserved characters | ||
.replace(/[\t"\f\u0008]/g, s_e => H_ESCAPES_JSON[s_e]) | ||
+'"'); | ||
const R_CLEAN = /\s*(?:#[^\n]*\n\s*)*\s*/y; | ||
const R_CLEAN_COMMENTS = /\s*(#[^\n]*\n\s*)*\s*/y; | ||
const R_LITERAL_ESCAPELESS = /^"([^\\"]*)"(?:\^\^<([^\\>]*)>|@([^ \t.]+)|)?$/; | ||
const R_LITERAL = /^"(.*)"(?:\^\^<(.*)>|@([^ \t.]+)|)?$/; | ||
const RT_HAS_ESCAPES = /[\\]/; | ||
const R_EOL = /[^\n]+\n/y; | ||
const RT_ABSOLUTE_IRI_VALID = /^[a-z][a-z0-9+\-.]*:([^\0-\x20<>"{}|^`\\]|\\u[A-Fa-f0-9]{4}|\\U[A-Fa-f0-9]{8})*$/; | ||
// eslint-disable-next-line no-misleading-character-class | ||
const RT_BLANK_NODE_LABEL_VALID = /^(?:[A-Za-z\xc0-\xd6\xd8-\xf6\xf8-\u{02ff}\u{0370}-\u{037d}\u{037f}-\u{1fff}\u{200c}-\u{200d}\u{2070}-\u{218f}\u{2c00}-\u{2fef}\u{3001}-\u{d7ff}\u{f900}-\u{fdcf}\u{fdf0}-\u{fffd}\u{10000}-\u{effff}_0-9])(?:(?:[A-Za-z\xc0-\xd6\xd8-\xf6\xf8-\u{02ff}\u{0370}-\u{037d}\u{037f}-\u{1fff}\u{200c}-\u{200d}\u{2070}-\u{218f}\u{2c00}-\u{2fef}\u{3001}-\u{d7ff}\u{f900}-\u{fdcf}\u{fdf0}-\u{fffd}\u{10000}-\u{effff}_\-0-9\xb7\u{0300}-\u{036f}\u{203f}-\u{2040}.])*[A-Za-z\xc0-\xd6\xd8-\xf6\xf8-\u{02ff}\u{0370}-\u{037d}\u{037f}-\u{1fff}\u{200c}-\u{200d}\u{2070}-\u{218f}\u{2c00}-\u{2fef}\u{3001}-\u{d7ff}\u{f900}-\u{fdcf}\u{fdf0}-\u{fffd}\u{10000}-\u{effff}_\-0-9\xb7\u{0300}-\u{036f}\u{203f}-\u{2040}])?$/u; | ||
const RT_ESCAPES_INVALID = /(?:(?:^|[^\\])(?:\\(?:\\\\)*[^"tbnrfuU\\_~.!$&'()*+,;=/?#@%-])(?:[^\\]|$))|\\u[^A-Fa-f0-9]{4}|\\U[^A-Fa-f0-9]{8}/; | ||
const RT_LITERAL_CONTENTS_VALID = /^(?:[^\\\n\r]|\\[tbnrf"'\\]|\\u[A-Fa-f0-9]{4}|\\U[A-Fa-f0-9]{8})*$/; | ||
const RT_LANGUAGE_VALID = /^[a-z]+(-[a-z0-9]+)*$/; | ||
const R_WS = /\s*/y; | ||
const R_HWS = /[ \t]*/y; | ||
const R_LANGTAG = /@([A-Za-z]+(?:-[A-Za-z0-9-]+)*)(?:\s+|(?=[.,;\])#]))/y; | ||
const R_IRIREF = /<([^>]*)>\s*/y; | ||
const R_QUAD_ESCAPELESS_SP = /(?:<([^\\>]*)>|_:([^\x20\t<]+))[\x20\t]*<([^\\>]*)>[\x20\t]*(?:(<[^\\>]*)>|_:([^\x20\t<]+)|"([^"\\]*)"(?:\^\^<([^\\>]*)>|@([^\x20\t.]+)|))[\x20\t]*(?:<([^\\>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+/y; | ||
const R_QUAD = /(?:<([^>]*)>|_:([^\x20\t<]+))[\x20\t]*<([^>]*)>[\x20\t]*(?:(<[^>]*)>|_:([^\x20\t<]+)|"((?:[^"\\]|\\.)*)"(?:\^\^<([^>]*)>|@([^\x20\t.]+)|))[\x20\t]*(?:<([^>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+/y; | ||
const F_REPLACE_STRLIT_CONTENTS = (s_, s_whitespace, s_auto, s_4, s_8, s_invalid) => { | ||
if(s_whitespace) { | ||
switch(s_whitespace) { | ||
case 't': return '\t'; | ||
case 'n': return '\n'; | ||
case 'r': return '\r'; | ||
case 'f': return '\f'; | ||
case 'b': return '\b'; | ||
default: { | ||
console.assert(`bad regex escape char mapping: '${s_whitespace}'`); | ||
} | ||
} | ||
} | ||
else if(s_auto) { | ||
return s_auto; | ||
} | ||
else if(s_4) { | ||
return String.fromCodePoint(parseInt(s_4, 16)); | ||
} | ||
else if(s_8) { | ||
return String.fromCodePoint(parseInt(s_8, 16)); | ||
} | ||
else if(s_invalid) { | ||
// pointless escape | ||
if('\\' === s_invalid[0]) { | ||
// // relaxed | ||
// return s_invalid[1]; | ||
// if relaxed then return s_invalid, otherwise throw: | ||
throw new Error(`expected string_literal but invalid escape sequence within contents: '${s_invalid}'. failed to parse a valid token`); | ||
} | ||
// bad character | ||
else { | ||
throw new Error(`expected string_literal but invalid whitespace character within contents: ${JSON.stringify(s_invalid)}. failed to parse a valid token`); | ||
} | ||
} | ||
else { | ||
console.assert(`unexpected no match branch in escape sequence replace callback`); | ||
} | ||
}; | ||
const R_STRLIT_SHORT_CONTENTS_ESCAPES_HARD = /(?:\\(?:([tnrfb])|([\\"'])|u([0-9A-Fa-f]{4})|U([0-9A-Fa-f]{8}))|([\r\n]|\\.))/g; | ||
const R_STRLIT_SHORT_CONTENTS_ESCAPES_SOFT = /(?:\\(?:([tnrfb])|([\\"'])|u([0-9A-Fa-f]{4})|U([0-9A-Fa-f]{8}))|([\r\n]|\\[^uU]|\\u[^]{4}|\\U[^]{8}))/g; | ||
const unescape_literal_short_hard = s_literal => s_literal | ||
.replace(R_STRLIT_SHORT_CONTENTS_ESCAPES_HARD, F_REPLACE_STRLIT_CONTENTS); | ||
const unescape_literal_short_soft = (s_literal) => { | ||
let m_incomplete = R_STRLIT_ESCAPE_INCOMPLETE.exec(s_literal); | ||
// incomplete escape | ||
if(m_incomplete) { | ||
let i_safe = m_incomplete.index; | ||
// rewind | ||
return [ | ||
s_literal.slice(0, i_safe) | ||
.replace(R_STRLIT_SHORT_CONTENTS_ESCAPES_SOFT, F_REPLACE_STRLIT_CONTENTS), | ||
s_literal.slice(i_safe), | ||
]; | ||
} | ||
// done | ||
else { | ||
return [ | ||
s_literal | ||
.replace(R_STRLIT_SHORT_CONTENTS_ESCAPES_SOFT, F_REPLACE_STRLIT_CONTENTS), | ||
'', | ||
]; | ||
} | ||
}; | ||
// lookbehind regexes | ||
const [ | ||
R_STRLIT_ESCAPE_INCOMPLETE, | ||
R_STRLIT_SHORT_DOUBLE_TERM, | ||
] = (() => { | ||
function RegExp_$lookbehind_polyfill(s_input) { | ||
let m_match = RegExp.prototype.exec.call(this, s_input); | ||
if(m_match) { | ||
let i_start = m_match[0].length - m_match[1].length; | ||
m_match.index += i_start; | ||
m_match[0] = m_match[0].slice(i_start); | ||
} | ||
return m_match; | ||
} | ||
let mk_lookbehind_regex = (() => { | ||
try { | ||
/(?<!h)i/; // eslint-disable-line no-unused-expressions | ||
} | ||
catch(e_compile) { | ||
return (f_lookbehind, r_polyfill, f_polyfill) => { | ||
r_polyfill.exec = f_polyfill; | ||
return r_polyfill; | ||
}; | ||
} | ||
return f_lookbehind => f_lookbehind(); | ||
})(); | ||
return [ | ||
// R_STRLIT_ESCAPE_INCOMPLETE | ||
mk_lookbehind_regex( | ||
() => /(?<!(?:[^\\]|^)(?:\\\\)*\\)\\(|u[0-9A-Fa-f]{0,3}|U[0-9A-Fa-f]{0,7})$/, | ||
/^(?:(?:[^\\]|\\.)*)(\\(?:|u[0-9A-Fa-f]{0,3}|U[0-9A-Fa-f]{0,7}))$/, | ||
function RegExp_$lookbehind_polyfill_n(s_input) { | ||
let m_match = RegExp.prototype.exec.call(this, s_input); | ||
if(m_match) { | ||
m_match.index += m_match[0].length - m_match[1].length; | ||
} | ||
return m_match; | ||
}, | ||
), | ||
// R_STRLIT_SHORT_DOUBLE_TERM | ||
mk_lookbehind_regex( | ||
() => /(?<!(?:[^\\]|^)(?:\\\\)*\\)"\s*/g, | ||
/(?:[^\\"]|\\.)*("\s*)/y, | ||
RegExp_$lookbehind_polyfill, | ||
), | ||
]; | ||
})(); | ||
const R_QUAD_ESCAPELESS_SP = /(?:<([^\\>]*)>|_:([^\x20\t<]+))[\x20\t]*<([^\\>]*)>[\x20\t]*(?:(?:(<[^\\>]*)>|_:([^\x20\t<]+))[\x20\t]*(?:<([^\\>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+|"([^"\\]*)(?:(")(?:\^\^<([^\\>]*)>|@([^\x20\t.]+)|)[\x20\t]*(?:<([^\\>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+)?)/y; | ||
const R_QUAD = /(?:<([^>]*)>|_:([^\x20\t<]+))[\x20\t]*<([^>]*)>[\x20\t]*(?:(?:(<[^>]*)>|_:([^\x20\t<]+))[\x20\t]*(?:<([^>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+|"((?:[^"\\]|\\.)*)(?:(")(?:\^\^<([^>]*)>|@([^\x20\t.]+)|)[\x20\t]*(?:<([^>]*)>|_:([^\x20\t<]+)|)[\x20\t]*\.\s*(#[^\n]*\n\s*|\n\s*)+)?)/y; | ||
const R_BLANK_NODE = /_:([^\x20\t<]+)/y; | ||
class NQuads_Reader extends stream.Transform { | ||
@@ -110,3 +224,2 @@ constructor(g_impls) { | ||
class Reader { | ||
@@ -125,6 +238,9 @@ constructor(g_config) { | ||
// allow relative iris flag | ||
let b_allow_relative_iris = g_config.allow_relative_iris || g_config.allowRelativeIRIs || g_config.allowRelativeIris || false; | ||
// adopt factory | ||
let dc_factory = this._dc_factory = factory.adopt(g_config.dataFactory || g_config.data_factory || factory.unfiltered); | ||
this._f_quad = dc_factory.quad; | ||
let f_quad = this._f_quad = dc_factory.quad; | ||
@@ -144,2 +260,12 @@ // fields | ||
_b_destroyed: false, | ||
_b_trim_start: true, | ||
_f_state: this.statement, | ||
_kt_subject: null, | ||
_kt_predicate: null, | ||
_kt_object: null, | ||
_s_literal: '', | ||
}); | ||
@@ -151,3 +277,3 @@ | ||
// clean regex | ||
let r_clean = R_CLEAN; | ||
let r_clean = this._r_clean = R_CLEAN; | ||
@@ -161,2 +287,12 @@ if(g_config.relaxed) { | ||
let namedNode = dc_factory.namedNode; | ||
let blankNode = dc_factory.blankNode; | ||
let languagedLiteral = dc_factory.languagedLiteral; | ||
// test for valid named node | ||
let rt_named_node_valid = b_allow_relative_iris? RT_NAMED_NODE_VALID: RT_ABSOLUTE_IRI_VALID; | ||
// test for valid named node escapeless | ||
let rt_named_node_valid_escapeless = b_allow_relative_iris? RT_NAMED_NODE_ESCAPELESS_VALID: RT_ABSOLUTE_IRI_ESCAPELESS_VALID; | ||
// validation | ||
@@ -166,30 +302,18 @@ let k_self = this; | ||
? { | ||
namedNode(s_iri) { | ||
if(!RT_ABSOLUTE_IRI_VALID.test(s_iri)) return k_self._error(`Invalid absolute IRI: "${s_iri}"`); | ||
return dc_factory.namedNode(s_iri); | ||
create_named_node(p_iri) { | ||
if(!rt_named_node_valid.test(p_iri)) return this.error(`invalid IRI: "${p_iri}"`); | ||
return namedNode(p_iri); | ||
}, | ||
blankNode(s_label) { | ||
if(!RT_BLANK_NODE_LABEL_VALID.test(s_label)) return k_self._error(`Invalid blank node label: "${s_label}"`); | ||
return dc_factory.blankNode(s_label); | ||
create_named_node_escapeless(p_iri) { | ||
if(!rt_named_node_valid_escapeless.test(p_iri)) return this.error(`invalid IRI: "${p_iri}"`); | ||
return namedNode(p_iri); | ||
}, | ||
simpleLiteral(s_contents) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid simple-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.simpleLiteral(s_contents); | ||
create_blank_node(s_label) { | ||
if(!RT_BLANK_NODE_LABEL_VALID.test(s_label)) return k_self._error(`Invalid blank node label: "${s_label}"`); | ||
return blankNode(s_label); | ||
}, | ||
languagedLiteral(s_contents, s_language) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid languaged-literal contents: "${s_contents}"`); | ||
} | ||
create_languaged_literal(s_contents, s_language) { | ||
if(!RT_LANGUAGE_VALID.test(s_language)) { | ||
@@ -199,119 +323,13 @@ return k_self._error(`Invalid literal language tag: ${s_language}`); | ||
return dc_factory.languagedLiteral(s_contents, s_language); | ||
return languagedLiteral(s_contents, s_language); | ||
}, | ||
datatypedLiteral(s_contents, p_datatype) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid datatyped-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.datatypedLiteral(s_contents, k_self.namedNode(p_datatype)); | ||
}, | ||
simpleLiteralE(s_contents) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid simple-literal contents: "${s_contents}"`); | ||
} | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within simple-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.simpleLiteral(s_contents); | ||
}, | ||
languagedLiteralE(s_contents, s_language) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid languaged-literal contents: "${s_contents}"`); | ||
} | ||
if(!RT_LANGUAGE_VALID.test(s_language)) { | ||
return k_self._error(`Invalid literal language tag: ${s_language}`); | ||
} | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within languaged-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.languagedLiteral(s_contents, s_language); | ||
}, | ||
datatypedLiteralE(s_contents, p_datatype) { | ||
if(!RT_LITERAL_CONTENTS_VALID.test(s_contents)) { | ||
return k_self._error(`Invalid datatyped-literal contents: "${s_contents}"`); | ||
} | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within datatyped-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.datatypedLiteral(s_contents, k_self.namedNode(p_datatype)); | ||
}, | ||
} | ||
: { | ||
namedNode: dc_factory.namedNode, | ||
create_named_node: namedNode, | ||
blankNode: dc_factory.blankNode, | ||
create_named_node_escapeless: namedNode, | ||
create_blank_node: blankNode, | ||
simpleLiteral: dc_factory.simpleLiteral, | ||
languagedLiteral: dc_factory.languagedLiteral, | ||
datatypedLiteral: (s_contents, p_datatype) => dc_factory.datatypedLiteral(s_contents, dc_factory.namedNode(p_datatype)), | ||
simpleLiteralE(s_contents) { | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within simple-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.simpleLiteral(s_contents); | ||
}, | ||
languagedLiteralE(s_contents, s_language) { | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within languaged-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.languagedLiteral(s_contents, s_language); | ||
}, | ||
datatypedLiteralE(s_contents, p_datatype) { | ||
try { | ||
s_contents = unescape_literal(s_contents); | ||
} | ||
catch(e_parse) { | ||
return this._error(`Invalid escaping within datatyped-literal contents: "${s_contents}"`); | ||
} | ||
return dc_factory.datatypedLiteral(s_contents, dc_factory.namedNode(p_datatype)); | ||
}, | ||
create_languaged_literal: languagedLiteral, | ||
}); | ||
@@ -342,11 +360,17 @@ | ||
// remove whitespace & comments from beginning | ||
r_clean.lastIndex = 0; | ||
let m_clean = r_clean.exec(s); | ||
if(this.emit_comments) { | ||
this.emit_comments(m_clean[1]); | ||
if(this._b_trim_start) { | ||
r_clean.lastIndex = 0; | ||
let m_clean = r_clean.exec(s); | ||
if(this.emit_comments) { | ||
this.emit_comments(m_clean[1]); | ||
} | ||
// update index and prepare to match statement | ||
this.i = r_clean.lastIndex; | ||
} | ||
// do not remove whitespace; reset index | ||
else { | ||
this.i = 0; | ||
} | ||
// update index and prepare to match statement | ||
this.i = r_clean.lastIndex; | ||
// cache chunk length | ||
@@ -357,3 +381,3 @@ this.n = s.length; | ||
try { | ||
this.safe_parse(true); | ||
this.parse(true); | ||
} | ||
@@ -379,5 +403,21 @@ // read error occurred; emit and destroy stream | ||
// parse safely | ||
// remove whitespace & comments from beginning | ||
if(this._b_trim_start) { | ||
r_clean.lastIndex = 0; | ||
let m_clean = r_clean.exec(this.s); | ||
if(this.emit_comments) { | ||
this.emit_comments(m_clean[1]); | ||
} | ||
// update index and prepare to match statement | ||
this.i = r_clean.lastIndex; | ||
} | ||
// do not remove whitespace; reset index | ||
else { | ||
this.i = 0; | ||
} | ||
// parse | ||
try { | ||
this.safe_parse(); | ||
this.parse(); | ||
} | ||
@@ -396,2 +436,7 @@ // read error occurred; pass to flush errback and exit method | ||
// invalid state | ||
if(this._f_state !== this.statement) { | ||
return ds_transform.demolish(new Error(`parsing error occurred in state: ${this._f_state.name}\n ${this.s.substr(0, 50)}\n ^ starting here`)); | ||
} | ||
// make buffer's alloc eligible for gc | ||
@@ -416,4 +461,4 @@ this.s = null; | ||
// data event | ||
this.data = g_quad => ds_transform.push(g_quad); | ||
// data quad | ||
this._f_data_quad = (kt_subject, kt_predicate, kt_object, kt_graph) => ds_transform.push(f_quad(kt_subject, kt_predicate, kt_object, kt_graph)); | ||
@@ -487,16 +532,21 @@ // new listener added | ||
safe_parse() { | ||
// begin parsing, keep applying until no more stack bail-outs | ||
parse() { | ||
let f_sync = this._f_state(); | ||
while('function' === typeof f_sync) { | ||
f_sync = f_sync.apply(this); | ||
} | ||
} | ||
statement() { | ||
let s = this.s; | ||
let n = this.n; | ||
let i = this.i; | ||
let fk_data = this.data; | ||
let f_quad = this._f_quad; | ||
let namedNode = this.namedNode; | ||
let blankNode = this.blankNode; | ||
let simpleLiteral = this.simpleLiteral; | ||
let languagedLiteral = this.languagedLiteral; | ||
let datatypedLiteral = this.datatypedLiteral; | ||
let simpleLiteralE = this.simpleLiteralE; | ||
let languagedLiteralE = this.languagedLiteralE; | ||
let datatypedLiteralE = this.datatypedLiteralE; | ||
let f_data_quad = this._f_data_quad; | ||
let create_named_node = this.create_named_node; | ||
let create_named_node_escapeless = this.create_named_node_escapeless; | ||
let create_languaged_literal = this.create_languaged_literal; | ||
let create_blank_node = this.create_blank_node; | ||
let simpleLiteral = this._dc_factory.simpleLiteral; | ||
let datatypedLiteral = this._dc_factory.datatypedLiteral; | ||
let kt_default_graph = this._kt_default_graph; | ||
@@ -515,4 +565,7 @@ | ||
i = R_QUAD_ESCAPELESS_SP.lastIndex; | ||
// prep object term | ||
let kt_object; | ||
// where to find the graph component | ||
let b_graph_late = false; | ||
@@ -522,48 +575,132 @@ // object term type is named node | ||
let p_object = m_statement_e_sp[4].slice(1); | ||
kt_object = namedNode(p_object); | ||
kt_object = create_named_node_escapeless(p_object); | ||
} | ||
// object term type is blank node | ||
else if(m_statement_e_sp[5]) { | ||
kt_object = blankNode(m_statement_e_sp[5]); | ||
kt_object = create_blank_node(m_statement_e_sp[5]); | ||
} | ||
// object term type is literal | ||
else { | ||
// graph is in late capture group | ||
b_graph_late = true; | ||
// contents | ||
let s_contents = m_statement_e_sp[6]; | ||
let s_contents = m_statement_e_sp[9]; | ||
// string terminator | ||
if(m_statement_e_sp[10]) { | ||
// datatype is present | ||
if(m_statement_e_sp[11]) { | ||
// create datatype term | ||
let kt_datatype = this.create_named_node_escapeless(m_statement_e_sp[11]); | ||
// create object term | ||
kt_object = datatypedLiteral(s_contents, kt_datatype); | ||
} | ||
// language tag is present | ||
else if(m_statement_e_sp[12]) { | ||
// normalize language | ||
let s_language = m_statement_e_sp[12].toLowerCase(); | ||
// create object term | ||
kt_object = create_languaged_literal(s_contents, s_language); | ||
} | ||
// simple literal | ||
else { | ||
kt_object = simpleLiteral(s_contents); | ||
} | ||
} | ||
// no string terminator | ||
else { | ||
// save contents | ||
this._s_literal = s_contents; | ||
// update index | ||
this.i = i; | ||
// save subject | ||
{ | ||
let s_subject = m_statement_e_sp[1]; | ||
// named node | ||
if(s_subject || 'string' === typeof s_subject) { | ||
this._kt_subject = create_named_node_escapeless(s_subject); | ||
} | ||
// blank node | ||
else { | ||
this._kt_subject = create_blank_node(m_statement_e_sp[2]); | ||
} | ||
} | ||
// save predicate | ||
this._kt_predicate = create_named_node_escapeless(m_statement_e_sp[3]); | ||
// parse contents | ||
let z_bail = this.strlit_contents(); | ||
// bail out of stack | ||
if(z_bail && this.statement !== z_bail) { | ||
return z_bail; | ||
} | ||
// statement completed | ||
else { | ||
// clean | ||
let r_clean = this._r_clean; | ||
r_clean.lastIndex = this.i; | ||
let m_clean = r_clean.exec(s); | ||
if(this.emit_comments) { | ||
this.emit_comments(m_clean[1]); | ||
} | ||
// update local index and prepare to match next statement | ||
i = r_clean.lastIndex; | ||
// resume | ||
continue; | ||
} | ||
} | ||
} | ||
let kt_graph = kt_default_graph; | ||
// set datatype if present | ||
if(m_statement_e_sp[7]) { | ||
kt_object = datatypedLiteral(s_contents, m_statement_e_sp[7]); | ||
// graph after literal | ||
if(b_graph_late) { | ||
// ref capture group | ||
let s_graph = m_statement_e_sp[13]; | ||
// named node | ||
if(s_graph || 'string' === typeof s_graph) { | ||
kt_graph = create_named_node_escapeless(s_graph); | ||
} | ||
// otherwise, set language tag if present | ||
else if(m_statement_e_sp[8]) { | ||
kt_object = languagedLiteral(s_contents, m_statement_e_sp[8].toLowerCase()); | ||
// blank node | ||
else if(m_statement_e_sp[14]) { | ||
kt_graph = create_blank_node(m_statement_e_sp[14]); | ||
} | ||
// simple literal | ||
} | ||
// graph after node | ||
else { | ||
// ref capture group | ||
let s_graph = m_statement_e_sp[6]; | ||
// named node | ||
if(s_graph || 'string' === typeof s_graph) { | ||
kt_graph = create_named_node_escapeless(s_graph); | ||
} | ||
// blank node | ||
else if(m_statement_e_sp[7]) { | ||
kt_graph = create_blank_node(m_statement_e_sp[7]); | ||
} | ||
} | ||
let kt_subject; | ||
{ | ||
let s_subject = m_statement_e_sp[1]; | ||
// named node | ||
if(s_subject || 'string' === typeof s_subject) { | ||
kt_subject = create_named_node_escapeless(s_subject); | ||
} | ||
// blank node | ||
else { | ||
kt_object = simpleLiteral(s_contents); | ||
kt_subject = create_blank_node(m_statement_e_sp[2]); | ||
} | ||
} | ||
let s_predicate = m_statement_e_sp[3]; | ||
// emit data event | ||
fk_data( | ||
f_quad( | ||
m_statement_e_sp[2] | ||
? blankNode(m_statement_e_sp[2]) | ||
: namedNode(m_statement_e_sp[1]), | ||
namedNode(m_statement_e_sp[3]), | ||
kt_object, | ||
m_statement_e_sp[9] | ||
? namedNode(m_statement_e_sp[9]) | ||
: (m_statement_e_sp[10] | ||
? blankNode(m_statement_e_sp[10]) | ||
: ('string' === typeof m_statement_e_sp[9] | ||
? namedNode(m_statement_e_sp[9]) | ||
: kt_default_graph)), | ||
), | ||
f_data_quad( | ||
kt_subject, | ||
create_named_node_escapeless(s_predicate), | ||
kt_object, | ||
kt_graph, | ||
); | ||
// comments | ||
if(this.emit_comments) { | ||
this.emit_comments(m_statement_e_sp[11]); | ||
this.emit_comments(m_statement_e_sp[8] || m_statement_e_sp[15]); | ||
} | ||
@@ -581,4 +718,7 @@ } | ||
i = R_QUAD.lastIndex; | ||
// prep object term | ||
let kt_object; | ||
// where to find the graph component | ||
let b_graph_late = false; | ||
@@ -588,48 +728,135 @@ // object term type is named node | ||
let p_object = m_statement[4].slice(1); | ||
kt_object = namedNode(RT_HAS_ESCAPES.test(p_object)? p_object.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): p_object); | ||
kt_object = create_named_node(RT_HAS_ESCAPES.test(p_object)? p_object.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): p_object); | ||
} | ||
// object term type is blank node | ||
else if(m_statement[5]) { | ||
kt_object = blankNode(RT_HAS_ESCAPES.test(m_statement[5])? m_statement[5].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[5]); | ||
kt_object = create_blank_node(RT_HAS_ESCAPES.test(m_statement[5])? m_statement[5].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[5]); | ||
} | ||
// object term type is literal | ||
else { | ||
// graph is in late capture group | ||
b_graph_late = true; | ||
// contents | ||
let s_contents = m_statement[6]; | ||
let s_contents = m_statement[9]; | ||
// string terminator | ||
if(m_statement[10]) { | ||
// unescape contents | ||
s_contents = unescape_literal_short_hard(s_contents); | ||
// set datatype if present | ||
if(m_statement[7]) { | ||
kt_object = datatypedLiteralE(s_contents, m_statement[7]); | ||
// datatype is present | ||
if(m_statement[11]) { | ||
// create datatype term | ||
let kt_datatype = this.create_named_node(m_statement[11]); | ||
// create object term | ||
kt_object = datatypedLiteral(s_contents, kt_datatype); | ||
} | ||
// language tag is present | ||
else if(m_statement[12]) { | ||
// normalize language | ||
let s_language = m_statement[12].toLowerCase(); | ||
// create object term | ||
kt_object = create_languaged_literal(s_contents, s_language); | ||
} | ||
// simple literal | ||
else { | ||
kt_object = simpleLiteral(s_contents); | ||
} | ||
} | ||
// otherwise, set language tag if present | ||
else if(m_statement[8]) { | ||
kt_object = languagedLiteralE(s_contents, m_statement[8].toLowerCase()); | ||
// no string terminator | ||
else { | ||
// save contents | ||
this._s_literal = s_contents; | ||
// update index | ||
this.i = i; | ||
// save subject | ||
{ | ||
let s_subject = m_statement[1]; | ||
// named node | ||
if(s_subject || 'string' === typeof s_subject) { | ||
this._kt_subject = create_named_node(RT_HAS_ESCAPES.test(s_subject)? s_subject.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): s_subject); | ||
} | ||
// blank node | ||
else { | ||
this._kt_subject = create_blank_node(m_statement[2]); | ||
} | ||
} | ||
// save predicate | ||
this._kt_predicate = create_named_node(RT_HAS_ESCAPES.test(m_statement[3])? m_statement[3].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[3]); | ||
// parse contents | ||
let z_bail = this.strlit_contents(); | ||
// bail out of stack | ||
if(z_bail && this.statement !== z_bail) { | ||
return z_bail; | ||
} | ||
// statement completed | ||
else { | ||
// clean | ||
let r_clean = this._r_clean; | ||
r_clean.lastIndex = this.i; | ||
let m_clean = r_clean.exec(s); | ||
if(this.emit_comments) { | ||
this.emit_comments(m_clean[1]); | ||
} | ||
// update local index and prepare to match next statement | ||
i = r_clean.lastIndex; | ||
// resume | ||
continue; | ||
} | ||
} | ||
// simple literal | ||
} | ||
let kt_graph = kt_default_graph; | ||
// graph after literal | ||
if(b_graph_late) { | ||
// ref capture group | ||
let s_graph = m_statement[13]; | ||
// named node | ||
if(s_graph || 'string' === typeof s_graph) { | ||
kt_graph = create_named_node(RT_HAS_ESCAPES.test(s_graph)? s_graph.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): s_graph); | ||
} | ||
// blank node | ||
else if(m_statement[14]) { | ||
kt_graph = create_blank_node(m_statement[14]); | ||
} | ||
} | ||
// graph after node | ||
else { | ||
// ref capture group | ||
let s_graph = m_statement[6]; | ||
// named node | ||
if(s_graph || 'string' === typeof s_graph) { | ||
kt_graph = create_named_node(RT_HAS_ESCAPES.test(s_graph)? s_graph.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): s_graph); | ||
} | ||
// blank node | ||
else if(m_statement[7]) { | ||
kt_graph = create_blank_node(m_statement[7]); | ||
} | ||
} | ||
let kt_subject; | ||
{ | ||
let s_subject = m_statement[1]; | ||
// named node | ||
if(s_subject || 'string' === typeof s_subject) { | ||
kt_subject = create_named_node(RT_HAS_ESCAPES.test(s_subject)? s_subject.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): s_subject); | ||
} | ||
// blank node | ||
else { | ||
kt_object = simpleLiteralE(s_contents); | ||
kt_subject = create_blank_node(m_statement[2]); | ||
} | ||
} | ||
let s_predicate = m_statement[3]; | ||
// emit data event | ||
fk_data( | ||
f_quad( | ||
m_statement[2] | ||
? blankNode(m_statement[2]) | ||
: namedNode(RT_HAS_ESCAPES.test(m_statement[1])? m_statement[1].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[1]), | ||
namedNode(RT_HAS_ESCAPES.test(m_statement[3])? m_statement[3].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[3]), | ||
kt_object, | ||
m_statement[9] | ||
? namedNode(RT_HAS_ESCAPES.test(m_statement[9])? m_statement[9].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[9]) | ||
: (m_statement[10] | ||
? blankNode(m_statement[10]) | ||
: ('string' === typeof m_statement[9] | ||
? namedNode(RT_HAS_ESCAPES.test(m_statement[9])? m_statement[9].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): m_statement[9]) | ||
: kt_default_graph)), | ||
), | ||
f_data_quad( | ||
kt_subject, | ||
create_named_node(RT_HAS_ESCAPES.test(s_predicate)? s_predicate.replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY): s_predicate), | ||
kt_object, | ||
kt_graph, | ||
); | ||
// comments | ||
if(this.emit_comments) { | ||
this.emit_comments(m_statement[11]); | ||
this.emit_comments(m_statement[8] || m_statement[15]); | ||
} | ||
@@ -643,4 +870,4 @@ } | ||
// advance index | ||
this.i = R_EOL.lastIndex; | ||
this._error(`Failed to read statement:\n\`${this.s.substr(i, 80).replace(/\n/g, '\u23CE')} [...]\`\n ^ starting here`); | ||
i = R_EOL.lastIndex; | ||
this._error(`Failed to read statement:\n\`${s.substr(i, 80).replace(/\n/g, '\u23CE')} [...]\`\n ^ starting here`); | ||
@@ -659,6 +886,383 @@ // match counter: 2 | ||
this.s = s.substr(i); | ||
// resume here | ||
this._f_state = this.statement; | ||
// exit | ||
return 1; | ||
} | ||
strlit_contents() { | ||
let {s, n, i} = this; | ||
// try to find end | ||
R_STRLIT_SHORT_DOUBLE_TERM.lastIndex = i; | ||
let m_term = R_STRLIT_SHORT_DOUBLE_TERM.exec(s); | ||
// end is in this chunk | ||
if(m_term) { | ||
// index of terminator | ||
let i_term = m_term.index; | ||
// extract dirty potion | ||
let s_dirty = s.slice(i, i_term); | ||
// clean and save | ||
this._s_literal += unescape_literal_short_hard(s_dirty); | ||
// advance index beyond terminator | ||
this.i = i_term + m_term[0].length; | ||
// resume eating whitespace at start of next chunk | ||
this._b_trim_start = true; | ||
// proceed with datatype_or_lang, then bail out of stack or resume parsing | ||
return this.datatype_or_langtag() || this.statement; | ||
} | ||
// end is not in this chunk | ||
else { | ||
// extract whole portion | ||
let s_dirty = s.slice(i); | ||
// unescape to clean part | ||
let [s_clean, s_incomplete] = unescape_literal_short_soft(s_dirty); | ||
// save | ||
this._s_literal += s_clean; | ||
// set unparsed index | ||
this.i = i = n - s_incomplete.length; | ||
// do not eat whitespace at start of next chunk | ||
this._b_trim_start = false; | ||
} | ||
// not yet eos | ||
if(i < this.n) { | ||
// expected token was not found | ||
if(0 === i) { | ||
// we've exceeded the maximum token length | ||
if(this.n > this.max_token_length) { | ||
return this.parse_error('strlit_contents'); | ||
} | ||
} | ||
} | ||
// resume here | ||
this._f_state = this.strlit_contents; | ||
// store what is unparsed | ||
this.s = s.slice(i); | ||
// if we're not parsing a stream, then this is an error | ||
if(this.eos) this.eos(); | ||
return 1; | ||
} | ||
// parse state for datatype_or_langtag | ||
datatype_or_langtag() { | ||
// destruct chunk, length, and index | ||
let {s, n, i} = this; | ||
// ref character | ||
let x = s[i]; | ||
while(i < n) { // eslint-disable-line no-unmodified-loop-condition | ||
// datatype | ||
if('^' === x) { | ||
// enough to speculate datatype | ||
if((i+2) < n) { | ||
// correct token | ||
if('^' === s[i+1]) { | ||
// advance index beyond token | ||
R_IRIREF.lastIndex = i + 2; | ||
// execute regex | ||
let m_iriref = R_IRIREF.exec(s); | ||
// regex was a match | ||
if(m_iriref) { | ||
// advance index | ||
this.i = R_IRIREF.lastIndex; | ||
// prepare iri | ||
let p_datatype = m_iriref[1].replace(R_UNICODE_ANY, F_REPLACE_UNICODE_ANY); | ||
// create datatype term | ||
let kt_datatype = this.create_named_node(p_datatype); | ||
// create object term | ||
this._kt_object = this._dc_factory.datatypedLiteral(this._s_literal, kt_datatype); | ||
// free literal string | ||
this._s_literal = ''; | ||
// graph state | ||
return this.post_object(); | ||
} | ||
// failed to match; try again next chunk | ||
else { | ||
break; | ||
} | ||
} | ||
// invalid | ||
else { | ||
this._error(`Failed to read token after literal:\n\`${s.substr(i+1, 80).replace(/\n/g, '\u23CE')} [...]\`\n ^ starting here`); | ||
} | ||
} | ||
// not enough to speculate; try again next chunk | ||
else { | ||
break; | ||
} | ||
} | ||
// language tag | ||
else if('@' === x) { | ||
// prepare sticky regex index | ||
R_LANGTAG.lastIndex = i; | ||
// execute regex | ||
let m_langtag = R_LANGTAG.exec(s); | ||
// regex was a match | ||
if(m_langtag) { | ||
// advance index | ||
this.i = R_LANGTAG.lastIndex; | ||
// use direct factory method since regex is validation | ||
this._kt_object = this._dc_factory.languagedLiteral(this._s_literal, m_langtag[1]); | ||
// free literal string | ||
this._s_literal = ''; | ||
// graph state | ||
return this.post_object(); | ||
} | ||
// interrupted by eos; try again next chunk | ||
else { | ||
break; | ||
} | ||
} | ||
// graph component | ||
else if('<' === x || '_' === x) { | ||
// save simple literal | ||
this._kt_object = this._dc_factory.simpleLiteral(this._s_literal); | ||
// free literal string | ||
this._s_literal = ''; | ||
// continue parsing graph component | ||
return this.graph(); | ||
} | ||
// triple terminator | ||
else if('.' === x) { | ||
// save simple literal | ||
let kt_object = this._dc_factory.simpleLiteral(this._s_literal); | ||
// free literal string | ||
this._s_literal = ''; | ||
// advance index beyond terminator | ||
this.i = i + 1; | ||
// emit data event | ||
this._f_data_quad(this._kt_subject, this._kt_predicate, kt_object, this._kt_default_graph); | ||
// reset state | ||
return this.statement; | ||
// // consume whitespace (and incidentally reset index) | ||
// R_WS.lastIndex = i + 1; | ||
// R_WS.exec(s); | ||
// this.i = R_WS.lastIndex; | ||
// // done | ||
// return; | ||
} | ||
// other | ||
else { | ||
break; | ||
} | ||
} | ||
// ran out of characters | ||
// update index value | ||
this.i = i; | ||
// not yet eos | ||
if(i < this.n) { | ||
// expected token was not found | ||
if(0 === i) { | ||
// we've exceeded the maximum token length | ||
if(this.n > this.max_token_length) { | ||
return this.parse_error('datatype_or_langtag'); | ||
} | ||
} | ||
} | ||
// resume here | ||
this._f_state = this.datatype_or_langtag; | ||
// store what is unparsed | ||
this.s = s.slice(i); | ||
// if we're not parsing a stream, then this is an error | ||
if(this.eos) this.eos(); | ||
return 1; | ||
} | ||
statement_term() { | ||
let {s, n, i} = this; | ||
// find full stop | ||
let i_stop = s.indexOf('.', i); | ||
// found | ||
if(i_stop > -1) { | ||
// consume whitespace again | ||
this._b_trim_start = true; | ||
// advance beyond token | ||
this.i = i_stop + 1; | ||
// reset state | ||
return this.statement; | ||
} | ||
// anything other than whitespace | ||
else if(!/^\s*$/.test(s.slice(i))) { | ||
this.parse_error('statement_term'); | ||
} | ||
// do not consume whitespace | ||
this._b_trim_start = false; | ||
// resume here | ||
this._f_state = this.statement_term; | ||
// store what is unparsed | ||
this.s = s.slice(i); | ||
// if we're not parsing a stream, then this is an error | ||
if(this.eos) this.eos(); | ||
return 1; | ||
} | ||
post_object() { | ||
let {s, n, i} = this; | ||
// eat horizontal whitespace | ||
R_HWS.lastIndex = i; | ||
R_HWS.exec(s); | ||
i = R_HWS.lastIndex; | ||
// ran out of characters | ||
if(i >= n) { | ||
// resume here | ||
this._f_state = this.post_object; | ||
// store what is unparsed | ||
this.s = s.slice(i); | ||
// if we're not parsing a stream, then this is an error | ||
if(this.eos) this.eos(); | ||
return 1; | ||
} | ||
// depending on char | ||
switch(s[i]) { | ||
// statement term | ||
case '.': { | ||
// advance index beyond terminator | ||
this.i = i + 1; | ||
// emit data event | ||
this._f_data_quad(this._kt_subject, this._kt_predicate, this._kt_object, this._kt_default_graph); | ||
// reset state | ||
return this.statement; | ||
} | ||
// graph | ||
case '<': | ||
case '_': { | ||
// save index | ||
this.i = i; | ||
// consume graph component | ||
return this.graph(); | ||
} | ||
// invalid | ||
default: { | ||
// save index | ||
this.i = i; | ||
// emit parsing error | ||
this.parse_error('post_object'); | ||
} | ||
} | ||
} | ||
// parse state: read the graph label that follows the object term — either an
// IRI ref `<...>` or a blank node label `_:...` — then emit the quad and hand
// off to statement_term. Only entered when the caller has already seen a `<`
// or `_`; a failed match therefore means the token is split across chunks.
graph() {
	// destruct chunk, length, and index (`n` is unused here)
	let {s, n, i} = this;
	// try an IRI ref first: anchor the sticky regex at the current index
	R_IRIREF.lastIndex = i;
	// execute regex
	let m_iriref = R_IRIREF.exec(s);
	// regex was a match
	if(m_iriref) {
		// advance index past the matched `<...>`
		this.i = R_IRIREF.lastIndex;
		// create graph term
		// NOTE(review): unlike the datatype branch, the captured IRI is NOT run
		// through R_UNICODE_ANY here, so `\uXXXX`/`\UXXXXXXXX` escapes in a graph
		// label reach create_named_node unexpanded — confirm create_named_node
		// handles escape expansion itself
		let kt_graph = this.create_named_node(m_iriref[1]);
		// emit data event
		this._f_data_quad(this._kt_subject, this._kt_predicate, this._kt_object, kt_graph);
		// complete with statement_term
		return this.statement_term();
	}
	else {
		// otherwise try a blank node label: anchor the sticky regex
		R_BLANK_NODE.lastIndex = i;
		// execute regex
		let m_blank = R_BLANK_NODE.exec(s);
		// regex was a match
		if(m_blank) {
			// advance index past the label
			this.i = R_BLANK_NODE.lastIndex;
			// create graph term from the captured label (without the `_:` prefix)
			let kt_graph = this._dc_factory.blankNode(m_blank[1]);
			// emit data event
			this._f_data_quad(this._kt_subject, this._kt_predicate, this._kt_object, kt_graph);
			// complete with statement_term
			return this.statement_term();
		}
	}
	// neither matched: the token straddles a chunk boundary; resume here
	this._f_state = this.graph;
	// store what is unparsed
	this.s = s.slice(i);
	// if we're not parsing a stream, running out of input here is an error
	if(this.eos) this.eos();
	return 1;
}
parse_error(s_state) { | ||
return this._error(`Failed to read ${s_state}:\n\`${this.s.substr(this.i, 80).replace(/\n/g, '\u23CE')} [...]\`\n ^ starting here`); | ||
} | ||
destroy(e_destroy) { | ||
this.data = () => {}; | ||
this._f_data_quad = () => {}; | ||
@@ -665,0 +1269,0 @@ if(!e_destroy && this._ds_input) { |
{ | ||
"name": "@graphy/content.nq.read", | ||
"version": "4.0.0", | ||
"version": "4.0.1", | ||
"description": "Single-threaded RDF N-Quads content reader", | ||
"keywords": [ | ||
"linked-data", | ||
"n-quads", | ||
"n-triples", | ||
"nq", | ||
"nt", | ||
"rdf", | ||
"rdfjs", | ||
"linked-data", | ||
"semantic-web", | ||
"turtle", | ||
"ttl", | ||
"trig", | ||
"ttl", | ||
"turtle" | ||
"n-triples", | ||
"nt", | ||
"n-quads", | ||
"nq" | ||
], | ||
@@ -26,4 +26,4 @@ "repository": "blake-regalia/graphy.js", | ||
"dependencies": { | ||
"@graphy/core.data.factory": "^4.0.0", | ||
"@graphy/core.iso.stream": "^4.0.0" | ||
"@graphy/core.data.factory": "^4.0.1", | ||
"@graphy/core.iso.stream": "^4.0.1" | ||
}, | ||
@@ -30,0 +30,0 @@ "engines": { |
Major refactor
Supply chain risk: This package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include such changes.
Found 1 instance in 1 package
35658
1089
1