chevrotain - npm Package Compare versions

Comparing version 9.0.2 to 9.1.0
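
Nearly every change in this release is mechanical: call sites of imported helpers switch from utils_1.foo(...) to (0, utils_1.foo)(...). This is the indirect-call pattern that newer TypeScript compilers emit for calls through a CommonJS namespace import: the comma operator evaluates to the bare function, so the call passes no receiver, matching native ES module semantics where an imported function is never invoked as a method of the module object. In each change below the 9.0.2 line is shown first, immediately followed by its 9.1.0 replacement; hunks after the first belong to other compiled files under lib/src. A minimal sketch of the two call shapes (plain JavaScript, illustrative names):

"use strict";
// Stand-in for a CommonJS namespace import like `utils_1` above.
const utils_1 = {
  whoAmI() {
    return this;
  }
};

console.log(utils_1.whoAmI() === utils_1);        // true: method call, receiver is the namespace object
console.log((0, utils_1.whoAmI)() === undefined); // true: indirect call, no receiver in strict mode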


lib/src/lang/lang_extensions.js

@@ -34,3 +34,3 @@ "use strict";

/* istanbul ignore else -> will only run in old versions of node.js */
if (utils_1.isUndefined(namePropDescriptor) || namePropDescriptor.configurable) {
if ((0, utils_1.isUndefined)(namePropDescriptor) || namePropDescriptor.configurable) {
Object.defineProperty(obj, NAME, {

@@ -37,0 +37,0 @@ enumerable: false,

@@ -7,3 +7,3 @@ "use strict";

function defaultVisit(ctx, param) {
var childrenNames = utils_1.keys(ctx);
var childrenNames = (0, utils_1.keys)(ctx);
var childrenNamesLength = childrenNames.length;

@@ -31,7 +31,7 @@ for (var i = 0; i < childrenNamesLength; i++) {

// name?redirectlocale=en-US&redirectslug=JavaScript%2FReference%2FGlobal_Objects%2FFunction%2Fname
lang_extensions_1.defineNameProp(derivedConstructor, grammarName + "BaseSemantics");
(0, lang_extensions_1.defineNameProp)(derivedConstructor, grammarName + "BaseSemantics");
var semanticProto = {
visit: function (cstNode, param) {
// enables writing more concise visitor methods when CstNode has only a single child
if (utils_1.isArray(cstNode)) {
if ((0, utils_1.isArray)(cstNode)) {
// A CST Node's children dictionary can never have empty arrays as values

@@ -42,3 +42,3 @@ // If a key is defined there will be at least one element in the corresponding value array.

// enables passing optional CstNodes concisely.
if (utils_1.isUndefined(cstNode)) {
if ((0, utils_1.isUndefined)(cstNode)) {
return undefined;

@@ -50,5 +50,5 @@ }

var semanticDefinitionErrors = validateVisitor(this, ruleNames);
if (!utils_1.isEmpty(semanticDefinitionErrors)) {
var errorMessages = utils_1.map(semanticDefinitionErrors, function (currDefError) { return currDefError.msg; });
throw Error("Errors Detected in CST Visitor <" + lang_extensions_1.functionName(this.constructor) + ">:\n\t" + ("" + errorMessages.join("\n\n").replace(/\n/g, "\n\t")));
if (!(0, utils_1.isEmpty)(semanticDefinitionErrors)) {
var errorMessages = (0, utils_1.map)(semanticDefinitionErrors, function (currDefError) { return currDefError.msg; });
throw Error("Errors Detected in CST Visitor <" + (0, lang_extensions_1.functionName)(this.constructor) + ">:\n\t" + ("" + errorMessages.join("\n\n").replace(/\n/g, "\n\t")));
}

@@ -68,5 +68,5 @@ }

// name?redirectlocale=en-US&redirectslug=JavaScript%2FReference%2FGlobal_Objects%2FFunction%2Fname
lang_extensions_1.defineNameProp(derivedConstructor, grammarName + "BaseSemanticsWithDefaults");
(0, lang_extensions_1.defineNameProp)(derivedConstructor, grammarName + "BaseSemanticsWithDefaults");
var withDefaultsProto = Object.create(baseConstructor.prototype);
utils_1.forEach(ruleNames, function (ruleName) {
(0, utils_1.forEach)(ruleNames, function (ruleName) {
withDefaultsProto[ruleName] = defaultVisit;

@@ -91,6 +91,6 @@ });

function validateMissingCstMethods(visitorInstance, ruleNames) {
var errors = utils_1.map(ruleNames, function (currRuleName) {
if (!utils_1.isFunction(visitorInstance[currRuleName])) {
var errors = (0, utils_1.map)(ruleNames, function (currRuleName) {
if (!(0, utils_1.isFunction)(visitorInstance[currRuleName])) {
return {
msg: "Missing visitor method: <" + currRuleName + "> on " + lang_extensions_1.functionName(visitorInstance.constructor) + " CST Visitor.",
msg: "Missing visitor method: <" + currRuleName + "> on " + (0, lang_extensions_1.functionName)(visitorInstance.constructor) + " CST Visitor.",
type: CstVisitorDefinitionError.MISSING_METHOD,

@@ -101,3 +101,3 @@ methodName: currRuleName

});
return utils_1.compact(errors);
return (0, utils_1.compact)(errors);
}

@@ -109,7 +109,7 @@ exports.validateMissingCstMethods = validateMissingCstMethods;

for (var prop in visitorInstance) {
if (utils_1.isFunction(visitorInstance[prop]) &&
!utils_1.contains(VALID_PROP_NAMES, prop) &&
!utils_1.contains(ruleNames, prop)) {
if ((0, utils_1.isFunction)(visitorInstance[prop]) &&
!(0, utils_1.contains)(VALID_PROP_NAMES, prop) &&
!(0, utils_1.contains)(ruleNames, prop)) {
errors.push({
msg: "Redundant visitor method: <" + prop + "> on " + lang_extensions_1.functionName(visitorInstance.constructor) + " CST Visitor\n" +
msg: "Redundant visitor method: <" + prop + "> on " + (0, lang_extensions_1.functionName)(visitorInstance.constructor) + " CST Visitor\n" +
"There is no Grammar Rule corresponding to this method's name.\n",

@@ -116,0 +116,0 @@ type: CstVisitorDefinitionError.REDUNDANT_METHOD,
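
The helpers above (defaultVisit, validateMissingCstMethods, the redundant-method check) implement Chevrotain's CST visitor machinery. A minimal sketch of how they surface through the public API, assuming an existing parser instance with a grammar rule named expression (the parser and rule names here are illustrative):

// Sketch only: obtain a base visitor class and let validateVisitor()
// run the missing/redundant method checks shown in this diff.
const BaseVisitor = parser.getBaseCstVisitorConstructor();

class ExpressionVisitor extends BaseVisitor {
  constructor() {
    super();
    // Throws the "Errors Detected in CST Visitor <...>" error above if a
    // grammar rule has no matching method or a method matches no rule.
    this.validateVisitor();
  }

  expression(ctx) {
    // ctx maps sub-rule and token names to arrays of children.
    return ctx;
  }
}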

@@ -12,5 +12,5 @@ "use strict";

var expected = _a.expected, actual = _a.actual, previous = _a.previous, ruleName = _a.ruleName;
var hasLabel = tokens_public_1.hasTokenLabel(expected);
var hasLabel = (0, tokens_public_1.hasTokenLabel)(expected);
var expectedMsg = hasLabel
? "--> " + tokens_public_1.tokenLabel(expected) + " <--"
? "--> " + (0, tokens_public_1.tokenLabel)(expected) + " <--"
: "token of type --> " + expected.name + " <--";

@@ -28,3 +28,3 @@ var msg = "Expecting " + expectedMsg + " but found --> '" + actual.image + "' <--";

// TODO: issue: No Viable Alternative Error may have incomplete details. #502
var actualText = utils_1.first(actual).image;
var actualText = (0, utils_1.first)(actual).image;
var errSuffix = "\nbut found: '" + actualText + "'";

@@ -35,7 +35,7 @@ if (customUserDescription) {

else {
var allLookAheadPaths = utils_1.reduce(expectedPathsPerAlt, function (result, currAltPaths) { return result.concat(currAltPaths); }, []);
var nextValidTokenSequences = utils_1.map(allLookAheadPaths, function (currPath) {
return "[" + utils_1.map(currPath, function (currTokenType) { return tokens_public_1.tokenLabel(currTokenType); }).join(", ") + "]";
var allLookAheadPaths = (0, utils_1.reduce)(expectedPathsPerAlt, function (result, currAltPaths) { return result.concat(currAltPaths); }, []);
var nextValidTokenSequences = (0, utils_1.map)(allLookAheadPaths, function (currPath) {
return "[" + (0, utils_1.map)(currPath, function (currTokenType) { return (0, tokens_public_1.tokenLabel)(currTokenType); }).join(", ") + "]";
});
var nextValidSequenceItems = utils_1.map(nextValidTokenSequences, function (itemMsg, idx) { return " " + (idx + 1) + ". " + itemMsg; });
var nextValidSequenceItems = (0, utils_1.map)(nextValidTokenSequences, function (itemMsg, idx) { return " " + (idx + 1) + ". " + itemMsg; });
var calculatedDescription = "one of these possible Token sequences:\n" + nextValidSequenceItems.join("\n");

@@ -49,3 +49,3 @@ return errPrefix + calculatedDescription + errSuffix;

// TODO: issue: No Viable Alternative Error may have incomplete details. #502
var actualText = utils_1.first(actual).image;
var actualText = (0, utils_1.first)(actual).image;
var errSuffix = "\nbut found: '" + actualText + "'";

@@ -56,4 +56,4 @@ if (customUserDescription) {

else {
var nextValidTokenSequences = utils_1.map(expectedIterationPaths, function (currPath) {
return "[" + utils_1.map(currPath, function (currTokenType) { return tokens_public_1.tokenLabel(currTokenType); }).join(",") + "]";
var nextValidTokenSequences = (0, utils_1.map)(expectedIterationPaths, function (currPath) {
return "[" + (0, utils_1.map)(currPath, function (currTokenType) { return (0, tokens_public_1.tokenLabel)(currTokenType); }).join(",") + "]";
});

@@ -92,5 +92,5 @@ var calculatedDescription = "expecting at least one iteration which starts with one of these possible Token sequences::\n " +

var topLevelName = topLevelRule.name;
var duplicateProd = utils_1.first(duplicateProds);
var duplicateProd = (0, utils_1.first)(duplicateProds);
var index = duplicateProd.idx;
var dslName = gast_1.getProductionDslName(duplicateProd);
var dslName = (0, gast_1.getProductionDslName)(duplicateProd);
var extraArgument = getExtraProductionArgument(duplicateProd);

@@ -113,4 +113,4 @@ var hasExplicitIndex = index > 0;

buildAlternationPrefixAmbiguityError: function (options) {
var pathMsg = utils_1.map(options.prefixPath, function (currTok) {
return tokens_public_1.tokenLabel(currTok);
var pathMsg = (0, utils_1.map)(options.prefixPath, function (currTok) {
return (0, tokens_public_1.tokenLabel)(currTok);
}).join(", ");

@@ -126,4 +126,4 @@ var occurrence = options.alternation.idx === 0 ? "" : options.alternation.idx;

buildAlternationAmbiguityError: function (options) {
var pathMsg = utils_1.map(options.prefixPath, function (currtok) {
return tokens_public_1.tokenLabel(currtok);
var pathMsg = (0, utils_1.map)(options.prefixPath, function (currtok) {
return (0, tokens_public_1.tokenLabel)(currtok);
}).join(", ");

@@ -141,3 +141,3 @@ var occurrence = options.alternation.idx === 0 ? "" : options.alternation.idx;

buildEmptyRepetitionError: function (options) {
var dslName = gast_1.getProductionDslName(options.repetition);
var dslName = (0, gast_1.getProductionDslName)(options.repetition);
if (options.repetition.idx !== 0) {

@@ -144,0 +144,0 @@ dslName += options.repetition.idx;
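
These build* functions belong to Chevrotain's default error message providers. A parser can replace individual builders through the errorMessageProvider configuration; a hedged sketch, spreading the exported default provider (the message text is illustrative):

const { defaultParserErrorProvider } = require("chevrotain");

const myErrorProvider = {
  ...defaultParserErrorProvider,
  buildMismatchTokenMessage(options) {
    // options carries { expected, actual, previous, ruleName }, as destructured above.
    return "Expected " + options.expected.name + " but got '" + options.actual.image + "'";
  }
};

// Passed at construction time, e.g.:
// new MyParser(allTokens, { errorMessageProvider: myErrorProvider });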

@@ -34,3 +34,3 @@ "use strict";

// can't do instanceof on hacked custom js exceptions
return utils_1.contains(RECOGNITION_EXCEPTION_NAMES, error.name);
return (0, utils_1.contains)(RECOGNITION_EXCEPTION_NAMES, error.name);
}

@@ -37,0 +37,0 @@ exports.isRecognitionException = isRecognitionException;
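
isRecognitionException is part of the public API; the name-based check above exists precisely because instanceof can fail on exceptions that cross bundle or realm boundaries. A sketch of using it to tell recognition errors apart from ordinary exceptions (the wrapped callback is hypothetical):

const { isRecognitionException } = require("chevrotain");

function runParseStep(fn) {
  try {
    return fn();
  } catch (e) {
    if (isRecognitionException(e)) {
      // a Chevrotain recognition error (MismatchedTokenException, NoViableAltException, ...)
      console.error("recognition error:", e.message);
      return undefined;
    }
    throw e; // not a parse error: rethrow
  }
}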

@@ -39,7 +39,7 @@ "use strict";

// It is safest to first have the user fix the left recursion errors first and only then examine Further issues.
if (utils_1.every(leftRecursionErrors, utils_1.isEmpty)) {
emptyAltErrors = utils_1.map(topLevels, function (currTopRule) {
if ((0, utils_1.every)(leftRecursionErrors, utils_1.isEmpty)) {
emptyAltErrors = (0, utils_1.map)(topLevels, function (currTopRule) {
return validateEmptyOrAlternative(currTopRule, errMsgProvider);
});
ambiguousAltsErrors = utils_1.map(topLevels, function (currTopRule) {
ambiguousAltsErrors = (0, utils_1.map)(topLevels, function (currTopRule) {
return validateAmbiguousAlternationAlternatives(currTopRule, globalMaxLookahead, errMsgProvider);

@@ -50,6 +50,6 @@ });

var termsNamespaceConflictErrors = checkTerminalAndNoneTerminalsNameSpace(topLevels, tokenTypes, errMsgProvider);
var tooManyAltsErrors = utils_1.map(topLevels, function (curRule) {
var tooManyAltsErrors = (0, utils_1.map)(topLevels, function (curRule) {
return validateTooManyAlts(curRule, errMsgProvider);
});
var duplicateRulesError = utils_1.map(topLevels, function (curRule) {
var duplicateRulesError = (0, utils_1.map)(topLevels, function (curRule) {
return validateRuleDoesNotAlreadyExist(curRule, topLevels, grammarName, errMsgProvider);

@@ -71,3 +71,3 @@ });

var msg = errMsgProvider.buildDuplicateFoundError(topLevelRule, currDuplicates);
var dslName = gast_1.getProductionDslName(firstProd);
var dslName = (0, gast_1.getProductionDslName)(firstProd);
var defError = {

@@ -89,3 +89,3 @@ message: msg,

function identifyProductionForDuplicates(prod) {
return gast_1.getProductionDslName(prod) + "_#_" + prod.idx + "_#_" + getExtraProductionArgument(prod);
return (0, gast_1.getProductionDslName)(prod) + "_#_" + prod.idx + "_#_" + getExtraProductionArgument(prod);
}

@@ -140,3 +140,3 @@ exports.identifyProductionForDuplicates = identifyProductionForDuplicates;

var errors = [];
var occurrences = utils_1.reduce(allRules, function (result, curRule) {
var occurrences = (0, utils_1.reduce)(allRules, function (result, curRule) {
if (curRule.name === rule.name) {

@@ -242,3 +242,3 @@ return result + 1;

}
var isFirstOptional = gast_1.isOptionalProd(firstProd);
var isFirstOptional = (0, gast_1.isOptionalProd)(firstProd);
var hasMore = definition.length > 1;

@@ -273,3 +273,3 @@ if (isFirstOptional && hasMore) {

var currErrors = utils.map(exceptLast, function (currAlternative, currAltIdx) {
var possibleFirstInAlt = interpreter_1.nextPossibleTokensAfter([currAlternative], [], null, 1);
var possibleFirstInAlt = (0, interpreter_1.nextPossibleTokensAfter)([currAlternative], [], null, 1);
if (utils.isEmpty(possibleFirstInAlt)) {

@@ -303,7 +303,7 @@ return {

// - https://github.com/chevrotain/chevrotain/issues/869
ors = utils_1.reject(ors, function (currOr) { return currOr.ignoreAmbiguities === true; });
ors = (0, utils_1.reject)(ors, function (currOr) { return currOr.ignoreAmbiguities === true; });
var errors = utils.reduce(ors, function (result, currOr) {
var currOccurrence = currOr.idx;
var actualMaxLookahead = currOr.maxLookahead || globalMaxLookahead;
var alternatives = lookahead_1.getLookaheadPathsForOr(currOccurrence, topLevelRule, actualMaxLookahead, currOr);
var alternatives = (0, lookahead_1.getLookaheadPathsForOr)(currOccurrence, topLevelRule, actualMaxLookahead, currOr);
var altsAmbiguityErrors = checkAlternativesAmbiguities(alternatives, currOr, topLevelRule, errMsgProvider);

@@ -361,13 +361,13 @@ var altsPrefixAmbiguityErrors = checkPrefixAlternativesAmbiguities(alternatives, currOr, topLevelRule, errMsgProvider);

var errors = [];
utils_1.forEach(topLevelRules, function (currTopRule) {
(0, utils_1.forEach)(topLevelRules, function (currTopRule) {
var collectorVisitor = new RepetionCollector();
currTopRule.accept(collectorVisitor);
var allRuleProductions = collectorVisitor.allProductions;
utils_1.forEach(allRuleProductions, function (currProd) {
var prodType = lookahead_1.getProdType(currProd);
(0, utils_1.forEach)(allRuleProductions, function (currProd) {
var prodType = (0, lookahead_1.getProdType)(currProd);
var actualMaxLookahead = currProd.maxLookahead || maxLookahead;
var currOccurrence = currProd.idx;
var paths = lookahead_1.getLookaheadPathsForOptionalProd(currOccurrence, currTopRule, prodType, actualMaxLookahead);
var paths = (0, lookahead_1.getLookaheadPathsForOptionalProd)(currOccurrence, currTopRule, prodType, actualMaxLookahead);
var pathsInsideProduction = paths[0];
if (utils_1.isEmpty(utils_1.flatten(pathsInsideProduction))) {
if ((0, utils_1.isEmpty)((0, utils_1.flatten)(pathsInsideProduction))) {
var errMsg = errMsgProvider.buildEmptyRepetitionError({

@@ -390,3 +390,3 @@ topLevelRule: currTopRule,

var foundAmbiguousPaths = [];
var identicalAmbiguities = utils_1.reduce(alternatives, function (result, currAlt, currAltIdx) {
var identicalAmbiguities = (0, utils_1.reduce)(alternatives, function (result, currAlt, currAltIdx) {
// ignore (skip) ambiguities with this alternative

@@ -396,7 +396,7 @@ if (alternation.definition[currAltIdx].ignoreAmbiguities === true) {

}
utils_1.forEach(currAlt, function (currPath) {
(0, utils_1.forEach)(currAlt, function (currPath) {
var altsCurrPathAppearsIn = [currAltIdx];
utils_1.forEach(alternatives, function (currOtherAlt, currOtherAltIdx) {
(0, utils_1.forEach)(alternatives, function (currOtherAlt, currOtherAltIdx) {
if (currAltIdx !== currOtherAltIdx &&
lookahead_1.containsPath(currOtherAlt, currPath) &&
(0, lookahead_1.containsPath)(currOtherAlt, currPath) &&
// ignore (skip) ambiguities with this "other" alternative

@@ -408,3 +408,3 @@ alternation.definition[currOtherAltIdx].ignoreAmbiguities !== true) {

if (altsCurrPathAppearsIn.length > 1 &&
!lookahead_1.containsPath(foundAmbiguousPaths, currPath)) {
!(0, lookahead_1.containsPath)(foundAmbiguousPaths, currPath)) {
foundAmbiguousPaths.push(currPath);

@@ -420,3 +420,3 @@ result.push({

var currErrors = utils.map(identicalAmbiguities, function (currAmbDescriptor) {
var ambgIndices = utils_1.map(currAmbDescriptor.alts, function (currAltIdx) { return currAltIdx + 1; });
var ambgIndices = (0, utils_1.map)(currAmbDescriptor.alts, function (currAltIdx) { return currAltIdx + 1; });
var currMessage = errMsgProvider.buildAlternationAmbiguityError({

@@ -441,4 +441,4 @@ topLevelRule: rule,

// flatten
var pathsAndIndices = utils_1.reduce(alternatives, function (result, currAlt, idx) {
var currPathsAndIdx = utils_1.map(currAlt, function (currPath) {
var pathsAndIndices = (0, utils_1.reduce)(alternatives, function (result, currAlt, idx) {
var currPathsAndIdx = (0, utils_1.map)(currAlt, function (currPath) {
return { idx: idx, path: currPath };

@@ -448,3 +448,3 @@ });

}, []);
utils_1.forEach(pathsAndIndices, function (currPathAndIdx) {
(0, utils_1.forEach)(pathsAndIndices, function (currPathAndIdx) {
var alternativeGast = alternation.definition[currPathAndIdx.idx];

@@ -457,3 +457,3 @@ // ignore (skip) ambiguities with this alternative

var targetPath = currPathAndIdx.path;
var prefixAmbiguitiesPathsAndIndices = utils_1.findAll(pathsAndIndices, function (searchPathAndIdx) {
var prefixAmbiguitiesPathsAndIndices = (0, utils_1.findAll)(pathsAndIndices, function (searchPathAndIdx) {
// prefix ambiguity can only be created from lower idx (higher priority) path

@@ -467,5 +467,5 @@ return (

// will be be detected using a different validation.
lookahead_1.isStrictPrefixOfPath(searchPathAndIdx.path, targetPath));
(0, lookahead_1.isStrictPrefixOfPath)(searchPathAndIdx.path, targetPath));
});
var currPathPrefixErrors = utils_1.map(prefixAmbiguitiesPathsAndIndices, function (currAmbPathAndIdx) {
var currPathPrefixErrors = (0, utils_1.map)(prefixAmbiguitiesPathsAndIndices, function (currAmbPathAndIdx) {
var ambgIndices = [currAmbPathAndIdx.idx + 1, targetIdx + 1];

@@ -494,6 +494,6 @@ var occurrence = alternation.idx === 0 ? "" : alternation.idx;

var errors = [];
var tokenNames = utils_1.map(tokenTypes, function (currToken) { return currToken.name; });
utils_1.forEach(topLevels, function (currRule) {
var tokenNames = (0, utils_1.map)(tokenTypes, function (currToken) { return currToken.name; });
(0, utils_1.forEach)(topLevels, function (currRule) {
var currRuleName = currRule.name;
if (utils_1.contains(tokenNames, currRuleName)) {
if ((0, utils_1.contains)(tokenNames, currRuleName)) {
var errMsg = errMsgProvider.buildNamespaceConflictError(currRule);

@@ -500,0 +500,0 @@ errors.push({

@@ -23,6 +23,6 @@ "use strict";

}
else if (gast_1.isSequenceProd(prod)) {
else if ((0, gast_1.isSequenceProd)(prod)) {
return firstForSequence(prod);
}
else if (gast_1.isBranchingProd(prod)) {
else if ((0, gast_1.isBranchingProd)(prod)) {
return firstForBranching(prod);

@@ -46,3 +46,3 @@ }

currSubProd = seq[nextSubProdIdx];
isLastInnerProdOptional = gast_1.isOptionalProd(currSubProd);
isLastInnerProdOptional = (0, gast_1.isOptionalProd)(currSubProd);
firstSet = firstSet.concat(first(currSubProd));

@@ -52,10 +52,10 @@ nextSubProdIdx = nextSubProdIdx + 1;

}
return utils_1.uniq(firstSet);
return (0, utils_1.uniq)(firstSet);
}
exports.firstForSequence = firstForSequence;
function firstForBranching(prod) {
var allAlternativesFirsts = utils_1.map(prod.definition, function (innerProd) {
var allAlternativesFirsts = (0, utils_1.map)(prod.definition, function (innerProd) {
return first(innerProd);
});
return utils_1.uniq(utils_1.flatten(allAlternativesFirsts));
return (0, utils_1.uniq)((0, utils_1.flatten)(allAlternativesFirsts));
}

@@ -62,0 +62,0 @@ exports.firstForBranching = firstForBranching;
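
first, firstForSequence and firstForBranching compute classic FIRST sets: a sequence keeps accumulating the FIRST set of each sub-production for as long as the preceding ones are optional, and an alternation unions FIRST across its alternatives. A self-contained sketch of the same logic on plain data (illustrative shapes, not Chevrotain's real GAST classes):

// FIRST( B? C ) where C = x | y  -->  ["B", "x", "y"]
function first(prod) {
  if (prod.type === "terminal") return [prod.name];
  if (prod.type === "alt") {
    // firstForBranching: union over all alternatives
    return [...new Set(prod.alts.flatMap(first))];
  }
  // firstForSequence: stop once a non-optional sub-production is reached
  let result = [];
  for (const sub of prod.seq) {
    result = result.concat(first(sub));
    if (!sub.optional) break;
  }
  return [...new Set(result)];
}

console.log(first({
  type: "seq",
  seq: [
    { type: "terminal", name: "B", optional: true },
    { type: "alt", alts: [{ type: "terminal", name: "x" }, { type: "terminal", name: "y" }] }
  ]
}));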

@@ -46,3 +46,3 @@ "use strict";

var restProd = new gast_public_1.Alternative({ definition: fullRest });
var t_in_topProd_follows = first_1.first(restProd);
var t_in_topProd_follows = (0, first_1.first)(restProd);
this.follows[followName] = t_in_topProd_follows;

@@ -55,5 +55,5 @@ };

var reSyncFollows = {};
utils_1.forEach(topProductions, function (topProd) {
(0, utils_1.forEach)(topProductions, function (topProd) {
var currRefsFollow = new ResyncFollowsWalker(topProd).startWalking();
utils_1.assign(reSyncFollows, currRefsFollow);
(0, utils_1.assign)(reSyncFollows, currRefsFollow);
});

@@ -60,0 +60,0 @@ return reSyncFollows;
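
computeAllProdsFollows and the ResyncFollowsWalker above feed Chevrotain's re-sync error recovery, and the parser only runs them when recovery is turned on (see the recoveryEnabled branch in the parser initialization later in this diff). A sketch of enabling that path, assuming a standard token array (names illustrative):

const { CstParser } = require("chevrotain");

class RecoveringParser extends CstParser {
  constructor(allTokens) {
    super(allTokens, { recoveryEnabled: true }); // opts into re-sync recovery
    // ...rule definitions go here...
    this.performSelfAnalysis(); // triggers computeAllProdsFollows during init
  }
}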

@@ -37,3 +37,3 @@ "use strict";

visitor.visit(this);
utils_1.forEach(this.definition, function (prod) {
(0, utils_1.forEach)(this.definition, function (prod) {
prod.accept(visitor);

@@ -50,3 +50,3 @@ });

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -79,3 +79,3 @@ }

_this.orgText = "";
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -91,3 +91,3 @@ }

_this.ignoreAmbiguities = false;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -103,3 +103,3 @@ }

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -115,3 +115,3 @@ }

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -127,3 +127,3 @@ }

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -139,3 +139,3 @@ }

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -151,3 +151,3 @@ }

_this.idx = 1;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -165,3 +165,3 @@ }

_this.hasPredicates = false;
utils_1.assign(_this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(_this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
return _this;

@@ -185,3 +185,3 @@ }

this.idx = 1;
utils_1.assign(this, utils_1.pick(options, function (v) { return v !== undefined; }));
(0, utils_1.assign)(this, (0, utils_1.pick)(options, function (v) { return v !== undefined; }));
}

@@ -195,3 +195,3 @@ Terminal.prototype.accept = function (visitor) {

function serializeGrammar(topRules) {
return utils_1.map(topRules, serializeProduction);
return (0, utils_1.map)(topRules, serializeProduction);
}

@@ -201,3 +201,3 @@ exports.serializeGrammar = serializeGrammar;

function convertDefinition(definition) {
return utils_1.map(definition, serializeProduction);
return (0, utils_1.map)(definition, serializeProduction);
}

@@ -211,3 +211,3 @@ /* istanbul ignore else */

};
if (utils_1.isString(node.label)) {
if ((0, utils_1.isString)(node.label)) {
serializedNonTerminal.label = node.label;

@@ -271,6 +271,6 @@ }

name: node.terminalType.name,
label: tokens_public_1.tokenLabel(node.terminalType),
label: (0, tokens_public_1.tokenLabel)(node.terminalType),
idx: node.idx
};
if (utils_1.isString(node.label)) {
if ((0, utils_1.isString)(node.label)) {
serializedTerminal.terminalLabel = node.label;

@@ -280,3 +280,3 @@ }

if (node.terminalType.PATTERN) {
serializedTerminal.pattern = utils_1.isRegExp(pattern)
serializedTerminal.pattern = (0, utils_1.isRegExp)(pattern)
? pattern.source

@@ -283,0 +283,0 @@ : pattern;
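
serializeGrammar and convertDefinition produce the JSON grammar representation that parsers expose publicly as getSerializedGastProductions (used, for example, to generate syntax diagrams). A usage sketch, assuming an already-constructed parser instance:

// Sketch: the serializer above backs this public method.
const serialized = parser.getSerializedGastProductions();
// Each entry describes one top-level rule, including the terminal
// labels and patterns serialized in the code above.
console.log(JSON.stringify(serialized[0], null, 2));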

@@ -9,19 +9,19 @@ "use strict";

function resolveGrammar(options) {
options = utils_1.defaults(options, {
options = (0, utils_1.defaults)(options, {
errMsgProvider: errors_public_1.defaultGrammarResolverErrorProvider
});
var topRulesTable = {};
utils_1.forEach(options.rules, function (rule) {
(0, utils_1.forEach)(options.rules, function (rule) {
topRulesTable[rule.name] = rule;
});
return resolver_1.resolveGrammar(topRulesTable, options.errMsgProvider);
return (0, resolver_1.resolveGrammar)(topRulesTable, options.errMsgProvider);
}
exports.resolveGrammar = resolveGrammar;
function validateGrammar(options) {
options = utils_1.defaults(options, {
options = (0, utils_1.defaults)(options, {
errMsgProvider: errors_public_1.defaultGrammarValidatorErrorProvider
});
return checks_1.validateGrammar(options.rules, options.maxLookahead, options.tokenTypes, options.errMsgProvider, options.grammarName);
return (0, checks_1.validateGrammar)(options.rules, options.maxLookahead, options.tokenTypes, options.errMsgProvider, options.grammarName);
}
exports.validateGrammar = validateGrammar;
//# sourceMappingURL=gast_resolver_public.js.map

@@ -46,7 +46,7 @@ "use strict";

// for OR its enough for just one of the alternatives to be optional
return utils_1.some(prod.definition, function (subProd) {
return (0, utils_1.some)(prod.definition, function (subProd) {
return isOptionalProd(subProd, alreadyVisited);
});
}
else if (prod instanceof gast_public_1.NonTerminal && utils_1.contains(alreadyVisited, prod)) {
else if (prod instanceof gast_public_1.NonTerminal && (0, utils_1.contains)(alreadyVisited, prod)) {
// avoiding stack overflow due to infinite recursion

@@ -59,3 +59,3 @@ return false;

}
return utils_1.every(prod.definition, function (subProd) {
return (0, utils_1.every)(prod.definition, function (subProd) {
return isOptionalProd(subProd, alreadyVisited);

@@ -132,3 +132,3 @@ });

var key = terminal.terminalType.name + this.separator + "Terminal";
if (!utils_1.has(this.dslMethods, key)) {
if (!(0, utils_1.has)(this.dslMethods, key)) {
this.dslMethods[key] = [];

@@ -140,3 +140,3 @@ }

var key = subrule.nonTerminalName + this.separator + "Terminal";
if (!utils_1.has(this.dslMethods, key)) {
if (!(0, utils_1.has)(this.dslMethods, key)) {
this.dslMethods[key] = [];

@@ -143,0 +143,0 @@ }

@@ -42,4 +42,4 @@ "use strict";

// immutable for the win
this.ruleStack = utils_1.cloneArr(this.path.ruleStack).reverse(); // intelij bug requires assertion
this.occurrenceStack = utils_1.cloneArr(this.path.occurrenceStack).reverse(); // intelij bug requires assertion
this.ruleStack = (0, utils_1.cloneArr)(this.path.ruleStack).reverse(); // intelij bug requires assertion
this.occurrenceStack = (0, utils_1.cloneArr)(this.path.occurrenceStack).reverse(); // intelij bug requires assertion
// already verified that the first production is valid, we now seek the 2nd production

@@ -70,3 +70,3 @@ this.ruleStack.pop();

// need to consume the Terminal
if (utils_1.isEmpty(this.ruleStack)) {
if ((0, utils_1.isEmpty)(this.ruleStack)) {
// must reset nextProductionXXX to avoid walking down another Top Level production while what we are

@@ -104,3 +104,3 @@ // really seeking is the last Terminal...

var restProd = new gast_public_1.Alternative({ definition: fullRest });
this.possibleTokTypes = first_1.first(restProd);
this.possibleTokTypes = (0, first_1.first)(restProd);
this.found = true;

@@ -143,3 +143,3 @@ }

if (manyProd.idx === this.occurrence) {
var firstAfterMany = utils_1.first(currRest.concat(prevRest));
var firstAfterMany = (0, utils_1.first)(currRest.concat(prevRest));
this.result.isEndOfRule = firstAfterMany === undefined;

@@ -165,3 +165,3 @@ if (firstAfterMany instanceof gast_public_1.Terminal) {

if (manySepProd.idx === this.occurrence) {
var firstAfterManySep = utils_1.first(currRest.concat(prevRest));
var firstAfterManySep = (0, utils_1.first)(currRest.concat(prevRest));
this.result.isEndOfRule = firstAfterManySep === undefined;

@@ -187,3 +187,3 @@ if (firstAfterManySep instanceof gast_public_1.Terminal) {

if (atLeastOneProd.idx === this.occurrence) {
var firstAfterAtLeastOne = utils_1.first(currRest.concat(prevRest));
var firstAfterAtLeastOne = (0, utils_1.first)(currRest.concat(prevRest));
this.result.isEndOfRule = firstAfterAtLeastOne === undefined;

@@ -210,3 +210,3 @@ if (firstAfterAtLeastOne instanceof gast_public_1.Terminal) {

if (atleastOneSepProd.idx === this.occurrence) {
var firstAfterfirstAfterAtLeastOneSep = utils_1.first(currRest.concat(prevRest));
var firstAfterfirstAfterAtLeastOneSep = (0, utils_1.first)(currRest.concat(prevRest));
this.result.isEndOfRule = firstAfterfirstAfterAtLeastOneSep === undefined;

@@ -228,3 +228,3 @@ if (firstAfterfirstAfterAtLeastOneSep instanceof gast_public_1.Terminal) {

// avoid side effects
currPath = utils_1.cloneArr(currPath);
currPath = (0, utils_1.cloneArr)(currPath);
var result = [];

@@ -234,3 +234,3 @@ var i = 0;

function remainingPathWith(nextDef) {
return nextDef.concat(utils_1.drop(targetDef, i + 1));
return nextDef.concat((0, utils_1.drop)(targetDef, i + 1));
}

@@ -295,7 +295,7 @@ // TODO: avoid inner funcs

else if (prod instanceof gast_public_1.Alternation) {
utils_1.forEach(prod.definition, function (currAlt) {
(0, utils_1.forEach)(prod.definition, function (currAlt) {
// TODO: this is a limited check for empty alternatives
// It would prevent a common case of infinite loops during parser initialization.
// However **in-directly** empty alternatives may still cause issues.
if (utils_1.isEmpty(currAlt.definition) === false) {
if ((0, utils_1.isEmpty)(currAlt.definition) === false) {
result = getAlternativesForProd(currAlt.definition);

@@ -316,3 +316,3 @@ }

partialPath: currPath,
suffixDef: utils_1.drop(targetDef, i)
suffixDef: (0, utils_1.drop)(targetDef, i)
});

@@ -338,3 +338,3 @@ return result;

});
while (!utils_1.isEmpty(possiblePaths)) {
while (!(0, utils_1.isEmpty)(possiblePaths)) {
var currPath = possiblePaths.pop();

@@ -344,3 +344,3 @@ // skip alternatives if no more results can be found (assuming deterministic grammar with fixed lookahead)

if (foundCompletePath &&
utils_1.last(possiblePaths).idx <= minimalAlternativesIndex) {
(0, utils_1.last)(possiblePaths).idx <= minimalAlternativesIndex) {
// remove irrelevant alternative

@@ -356,3 +356,3 @@ possiblePaths.pop();

// For Example: an empty path could exist in a valid grammar in the case of an EMPTY_ALT
if (utils_1.isEmpty(currDef)) {
if ((0, utils_1.isEmpty)(currDef)) {
continue;

@@ -365,5 +365,5 @@ }

idx: currIdx,
def: utils_1.drop(currDef),
ruleStack: utils_1.dropRight(currRuleStack),
occurrenceStack: utils_1.dropRight(currOccurrenceStack)
def: (0, utils_1.drop)(currDef),
ruleStack: (0, utils_1.dropRight)(currRuleStack),
occurrenceStack: (0, utils_1.dropRight)(currOccurrenceStack)
};

@@ -380,3 +380,3 @@ possiblePaths.push(nextPath);

idx: nextIdx,
def: utils_1.drop(currDef),
def: (0, utils_1.drop)(currDef),
ruleStack: currRuleStack,

@@ -404,9 +404,9 @@ occurrenceStack: currOccurrenceStack

else if (prod instanceof gast_public_1.NonTerminal) {
var newRuleStack = utils_1.cloneArr(currRuleStack);
var newRuleStack = (0, utils_1.cloneArr)(currRuleStack);
newRuleStack.push(prod.nonTerminalName);
var newOccurrenceStack = utils_1.cloneArr(currOccurrenceStack);
var newOccurrenceStack = (0, utils_1.cloneArr)(currOccurrenceStack);
newOccurrenceStack.push(prod.idx);
var nextPath = {
idx: currIdx,
def: prod.definition.concat(EXIT_NON_TERMINAL_ARR, utils_1.drop(currDef)),
def: prod.definition.concat(EXIT_NON_TERMINAL_ARR, (0, utils_1.drop)(currDef)),
ruleStack: newRuleStack,

@@ -421,3 +421,3 @@ occurrenceStack: newOccurrenceStack

idx: currIdx,
def: utils_1.drop(currDef),
def: (0, utils_1.drop)(currDef),
ruleStack: currRuleStack,

@@ -431,3 +431,3 @@ occurrenceStack: currOccurrenceStack

idx: currIdx,
def: prod.definition.concat(utils_1.drop(currDef)),
def: prod.definition.concat((0, utils_1.drop)(currDef)),
ruleStack: currRuleStack,

@@ -444,3 +444,3 @@ occurrenceStack: currOccurrenceStack

});
var nextDef = prod.definition.concat([secondIteration], utils_1.drop(currDef));
var nextDef = prod.definition.concat([secondIteration], (0, utils_1.drop)(currDef));
var nextPath = {

@@ -463,3 +463,3 @@ idx: currIdx,

});
var nextDef = prod.definition.concat([secondIteration], utils_1.drop(currDef));
var nextDef = prod.definition.concat([secondIteration], (0, utils_1.drop)(currDef));
var nextPath = {

@@ -477,3 +477,3 @@ idx: currIdx,

idx: currIdx,
def: utils_1.drop(currDef),
def: (0, utils_1.drop)(currDef),
ruleStack: currRuleStack,

@@ -492,3 +492,3 @@ occurrenceStack: currOccurrenceStack

});
var nextDef = prod.definition.concat([nthRepetition], utils_1.drop(currDef));
var nextDef = prod.definition.concat([nthRepetition], (0, utils_1.drop)(currDef));
var nextPathWith = {

@@ -506,3 +506,3 @@ idx: currIdx,

idx: currIdx,
def: utils_1.drop(currDef),
def: (0, utils_1.drop)(currDef),
ruleStack: currRuleStack,

@@ -519,3 +519,3 @@ occurrenceStack: currOccurrenceStack

});
var nextDef = prod.definition.concat([nthRepetition], utils_1.drop(currDef));
var nextDef = prod.definition.concat([nthRepetition], (0, utils_1.drop)(currDef));
var nextPathWith = {

@@ -535,3 +535,3 @@ idx: currIdx,

idx: currIdx,
def: currAlt.definition.concat(utils_1.drop(currDef)),
def: currAlt.definition.concat((0, utils_1.drop)(currDef)),
ruleStack: currRuleStack,

@@ -547,3 +547,3 @@ occurrenceStack: currOccurrenceStack

idx: currIdx,
def: prod.definition.concat(utils_1.drop(currDef)),
def: prod.definition.concat((0, utils_1.drop)(currDef)),
ruleStack: currRuleStack,

@@ -565,5 +565,5 @@ occurrenceStack: currOccurrenceStack

function expandTopLevelRule(topRule, currIdx, currRuleStack, currOccurrenceStack) {
var newRuleStack = utils_1.cloneArr(currRuleStack);
var newRuleStack = (0, utils_1.cloneArr)(currRuleStack);
newRuleStack.push(topRule.name);
var newCurrOccurrenceStack = utils_1.cloneArr(currOccurrenceStack);
var newCurrOccurrenceStack = (0, utils_1.cloneArr)(currOccurrenceStack);
// top rule is always assumed to have been called with occurrence index 1

@@ -570,0 +570,0 @@ newCurrOccurrenceStack.push(1);

@@ -89,4 +89,4 @@ "use strict";

var numOfAlts = alts.length;
var areAllOneTokenLookahead = utils_1.every(alts, function (currAlt) {
return utils_1.every(currAlt, function (currPath) {
var areAllOneTokenLookahead = (0, utils_1.every)(alts, function (currAlt) {
return (0, utils_1.every)(currAlt, function (currPath) {
return currPath.length === 1;

@@ -104,3 +104,3 @@ });

// note that in the common case of no predicates, no cpu time will be wasted on this (see else block)
var predicates = utils_1.map(orAlts, function (currAlt) { return currAlt.GATE; });
var predicates = (0, utils_1.map)(orAlts, function (currAlt) { return currAlt.GATE; });
for (var t = 0; t < numOfAlts; t++) {

@@ -139,12 +139,12 @@ var currAlt = alts[t];

// a single token lookahead. These Optimizations cannot work if dynamically defined Tokens are used.
var singleTokenAlts = utils_1.map(alts, function (currAlt) {
return utils_1.flatten(currAlt);
var singleTokenAlts = (0, utils_1.map)(alts, function (currAlt) {
return (0, utils_1.flatten)(currAlt);
});
var choiceToAlt_1 = utils_1.reduce(singleTokenAlts, function (result, currAlt, idx) {
utils_1.forEach(currAlt, function (currTokType) {
if (!utils_1.has(result, currTokType.tokenTypeIdx)) {
var choiceToAlt_1 = (0, utils_1.reduce)(singleTokenAlts, function (result, currAlt, idx) {
(0, utils_1.forEach)(currAlt, function (currTokType) {
if (!(0, utils_1.has)(result, currTokType.tokenTypeIdx)) {
result[currTokType.tokenTypeIdx] = idx;
}
utils_1.forEach(currTokType.categoryMatches, function (currExtendingType) {
if (!utils_1.has(result, currExtendingType)) {
(0, utils_1.forEach)(currTokType.categoryMatches, function (currExtendingType) {
if (!(0, utils_1.has)(result, currExtendingType)) {
result[currExtendingType] = idx;

@@ -199,3 +199,3 @@ }

function buildSingleAlternativeLookaheadFunction(alt, tokenMatcher, dynamicTokensEnabled) {
var areAllOneTokenLookahead = utils_1.every(alt, function (currPath) {
var areAllOneTokenLookahead = (0, utils_1.every)(alt, function (currPath) {
return currPath.length === 1;

@@ -207,5 +207,5 @@ });

if (areAllOneTokenLookahead && !dynamicTokensEnabled) {
var singleTokensTypes = utils_1.flatten(alt);
var singleTokensTypes = (0, utils_1.flatten)(alt);
if (singleTokensTypes.length === 1 &&
utils_1.isEmpty(singleTokensTypes[0].categoryMatches)) {
(0, utils_1.isEmpty)(singleTokensTypes[0].categoryMatches)) {
var expectedTokenType = singleTokensTypes[0];

@@ -218,5 +218,5 @@ var expectedTokenUniqueKey_1 = expectedTokenType.tokenTypeIdx;

else {
var choiceToAlt_2 = utils_1.reduce(singleTokensTypes, function (result, currTokType, idx) {
var choiceToAlt_2 = (0, utils_1.reduce)(singleTokensTypes, function (result, currTokType, idx) {
result[currTokType.tokenTypeIdx] = true;
utils_1.forEach(currTokType.categoryMatches, function (currExtendingType) {
(0, utils_1.forEach)(currTokType.categoryMatches, function (currExtendingType) {
result[currExtendingType] = true;

@@ -393,11 +393,11 @@ });

function lookAheadSequenceFromAlternatives(altsDefs, k) {
var partialAlts = utils_1.map(altsDefs, function (currAlt) {
return interpreter_1.possiblePathsFrom([currAlt], 1);
var partialAlts = (0, utils_1.map)(altsDefs, function (currAlt) {
return (0, interpreter_1.possiblePathsFrom)([currAlt], 1);
});
var finalResult = initializeArrayOfArrays(partialAlts.length);
var altsHashes = utils_1.map(partialAlts, function (currAltPaths) {
var altsHashes = (0, utils_1.map)(partialAlts, function (currAltPaths) {
var dict = {};
utils_1.forEach(currAltPaths, function (item) {
(0, utils_1.forEach)(currAltPaths, function (item) {
var keys = pathToHashKeys(item.partialPath);
utils_1.forEach(keys, function (currKey) {
(0, utils_1.forEach)(keys, function (currKey) {
dict[currKey] = true;

@@ -422,3 +422,3 @@ });

// End of the line for this path.
if (isUnique || utils_1.isEmpty(suffixDef) || currPathPrefix.length === k) {
if (isUnique || (0, utils_1.isEmpty)(suffixDef) || currPathPrefix.length === k) {
var currAltResult = finalResult[altIdx];

@@ -437,8 +437,8 @@ // TODO: Can we implement a containsPath using Maps/Dictionaries?

else {
var newPartialPathsAndSuffixes = interpreter_1.possiblePathsFrom(suffixDef, pathLength + 1, currPathPrefix);
var newPartialPathsAndSuffixes = (0, interpreter_1.possiblePathsFrom)(suffixDef, pathLength + 1, currPathPrefix);
newData[altIdx] = newData[altIdx].concat(newPartialPathsAndSuffixes);
// Update keys for new known paths
utils_1.forEach(newPartialPathsAndSuffixes, function (item) {
(0, utils_1.forEach)(newPartialPathsAndSuffixes, function (item) {
var prefixKeys = pathToHashKeys(item.partialPath);
utils_1.forEach(prefixKeys, function (key) {
(0, utils_1.forEach)(prefixKeys, function (key) {
altsHashes[altIdx][key] = true;

@@ -497,3 +497,3 @@ });

return (prefix.length < other.length &&
utils_1.every(prefix, function (tokType, idx) {
(0, utils_1.every)(prefix, function (tokType, idx) {
var otherTokType = other[idx];

@@ -506,5 +506,5 @@ return (tokType === otherTokType ||

function areTokenCategoriesNotUsed(lookAheadPaths) {
return utils_1.every(lookAheadPaths, function (singleAltPaths) {
return utils_1.every(singleAltPaths, function (singlePath) {
return utils_1.every(singlePath, function (token) { return utils_1.isEmpty(token.categoryMatches); });
return (0, utils_1.every)(lookAheadPaths, function (singleAltPaths) {
return (0, utils_1.every)(singleAltPaths, function (singlePath) {
return (0, utils_1.every)(singlePath, function (token) { return (0, utils_1.isEmpty)(token.categoryMatches); });
});

@@ -511,0 +511,0 @@ });
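
The categoryMatches lookups that appear throughout this lookahead code implement token categories: an abstract category token that the lexer never matches directly, plus concrete tokens that also count as that category. A declaration sketch (token names illustrative):

const { createToken, Lexer } = require("chevrotain");

// Abstract category: Lexer.NA means the lexer never matches it directly.
const Keyword = createToken({ name: "Keyword", pattern: Lexer.NA });

// Concrete tokens that also match wherever Keyword is expected; this is
// what populates the categoryMatches data consulted above.
const If = createToken({ name: "If", pattern: /if/, categories: Keyword });
const Else = createToken({ name: "Else", pattern: /else/, categories: Keyword });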

@@ -39,3 +39,3 @@ "use strict";

var _this = this;
utils_1.forEach(utils_1.values(this.nameToTopRule), function (prod) {
(0, utils_1.forEach)((0, utils_1.values)(this.nameToTopRule), function (prod) {
_this.currTopLevel = prod;

@@ -42,0 +42,0 @@ prod.accept(_this);

@@ -15,4 +15,4 @@ "use strict";

if (prevRest === void 0) { prevRest = []; }
utils_1.forEach(prod.definition, function (subProd, index) {
var currRest = utils_1.drop(prod.definition, index + 1);
(0, utils_1.forEach)(prod.definition, function (subProd, index) {
var currRest = (0, utils_1.drop)(prod.definition, index + 1);
/* istanbul ignore else */

@@ -92,3 +92,3 @@ if (subProd instanceof gast_public_1.NonTerminal) {

// walk all different alternatives
utils_1.forEach(orProd.definition, function (alt) {
(0, utils_1.forEach)(orProd.definition, function (alt) {
// wrapping each alternative in a single definition wrapper

@@ -95,0 +95,0 @@ // to avoid errors in computing the rest of that alternative in the invocation to computeInProdFollows

@@ -35,3 +35,3 @@ "use strict";

var apply_mixins_1 = require("./utils/apply_mixins");
exports.END_OF_FILE = tokens_public_1.createTokenInstance(tokens_public_1.EOF, "", NaN, NaN, NaN, NaN, NaN, NaN);
exports.END_OF_FILE = (0, tokens_public_1.createTokenInstance)(tokens_public_1.EOF, "", NaN, NaN, NaN, NaN, NaN, NaN);
Object.freeze(exports.END_OF_FILE);

@@ -89,3 +89,3 @@ exports.DEFAULT_PARSER_CONFIG = Object.freeze({

that.initPerformanceTracer(config);
if (utils_1.has(config, "ignoredIssues")) {
if ((0, utils_1.has)(config, "ignoredIssues")) {
throw new Error("The <ignoredIssues> IParserConfig property has been deprecated.\n\t" +

@@ -96,3 +96,3 @@ "Please use the <IGNORE_AMBIGUITIES> flag on the relevant DSL method instead.\n\t" +

}
this.skipValidations = utils_1.has(config, "skipValidations")
this.skipValidations = (0, utils_1.has)(config, "skipValidations")
? config.skipValidations

@@ -118,3 +118,3 @@ : exports.DEFAULT_PARSER_CONFIG.skipValidations;

// Any manipulations of the `this` object done during the recording phase.
utils_1.toFastProperties(_this);
(0, utils_1.toFastProperties)(_this);
});

@@ -125,3 +125,3 @@ _this.TRACE_INIT("Grammar Recording", function () {

// Building the GAST
utils_1.forEach(_this.definedRulesNames, function (currRuleName) {
(0, utils_1.forEach)(_this.definedRulesNames, function (currRuleName) {
var wrappedRule = _this[currRuleName];

@@ -142,4 +142,4 @@ var originalGrammarAction = wrappedRule["originalGrammarAction"];

_this.TRACE_INIT("Grammar Resolving", function () {
resolverErrors = gast_resolver_public_1.resolveGrammar({
rules: utils_1.values(_this.gastProductionsCache)
resolverErrors = (0, gast_resolver_public_1.resolveGrammar)({
rules: (0, utils_1.values)(_this.gastProductionsCache)
});

@@ -151,7 +151,7 @@ _this.definitionErrors = _this.definitionErrors.concat(resolverErrors);

// as unresolved grammar may lead to unhandled runtime exceptions in the follow up validations.
if (utils_1.isEmpty(resolverErrors) && _this.skipValidations === false) {
var validationErrors = gast_resolver_public_1.validateGrammar({
rules: utils_1.values(_this.gastProductionsCache),
if ((0, utils_1.isEmpty)(resolverErrors) && _this.skipValidations === false) {
var validationErrors = (0, gast_resolver_public_1.validateGrammar)({
rules: (0, utils_1.values)(_this.gastProductionsCache),
maxLookahead: _this.maxLookahead,
tokenTypes: utils_1.values(_this.tokensMap),
tokenTypes: (0, utils_1.values)(_this.tokensMap),
errMsgProvider: errors_public_1.defaultGrammarValidatorErrorProvider,

@@ -164,7 +164,7 @@ grammarName: className

// this analysis may fail if the grammar is not perfectly valid
if (utils_1.isEmpty(_this.definitionErrors)) {
if ((0, utils_1.isEmpty)(_this.definitionErrors)) {
// The results of these computations are not needed unless error recovery is enabled.
if (_this.recoveryEnabled) {
_this.TRACE_INIT("computeAllProdsFollows", function () {
var allFollows = follow_1.computeAllProdsFollows(utils_1.values(_this.gastProductionsCache));
var allFollows = (0, follow_1.computeAllProdsFollows)((0, utils_1.values)(_this.gastProductionsCache));
_this.resyncFollows = allFollows;

@@ -174,8 +174,8 @@ });

_this.TRACE_INIT("ComputeLookaheadFunctions", function () {
_this.preComputeLookaheadFunctions(utils_1.values(_this.gastProductionsCache));
_this.preComputeLookaheadFunctions((0, utils_1.values)(_this.gastProductionsCache));
});
}
if (!Parser.DEFER_DEFINITION_ERRORS_HANDLING &&
!utils_1.isEmpty(_this.definitionErrors)) {
defErrorsMsgs = utils_1.map(_this.definitionErrors, function (defError) { return defError.message; });
!(0, utils_1.isEmpty)(_this.definitionErrors)) {
defErrorsMsgs = (0, utils_1.map)(_this.definitionErrors, function (defError) { return defError.message; });
throw new Error("Parser Definition Errors detected:\n " + defErrorsMsgs.join("\n-------------------------------\n"));

@@ -195,3 +195,3 @@ }

exports.Parser = Parser;
apply_mixins_1.applyMixins(Parser, [
(0, apply_mixins_1.applyMixins)(Parser, [
recoverable_1.Recoverable,

@@ -213,3 +213,3 @@ looksahead_1.LooksAhead,

var _this = this;
var configClone = utils_1.cloneObj(config);
var configClone = (0, utils_1.cloneObj)(config);
configClone.outputCst = true;

@@ -227,3 +227,3 @@ _this = _super.call(this, tokenVocabulary, configClone) || this;

var _this = this;
var configClone = utils_1.cloneObj(config);
var configClone = (0, utils_1.cloneObj)(config);
configClone.outputCst = false;

@@ -230,0 +230,0 @@ _this = _super.call(this, tokenVocabulary, configClone) || this;

@@ -12,6 +12,6 @@ "use strict";

var startRuleGast = this.gastProductionsCache[startRuleName];
if (utils_1.isUndefined(startRuleGast)) {
if ((0, utils_1.isUndefined)(startRuleGast)) {
throw Error("Rule ->" + startRuleName + "<- does not exist in this grammar.");
}
return interpreter_1.nextPossibleTokensAfter([startRuleGast], precedingInput, this.tokenMatcher, this.maxLookahead);
return (0, interpreter_1.nextPossibleTokensAfter)([startRuleGast], precedingInput, this.tokenMatcher, this.maxLookahead);
};

@@ -21,3 +21,3 @@ // TODO: should this be a member method or a utility? it does not have any state or usage of 'this'...

ContentAssist.prototype.getNextPossibleTokenTypes = function (grammarPath) {
var topRuleName = utils_1.first(grammarPath.ruleStack);
var topRuleName = (0, utils_1.first)(grammarPath.ruleStack);
var gastProductions = this.getGAstProductions();

@@ -24,0 +24,0 @@ var topProduction = gastProductions[topRuleName];
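
This ContentAssist trait wraps the grammar interpreter shown earlier (nextPossibleTokensAfter) behind the public computeContentAssist method. A usage sketch, assuming a parser instance and a token list from a prior lex (names illustrative):

// Sketch: suggestions for what may legally come next after the typed tokens.
const suggestions = parser.computeContentAssist(
  "expression",     // start rule name; throws "Rule ->...<- does not exist" if wrong (see above)
  tokensTypedSoFar  // IToken[] preceding the caret
);
suggestions.forEach((s) => {
  console.log(s.nextTokenType.name); // each result names a possible next TokenType
});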

@@ -16,3 +16,3 @@ "use strict";

this._errors = [];
this.errorMessageProvider = utils_1.has(config, "errorMessageProvider")
this.errorMessageProvider = (0, utils_1.has)(config, "errorMessageProvider")
? config.errorMessageProvider

@@ -22,6 +22,6 @@ : parser_1.DEFAULT_PARSER_CONFIG.errorMessageProvider;

ErrorHandler.prototype.SAVE_ERROR = function (error) {
if (exceptions_public_1.isRecognitionException(error)) {
if ((0, exceptions_public_1.isRecognitionException)(error)) {
error.context = {
ruleStack: this.getHumanReadableRuleStack(),
ruleOccurrenceStack: utils_1.cloneArr(this.RULE_OCCURRENCE_STACK)
ruleOccurrenceStack: (0, utils_1.cloneArr)(this.RULE_OCCURRENCE_STACK)
};

@@ -37,3 +37,3 @@ this._errors.push(error);

get: function () {
return utils_1.cloneArr(this._errors);
return (0, utils_1.cloneArr)(this._errors);
},

@@ -50,3 +50,3 @@ set: function (newErrors) {

var ruleGrammar = this.getGAstProductions()[ruleName];
var lookAheadPathsPerAlternative = lookahead_1.getLookaheadPathsForOptionalProd(occurrence, ruleGrammar, prodType, this.maxLookahead);
var lookAheadPathsPerAlternative = (0, lookahead_1.getLookaheadPathsForOptionalProd)(occurrence, ruleGrammar, prodType, this.maxLookahead);
var insideProdPaths = lookAheadPathsPerAlternative[0];

@@ -71,3 +71,3 @@ var actualTokens = [];

// TODO: getLookaheadPathsForOr can be slow for large enough maxLookahead and certain grammars, consider caching ?
var lookAheadPathsPerAlternative = lookahead_1.getLookaheadPathsForOr(occurrence, ruleGrammar, this.maxLookahead);
var lookAheadPathsPerAlternative = (0, lookahead_1.getLookaheadPathsForOr)(occurrence, ruleGrammar, this.maxLookahead);
var actualTokens = [];

@@ -74,0 +74,0 @@ for (var i = 1; i <= this.maxLookahead; i++) {

@@ -17,5 +17,5 @@ "use strict";

var MAX_METHOD_IDX = Math.pow(2, keys_1.BITS_FOR_OCCURRENCE_IDX) - 1;
var RFT = tokens_public_1.createToken({ name: "RECORDING_PHASE_TOKEN", pattern: lexer_public_1.Lexer.NA });
tokens_1.augmentTokenTypes([RFT]);
var RECORDING_PHASE_TOKEN = tokens_public_1.createTokenInstance(RFT, "This IToken indicates the Parser is in Recording Phase\n\t" +
var RFT = (0, tokens_public_1.createToken)({ name: "RECORDING_PHASE_TOKEN", pattern: lexer_public_1.Lexer.NA });
(0, tokens_1.augmentTokenTypes)([RFT]);
var RECORDING_PHASE_TOKEN = (0, tokens_public_1.createTokenInstance)(RFT, "This IToken indicates the Parser is in Recording Phase\n\t" +
"" +

@@ -203,3 +203,3 @@ "See: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details",

assertMethodIdxIsValid(occurrence);
if (!ruleToCall || utils_1.has(ruleToCall, "ruleName") === false) {
if (!ruleToCall || (0, utils_1.has)(ruleToCall, "ruleName") === false) {
var error = new Error("<SUBRULE" + getIdxSuffix(occurrence) + "> argument is invalid" +

@@ -211,3 +211,3 @@ (" expecting a Parser method reference but got: <" + JSON.stringify(ruleToCall) + ">") +

}
var prevProd = utils_1.peek(this.recordingProdStack);
var prevProd = (0, utils_1.peek)(this.recordingProdStack);
var ruleName = ruleToCall["ruleName"];

@@ -226,3 +226,3 @@ var newNoneTerminal = new gast_public_1.NonTerminal({

assertMethodIdxIsValid(occurrence);
if (!tokens_1.hasShortKeyProperty(tokType)) {
if (!(0, tokens_1.hasShortKeyProperty)(tokType)) {
var error = new Error("<CONSUME" + getIdxSuffix(occurrence) + "> argument is invalid" +

@@ -234,3 +234,3 @@ (" expecting a TokenType reference but got: <" + JSON.stringify(tokType) + ">") +

}
var prevProd = utils_1.peek(this.recordingProdStack);
var prevProd = (0, utils_1.peek)(this.recordingProdStack);
var newNoneTerminal = new gast_public_1.Terminal({

@@ -250,4 +250,4 @@ idx: occurrence,

assertMethodIdxIsValid(occurrence);
var prevProd = utils_1.peek(this.recordingProdStack);
var grammarAction = utils_1.isFunction(mainProdArg) ? mainProdArg : mainProdArg.DEF;
var prevProd = (0, utils_1.peek)(this.recordingProdStack);
var grammarAction = (0, utils_1.isFunction)(mainProdArg) ? mainProdArg : mainProdArg.DEF;
var newProd = new prodConstructor({ definition: [], idx: occurrence });

@@ -257,3 +257,3 @@ if (handleSep) {

}
if (utils_1.has(mainProdArg, "MAX_LOOKAHEAD")) {
if ((0, utils_1.has)(mainProdArg, "MAX_LOOKAHEAD")) {
newProd.maxLookahead = mainProdArg.MAX_LOOKAHEAD;

@@ -270,5 +270,5 @@ }

assertMethodIdxIsValid(occurrence);
var prevProd = utils_1.peek(this.recordingProdStack);
var prevProd = (0, utils_1.peek)(this.recordingProdStack);
// Only an array of alternatives
var hasOptions = utils_1.isArray(mainProdArg) === false;
var hasOptions = (0, utils_1.isArray)(mainProdArg) === false;
var alts = hasOptions === false ? mainProdArg : mainProdArg.DEF;

@@ -280,16 +280,16 @@ var newOrProd = new gast_public_1.Alternation({

});
if (utils_1.has(mainProdArg, "MAX_LOOKAHEAD")) {
if ((0, utils_1.has)(mainProdArg, "MAX_LOOKAHEAD")) {
newOrProd.maxLookahead = mainProdArg.MAX_LOOKAHEAD;
}
var hasPredicates = utils_1.some(alts, function (currAlt) { return utils_1.isFunction(currAlt.GATE); });
var hasPredicates = (0, utils_1.some)(alts, function (currAlt) { return (0, utils_1.isFunction)(currAlt.GATE); });
newOrProd.hasPredicates = hasPredicates;
prevProd.definition.push(newOrProd);
utils_1.forEach(alts, function (currAlt) {
(0, utils_1.forEach)(alts, function (currAlt) {
var currAltFlat = new gast_public_1.Alternative({ definition: [] });
newOrProd.definition.push(currAltFlat);
if (utils_1.has(currAlt, "IGNORE_AMBIGUITIES")) {
if ((0, utils_1.has)(currAlt, "IGNORE_AMBIGUITIES")) {
currAltFlat.ignoreAmbiguities = currAlt.IGNORE_AMBIGUITIES;
}
// **implicit** ignoreAmbiguities due to usage of gate
else if (utils_1.has(currAlt, "GATE")) {
else if ((0, utils_1.has)(currAlt, "GATE")) {
currAltFlat.ignoreAmbiguities = true;

@@ -296,0 +296,0 @@ }
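
This recorder logic is where the IGNORE_AMBIGUITIES flag, and the implicit ignore triggered by a GATE, get attached to recorded alternatives; it is also the DSL-level replacement for the removed ignoredIssues config rejected earlier in this diff. A sketch of the shapes it accepts, inside some CstParser rule body (rule, token and gate names are illustrative):

this.OR({
  IGNORE_AMBIGUITIES: true, // suppress ambiguity diagnostics for the whole alternation
  MAX_LOOKAHEAD: 3,         // picked up by the has(mainProdArg, "MAX_LOOKAHEAD") branch above
  DEF: [
    { ALT: () => this.SUBRULE(this.functionCall) },
    {
      // a GATE implicitly sets ignoreAmbiguities for its alternative (see above)
      GATE: () => this.LA(1).tokenType === Identifier,
      ALT: () => this.SUBRULE(this.assignment)
    }
  ]
});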

@@ -16,14 +16,14 @@ "use strict";

LooksAhead.prototype.initLooksAhead = function (config) {
this.dynamicTokensEnabled = utils_1.has(config, "dynamicTokensEnabled")
this.dynamicTokensEnabled = (0, utils_1.has)(config, "dynamicTokensEnabled")
? config.dynamicTokensEnabled
: parser_1.DEFAULT_PARSER_CONFIG.dynamicTokensEnabled;
this.maxLookahead = utils_1.has(config, "maxLookahead")
this.maxLookahead = (0, utils_1.has)(config, "maxLookahead")
? config.maxLookahead
: parser_1.DEFAULT_PARSER_CONFIG.maxLookahead;
/* istanbul ignore next - Using plain array as dictionary will be tested on older node.js versions and IE11 */
this.lookAheadFuncsCache = utils_1.isES2015MapSupported() ? new Map() : [];
this.lookAheadFuncsCache = (0, utils_1.isES2015MapSupported)() ? new Map() : [];
// Performance optimization on newer engines that support ES6 Map
// For larger Maps this is slightly faster than using a plain object (array in our case).
/* istanbul ignore else - The else branch will be tested on older node.js versions and IE11 */
if (utils_1.isES2015MapSupported()) {
if ((0, utils_1.isES2015MapSupported)()) {
this.getLaFuncFromCache = this.getLaFuncFromMap;

@@ -39,27 +39,27 @@ this.setLaFuncCache = this.setLaFuncCacheUsingMap;

var _this = this;
utils_1.forEach(rules, function (currRule) {
(0, utils_1.forEach)(rules, function (currRule) {
_this.TRACE_INIT(currRule.name + " Rule Lookahead", function () {
var _a = gast_1.collectMethods(currRule), alternation = _a.alternation, repetition = _a.repetition, option = _a.option, repetitionMandatory = _a.repetitionMandatory, repetitionMandatoryWithSeparator = _a.repetitionMandatoryWithSeparator, repetitionWithSeparator = _a.repetitionWithSeparator;
utils_1.forEach(alternation, function (currProd) {
var _a = (0, gast_1.collectMethods)(currRule), alternation = _a.alternation, repetition = _a.repetition, option = _a.option, repetitionMandatory = _a.repetitionMandatory, repetitionMandatoryWithSeparator = _a.repetitionMandatoryWithSeparator, repetitionWithSeparator = _a.repetitionWithSeparator;
(0, utils_1.forEach)(alternation, function (currProd) {
var prodIdx = currProd.idx === 0 ? "" : currProd.idx;
_this.TRACE_INIT("" + gast_1.getProductionDslName(currProd) + prodIdx, function () {
var laFunc = lookahead_1.buildLookaheadFuncForOr(currProd.idx, currRule, currProd.maxLookahead || _this.maxLookahead, currProd.hasPredicates, _this.dynamicTokensEnabled, _this.lookAheadBuilderForAlternatives);
var key = keys_1.getKeyForAutomaticLookahead(_this.fullRuleNameToShort[currRule.name], keys_1.OR_IDX, currProd.idx);
_this.TRACE_INIT("" + (0, gast_1.getProductionDslName)(currProd) + prodIdx, function () {
var laFunc = (0, lookahead_1.buildLookaheadFuncForOr)(currProd.idx, currRule, currProd.maxLookahead || _this.maxLookahead, currProd.hasPredicates, _this.dynamicTokensEnabled, _this.lookAheadBuilderForAlternatives);
var key = (0, keys_1.getKeyForAutomaticLookahead)(_this.fullRuleNameToShort[currRule.name], keys_1.OR_IDX, currProd.idx);
_this.setLaFuncCache(key, laFunc);
});
});
utils_1.forEach(repetition, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.MANY_IDX, lookahead_1.PROD_TYPE.REPETITION, currProd.maxLookahead, gast_1.getProductionDslName(currProd));
(0, utils_1.forEach)(repetition, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.MANY_IDX, lookahead_1.PROD_TYPE.REPETITION, currProd.maxLookahead, (0, gast_1.getProductionDslName)(currProd));
});
utils_1.forEach(option, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.OPTION_IDX, lookahead_1.PROD_TYPE.OPTION, currProd.maxLookahead, gast_1.getProductionDslName(currProd));
(0, utils_1.forEach)(option, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.OPTION_IDX, lookahead_1.PROD_TYPE.OPTION, currProd.maxLookahead, (0, gast_1.getProductionDslName)(currProd));
});
utils_1.forEach(repetitionMandatory, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.AT_LEAST_ONE_IDX, lookahead_1.PROD_TYPE.REPETITION_MANDATORY, currProd.maxLookahead, gast_1.getProductionDslName(currProd));
(0, utils_1.forEach)(repetitionMandatory, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.AT_LEAST_ONE_IDX, lookahead_1.PROD_TYPE.REPETITION_MANDATORY, currProd.maxLookahead, (0, gast_1.getProductionDslName)(currProd));
});
utils_1.forEach(repetitionMandatoryWithSeparator, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.AT_LEAST_ONE_SEP_IDX, lookahead_1.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR, currProd.maxLookahead, gast_1.getProductionDslName(currProd));
(0, utils_1.forEach)(repetitionMandatoryWithSeparator, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.AT_LEAST_ONE_SEP_IDX, lookahead_1.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR, currProd.maxLookahead, (0, gast_1.getProductionDslName)(currProd));
});
utils_1.forEach(repetitionWithSeparator, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.MANY_SEP_IDX, lookahead_1.PROD_TYPE.REPETITION_WITH_SEPARATOR, currProd.maxLookahead, gast_1.getProductionDslName(currProd));
(0, utils_1.forEach)(repetitionWithSeparator, function (currProd) {
_this.computeLookaheadFunc(currRule, currProd.idx, keys_1.MANY_SEP_IDX, lookahead_1.PROD_TYPE.REPETITION_WITH_SEPARATOR, currProd.maxLookahead, (0, gast_1.getProductionDslName)(currProd));
});
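The loop above precomputes one lookahead function per OR alternation, preferring the production-level currProd.maxLookahead over the parser-wide default. In grammar code that per-production override appears to surface as the optional MAX_LOOKAHEAD property of OR; a minimal sketch, with hypothetical rule and token names:

const { createToken, CstParser } = require("chevrotain");
const Integer = createToken({ name: "Integer", pattern: /\d+/ });
const StringLiteral = createToken({ name: "StringLiteral", pattern: /"[^"]*"/ });
class SketchParser extends CstParser {
  constructor() {
    super([Integer, StringLiteral]);
    const $ = this;
    $.RULE("value", () => {
      $.OR({
        MAX_LOOKAHEAD: 2, // per-alternation override of the parser-wide maxLookahead
        DEF: [
          { ALT: () => $.CONSUME(Integer) },
          { ALT: () => $.CONSUME(StringLiteral) }
        ]
      });
    });
    this.performSelfAnalysis();
  }
}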

@@ -72,4 +72,4 @@ });

this.TRACE_INIT("" + dslMethodName + (prodOccurrence === 0 ? "" : prodOccurrence), function () {
var laFunc = lookahead_1.buildLookaheadFuncForOptionalProd(prodOccurrence, rule, prodMaxLookahead || _this.maxLookahead, _this.dynamicTokensEnabled, prodType, _this.lookAheadBuilderForOptional);
var key = keys_1.getKeyForAutomaticLookahead(_this.fullRuleNameToShort[rule.name], prodKey, prodOccurrence);
var laFunc = (0, lookahead_1.buildLookaheadFuncForOptionalProd)(prodOccurrence, rule, prodMaxLookahead || _this.maxLookahead, _this.dynamicTokensEnabled, prodType, _this.lookAheadBuilderForOptional);
var key = (0, keys_1.getKeyForAutomaticLookahead)(_this.fullRuleNameToShort[rule.name], prodKey, prodOccurrence);
_this.setLaFuncCache(key, laFunc);

@@ -79,6 +79,6 @@ });

LooksAhead.prototype.lookAheadBuilderForOptional = function (alt, tokenMatcher, dynamicTokensEnabled) {
return lookahead_1.buildSingleAlternativeLookaheadFunction(alt, tokenMatcher, dynamicTokensEnabled);
return (0, lookahead_1.buildSingleAlternativeLookaheadFunction)(alt, tokenMatcher, dynamicTokensEnabled);
};
LooksAhead.prototype.lookAheadBuilderForAlternatives = function (alts, hasPredicates, tokenMatcher, dynamicTokensEnabled) {
return lookahead_1.buildAlternativesLookAheadFunc(alts, hasPredicates, tokenMatcher, dynamicTokensEnabled);
return (0, lookahead_1.buildAlternativesLookAheadFunc)(alts, hasPredicates, tokenMatcher, dynamicTokensEnabled);
};

@@ -88,3 +88,3 @@ // this actually returns a number, but it is always used as a string (object prop key)

var currRuleShortName = this.getLastExplicitRuleShortName();
return keys_1.getKeyForAutomaticLookahead(currRuleShortName, dslMethodIdx, occurrence);
return (0, keys_1.getKeyForAutomaticLookahead)(currRuleShortName, dslMethodIdx, occurrence);
};

@@ -91,0 +91,0 @@ /* istanbul ignore next */

@@ -13,3 +13,3 @@ "use strict";

PerformanceTracer.prototype.initPerformanceTracer = function (config) {
if (utils_1.has(config, "traceInitPerf")) {
if ((0, utils_1.has)(config, "traceInitPerf")) {
var userTraceInitPerf = config.traceInitPerf;

@@ -39,3 +39,3 @@ var traceIsNumber = typeof userTraceInitPerf === "number";

}
var _a = utils_1.timer(phaseImpl), time = _a.time, value = _a.value;
var _a = (0, utils_1.timer)(phaseImpl), time = _a.time, value = _a.value;
/* istanbul ignore next - Difficult to reproduce specific performance behavior (>10ms) in tests */

@@ -42,0 +42,0 @@ var traceMethod = time > 10 ? console.warn : console.log;
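The initPerformanceTracer hunk above reads the user's traceInitPerf flag, and the timer wrapper routes any phase slower than 10ms to console.warn. A usage sketch (MyParser and allTokens are hypothetical, defined elsewhere):

// true traces all initialization phases; a number appears to cap the traced nesting depth
const parser = new MyParser(allTokens, { traceInitPerf: 2 });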

@@ -284,3 +284,3 @@ "use strict";

if (config === void 0) { config = parser_1.DEFAULT_RULE_CONFIG; }
if (utils_1.contains(this.definedRulesNames, name)) {
if ((0, utils_1.contains)(this.definedRulesNames, name)) {
var errMsg = errors_public_1.defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({

@@ -305,3 +305,3 @@ topLevelRule: name,

var ruleErrors = [];
ruleErrors = ruleErrors.concat(checks_1.validateRuleIsOverridden(name, this.definedRulesNames, this.className));
ruleErrors = ruleErrors.concat((0, checks_1.validateRuleIsOverridden)(name, this.definedRulesNames, this.className));
this.definitionErrors = this.definitionErrors.concat(ruleErrors);

@@ -323,3 +323,3 @@ var ruleImplementation = this.defineRule(name, impl, config);

catch (e) {
if (exceptions_public_1.isRecognitionException(e)) {
if ((0, exceptions_public_1.isRecognitionException)(e)) {
return false;

@@ -342,3 +342,3 @@ }

RecognizerApi.prototype.getSerializedGastProductions = function () {
return gast_public_1.serializeGrammar(utils_1.values(this.gastProductionsCache));
return (0, gast_public_1.serializeGrammar)((0, utils_1.values)(this.gastProductionsCache));
};

@@ -345,0 +345,0 @@ return RecognizerApi;

@@ -22,3 +22,3 @@ "use strict";

RecognizerEngine.prototype.initRecognizerEngine = function (tokenVocabulary, config) {
this.className = lang_extensions_1.classNameFromInstance(this);
this.className = (0, lang_extensions_1.classNameFromInstance)(this);
// TODO: would using an ES6 Map or plain object be faster (CST building scenario)

@@ -35,3 +35,3 @@ this.shortRuleNameToFull = {};

this.gastProductionsCache = {};
if (utils_1.has(config, "serializedGrammar")) {
if ((0, utils_1.has)(config, "serializedGrammar")) {
throw Error("The Parser's configuration can no longer contain a <serializedGrammar> property.\n" +

@@ -41,7 +41,7 @@ "\tSee: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_6-0-0\n" +

}
if (utils_1.isArray(tokenVocabulary)) {
if ((0, utils_1.isArray)(tokenVocabulary)) {
// This only checks for Token vocabularies provided as arrays.
// That is good enough because the main objective is to detect users of pre-V4.0 APIs
// rather than all edge cases of empty Token vocabularies.
if (utils_1.isEmpty(tokenVocabulary)) {
if ((0, utils_1.isEmpty)(tokenVocabulary)) {
throw Error("A Token Vocabulary cannot be empty.\n" +

@@ -57,4 +57,4 @@ "\tNote that the first argument for the parser constructor\n" +

}
if (utils_1.isArray(tokenVocabulary)) {
this.tokensMap = utils_1.reduce(tokenVocabulary, function (acc, tokType) {
if ((0, utils_1.isArray)(tokenVocabulary)) {
this.tokensMap = (0, utils_1.reduce)(tokenVocabulary, function (acc, tokType) {
acc[tokType.name] = tokType;

@@ -64,7 +64,7 @@ return acc;

}
else if (utils_1.has(tokenVocabulary, "modes") &&
utils_1.every(utils_1.flatten(utils_1.values(tokenVocabulary.modes)), tokens_1.isTokenType)) {
var allTokenTypes = utils_1.flatten(utils_1.values(tokenVocabulary.modes));
var uniqueTokens = utils_1.uniq(allTokenTypes);
this.tokensMap = utils_1.reduce(uniqueTokens, function (acc, tokType) {
else if ((0, utils_1.has)(tokenVocabulary, "modes") &&
(0, utils_1.every)((0, utils_1.flatten)((0, utils_1.values)(tokenVocabulary.modes)), tokens_1.isTokenType)) {
var allTokenTypes = (0, utils_1.flatten)((0, utils_1.values)(tokenVocabulary.modes));
var uniqueTokens = (0, utils_1.uniq)(allTokenTypes);
this.tokensMap = (0, utils_1.reduce)(uniqueTokens, function (acc, tokType) {
acc[tokType.name] = tokType;

@@ -74,4 +74,4 @@ return acc;

}
else if (utils_1.isObject(tokenVocabulary)) {
this.tokensMap = utils_1.cloneObj(tokenVocabulary);
else if ((0, utils_1.isObject)(tokenVocabulary)) {
this.tokensMap = (0, utils_1.cloneObj)(tokenVocabulary);
}

@@ -87,3 +87,3 @@ else {

// TODO: This check may not be accurate for multi mode lexers
var noTokenCategoriesUsed = utils_1.every(utils_1.values(tokenVocabulary), function (tokenConstructor) { return utils_1.isEmpty(tokenConstructor.categoryMatches); });
var noTokenCategoriesUsed = (0, utils_1.every)((0, utils_1.values)(tokenVocabulary), function (tokenConstructor) { return (0, utils_1.isEmpty)(tokenConstructor.categoryMatches); });
this.tokenMatcher = noTokenCategoriesUsed

@@ -95,3 +95,3 @@ ? tokens_1.tokenStructuredMatcherNoCategories

// Therefore we must augment the Token classes both on Lexer initialization and on Parser initialization
tokens_1.augmentTokenTypes(utils_1.values(this.tokensMap));
(0, tokens_1.augmentTokenTypes)((0, utils_1.values)(this.tokensMap));
};

@@ -103,6 +103,6 @@ RecognizerEngine.prototype.defineRule = function (ruleName, impl, config) {

}
var resyncEnabled = utils_1.has(config, "resyncEnabled")
var resyncEnabled = (0, utils_1.has)(config, "resyncEnabled")
? config.resyncEnabled
: parser_1.DEFAULT_RULE_CONFIG.resyncEnabled;
var recoveryValueFunc = utils_1.has(config, "recoveryValueFunc")
var recoveryValueFunc = (0, utils_1.has)(config, "recoveryValueFunc")
? config.recoveryValueFunc

@@ -154,3 +154,3 @@ : parser_1.DEFAULT_RULE_CONFIG.recoveryValueFunc;

var reSyncEnabled = resyncEnabledConfig && !this.isBackTracking() && this.recoveryEnabled;
if (exceptions_public_1.isRecognitionException(e)) {
if ((0, exceptions_public_1.isRecognitionException)(e)) {
var recogError = e;

@@ -403,3 +403,3 @@ if (reSyncEnabled) {

var laKey = this.getKeyForAutomaticLookahead(keys_1.OR_IDX, occurrence);
var alts = utils_1.isArray(altsOrOpts)
var alts = (0, utils_1.isArray)(altsOrOpts)
? altsOrOpts

@@ -444,3 +444,3 @@ : altsOrOpts.DEF;

RecognizerEngine.prototype.subruleInternalError = function (e, options, ruleName) {
if (exceptions_public_1.isRecognitionException(e) && e.partialCstResult !== undefined) {
if ((0, exceptions_public_1.isRecognitionException)(e) && e.partialCstResult !== undefined) {
this.cstPostNonTerminal(e.partialCstResult, options !== undefined && options.LABEL !== undefined

@@ -518,3 +518,3 @@ ? options.LABEL

var savedErrors = this.errors;
var savedRuleStack = utils_1.cloneArr(this.RULE_STACK);
var savedRuleStack = (0, utils_1.cloneArr)(this.RULE_STACK);
return {

@@ -521,0 +521,0 @@ errors: savedErrors,

@@ -26,3 +26,3 @@ "use strict";

this.resyncFollows = {};
this.recoveryEnabled = utils_1.has(config, "recoveryEnabled")
this.recoveryEnabled = (0, utils_1.has)(config, "recoveryEnabled")
? config.recoveryEnabled

@@ -38,3 +38,3 @@ : parser_1.DEFAULT_PARSER_CONFIG.recoveryEnabled;

Recoverable.prototype.getTokenToInsert = function (tokType) {
var tokToInsert = tokens_public_1.createTokenInstance(tokType, "", NaN, NaN, NaN, NaN, NaN, NaN);
var tokToInsert = (0, tokens_public_1.createTokenInstance)(tokType, "", NaN, NaN, NaN, NaN, NaN, NaN);
tokToInsert.isInsertedInRecovery = true;

@@ -67,3 +67,3 @@ return tokToInsert;

// the first token here will be the original cause of the error, this is not part of the resyncedTokens property.
error.resyncedTokens = utils_1.dropRight(resyncedTokens);
error.resyncedTokens = (0, utils_1.dropRight)(resyncedTokens);
_this.SAVE_ERROR(error);
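getTokenToInsert above fabricates a virtual token (NaN positions, flagged isInsertedInRecovery) for single-token-insertion recovery, and the resynced tokens are attached to the error without the original offender. None of this machinery runs unless recovery is opted into; a minimal sketch:

const { CstParser } = require("chevrotain");
class RecoveringParser extends CstParser {
  constructor() {
    // recoveryEnabled turns on re-sync recovery plus single token insertion/deletion
    super(allTokens, { recoveryEnabled: true }); // allTokens is hypothetical
    // ... rule definitions ...
    this.performSelfAnalysis();
  }
}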

@@ -152,7 +152,7 @@ };

// must know the possible following tokens to perform single token insertion
if (utils_1.isEmpty(follows)) {
if ((0, utils_1.isEmpty)(follows)) {
return false;
}
var mismatchedTok = this.LA(1);
var isMisMatchedTokInFollows = utils_1.find(follows, function (possibleFollowsTokType) {
var isMisMatchedTokInFollows = (0, utils_1.find)(follows, function (possibleFollowsTokType) {
return _this.tokenMatcher(mismatchedTok, possibleFollowsTokType);

@@ -169,3 +169,3 @@ }) !== undefined;

var currentRuleReSyncSet = this.getFollowSetFromFollowKey(followKey);
return utils_1.contains(currentRuleReSyncSet, tokenTypeIdx);
return (0, utils_1.contains)(currentRuleReSyncSet, tokenTypeIdx);
};

@@ -179,3 +179,3 @@ Recoverable.prototype.findReSyncTokenType = function () {

var nextTokenType = nextToken.tokenType;
if (utils_1.contains(allPossibleReSyncTokTypes, nextTokenType)) {
if ((0, utils_1.contains)(allPossibleReSyncTokTypes, nextTokenType)) {
return nextTokenType;

@@ -205,3 +205,3 @@ }

var explicitOccurrenceStack = this.RULE_OCCURRENCE_STACK;
return utils_1.map(explicitRuleStack, function (ruleName, idx) {
return (0, utils_1.map)(explicitRuleStack, function (ruleName, idx) {
if (idx === 0) {

@@ -219,6 +219,6 @@ return exports.EOF_FOLLOW_KEY;

var _this = this;
var followStack = utils_1.map(this.buildFullFollowKeyStack(), function (currKey) {
var followStack = (0, utils_1.map)(this.buildFullFollowKeyStack(), function (currKey) {
return _this.getFollowSetFromFollowKey(currKey);
});
return utils_1.flatten(followStack);
return (0, utils_1.flatten)(followStack);
};

@@ -248,3 +248,3 @@ Recoverable.prototype.getFollowSetFromFollowKey = function (followKey) {

// the last token is not part of the error.
return utils_1.dropRight(resyncedTokens);
return (0, utils_1.dropRight)(resyncedTokens);
};

@@ -257,3 +257,3 @@ Recoverable.prototype.attemptInRepetitionRecovery = function (prodFunc, args, lookaheadFunc, dslMethodIdx, prodOccurrence, nextToksWalker, notStuck) {

var pathRuleStack = this.getHumanReadableRuleStack();
var pathOccurrenceStack = utils_1.cloneArr(this.RULE_OCCURRENCE_STACK);
var pathOccurrenceStack = (0, utils_1.cloneArr)(this.RULE_OCCURRENCE_STACK);
var grammarPath = {

@@ -269,3 +269,3 @@ ruleStack: pathRuleStack,

var _this = this;
return utils_1.map(this.RULE_STACK, function (currShortName) {
return (0, utils_1.map)(this.RULE_STACK, function (currShortName) {
return _this.shortRuleNameToFullName(currShortName);

@@ -272,0 +272,0 @@ });

@@ -18,3 +18,3 @@ "use strict";

this.outputCst = config.outputCst;
this.nodeLocationTracking = utils_1.has(config, "nodeLocationTracking")
this.nodeLocationTracking = (0, utils_1.has)(config, "nodeLocationTracking")
? config.nodeLocationTracking

@@ -49,3 +49,4 @@ : parser_1.DEFAULT_PARSER_CONFIG.nodeLocationTracking;

this.cstPostRule = utils_1.NOOP;
this.setInitialNodeLocation = this.setInitialNodeLocationOnlyOffsetRecovery;
this.setInitialNodeLocation =
this.setInitialNodeLocationOnlyOffsetRecovery;
}

@@ -56,3 +57,4 @@ else {

this.cstPostRule = this.cstPostRuleOnlyOffset;
this.setInitialNodeLocation = this.setInitialNodeLocationOnlyOffsetRegular;
this.setInitialNodeLocation =
this.setInitialNodeLocationOnlyOffsetRegular;
}
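The two reformatted hunks above only re-wrap assignments, but they sit inside the logic that picks a location-tracking strategy from the nodeLocationTracking config. A usage sketch, assuming the documented "full" | "onlyOffset" | "none" values:

const parser = new MyParser(allTokens, {
  // "full" populates each CST node's .location with offsets, lines and columns;
  // "onlyOffset" tracks offsets alone; "none" disables location tracking
  nodeLocationTracking: "full" // MyParser and allTokens are hypothetical
});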

@@ -156,3 +158,3 @@ }

var rootCst = this.CST_STACK[this.CST_STACK.length - 1];
cst_1.addTerminalToCst(rootCst, consumedToken, key);
(0, cst_1.addTerminalToCst)(rootCst, consumedToken, key);
// This is only used when **both** error recovery and CST Output are enabled.

@@ -163,3 +165,3 @@ this.setNodeLocationFromToken(rootCst.location, consumedToken);

var preCstNode = this.CST_STACK[this.CST_STACK.length - 1];
cst_1.addNoneTerminalToCst(preCstNode, ruleName, ruleCstResult);
(0, cst_1.addNoneTerminalToCst)(preCstNode, ruleName, ruleCstResult);
// This is only used when **both** error recovery and CST Output are enabled.

@@ -169,4 +171,4 @@ this.setNodeLocationFromNode(preCstNode.location, ruleCstResult.location);

TreeBuilder.prototype.getBaseCstVisitorConstructor = function () {
if (utils_1.isUndefined(this.baseCstVisitorConstructor)) {
var newBaseCstVisitorConstructor = cst_visitor_1.createBaseSemanticVisitorConstructor(this.className, utils_1.keys(this.gastProductionsCache));
if ((0, utils_1.isUndefined)(this.baseCstVisitorConstructor)) {
var newBaseCstVisitorConstructor = (0, cst_visitor_1.createBaseSemanticVisitorConstructor)(this.className, (0, utils_1.keys)(this.gastProductionsCache));
this.baseCstVisitorConstructor = newBaseCstVisitorConstructor;

@@ -178,4 +180,4 @@ return newBaseCstVisitorConstructor;

TreeBuilder.prototype.getBaseCstVisitorConstructorWithDefaults = function () {
if (utils_1.isUndefined(this.baseCstVisitorWithDefaultsConstructor)) {
var newConstructor = cst_visitor_1.createBaseVisitorConstructorWithDefaults(this.className, utils_1.keys(this.gastProductionsCache), this.getBaseCstVisitorConstructor());
if ((0, utils_1.isUndefined)(this.baseCstVisitorWithDefaultsConstructor)) {
var newConstructor = (0, cst_visitor_1.createBaseVisitorConstructorWithDefaults)(this.className, (0, utils_1.keys)(this.gastProductionsCache), this.getBaseCstVisitorConstructor());
this.baseCstVisitorWithDefaultsConstructor = newConstructor;

@@ -182,0 +184,0 @@ return newConstructor;
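Both getters above lazily build a visitor base class from the cached grammar productions, with the WithDefaults variant falling back to visiting children for rules the user leaves unimplemented. Typical downstream usage (the rule name is hypothetical):

const BaseVisitor = parser.getBaseCstVisitorConstructor();
class AstBuilder extends BaseVisitor {
  constructor() {
    super();
    this.validateVisitor(); // throws if a grammar rule lacks a matching visit method
  }
  value(ctx) {
    // one method per grammar rule; build and return your AST node from ctx here
  }
}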

@@ -62,3 +62,3 @@ "use strict";

// todo: defaults func?
this.config = utils_1.merge(DEFAULT_LEXER_CONFIG, config);
this.config = (0, utils_1.merge)(DEFAULT_LEXER_CONFIG, config);
var traceInitVal = this.config.traceInitPerf;

@@ -96,5 +96,5 @@ if (traceInitVal === true) {

// Convert SingleModeLexerDefinition into a IMultiModeLexerDefinition.
if (utils_1.isArray(lexerDefinition)) {
if ((0, utils_1.isArray)(lexerDefinition)) {
actualDefinition = { modes: {} };
actualDefinition.modes[lexer_1.DEFAULT_MODE] = utils_1.cloneArr(lexerDefinition);
actualDefinition.modes[lexer_1.DEFAULT_MODE] = (0, utils_1.cloneArr)(lexerDefinition);
actualDefinition[lexer_1.DEFAULT_MODE] = lexer_1.DEFAULT_MODE;

@@ -105,3 +105,3 @@ }

hasOnlySingleMode = false;
actualDefinition = utils_1.cloneObj(lexerDefinition);
actualDefinition = (0, utils_1.cloneObj)(lexerDefinition);
}
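Per the comment above, an array-form lexer definition is normalized into a single-mode IMultiModeLexerDefinition, so the two forms below should be interchangeable (token names are hypothetical, and "defaultMode" assumes lexer_1.DEFAULT_MODE keeps that literal value):

const { Lexer } = require("chevrotain");
const lexerA = new Lexer([WhiteSpace, Identifier, Integer]);
const lexerB = new Lexer({
  modes: { defaultMode: [WhiteSpace, Identifier, Integer] },
  defaultMode: "defaultMode"
});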

@@ -111,6 +111,6 @@ });

_this.TRACE_INIT("performRuntimeChecks", function () {
_this.lexerDefinitionErrors = _this.lexerDefinitionErrors.concat(lexer_1.performRuntimeChecks(actualDefinition, _this.trackStartLines, _this.config.lineTerminatorCharacters));
_this.lexerDefinitionErrors = _this.lexerDefinitionErrors.concat((0, lexer_1.performRuntimeChecks)(actualDefinition, _this.trackStartLines, _this.config.lineTerminatorCharacters));
});
_this.TRACE_INIT("performWarningRuntimeChecks", function () {
_this.lexerDefinitionWarning = _this.lexerDefinitionWarning.concat(lexer_1.performWarningRuntimeChecks(actualDefinition, _this.trackStartLines, _this.config.lineTerminatorCharacters));
_this.lexerDefinitionWarning = _this.lexerDefinitionWarning.concat((0, lexer_1.performWarningRuntimeChecks)(actualDefinition, _this.trackStartLines, _this.config.lineTerminatorCharacters));
});

@@ -124,7 +124,7 @@ }

// this transformation is to increase robustness in the case of partially invalid lexer definition.
utils_1.forEach(actualDefinition.modes, function (currModeValue, currModeName) {
actualDefinition.modes[currModeName] = utils_1.reject(currModeValue, function (currTokType) { return utils_1.isUndefined(currTokType); });
(0, utils_1.forEach)(actualDefinition.modes, function (currModeValue, currModeName) {
actualDefinition.modes[currModeName] = (0, utils_1.reject)(currModeValue, function (currTokType) { return (0, utils_1.isUndefined)(currTokType); });
});
var allModeNames = utils_1.keys(actualDefinition.modes);
utils_1.forEach(actualDefinition.modes, function (currModDef, currModName) {
var allModeNames = (0, utils_1.keys)(actualDefinition.modes);
(0, utils_1.forEach)(actualDefinition.modes, function (currModDef, currModName) {
_this.TRACE_INIT("Mode: <" + currModName + "> processing", function () {

@@ -134,3 +134,3 @@ _this.modes.push(currModName);

_this.TRACE_INIT("validatePatterns", function () {
_this.lexerDefinitionErrors = _this.lexerDefinitionErrors.concat(lexer_1.validatePatterns(currModDef, allModeNames));
_this.lexerDefinitionErrors = _this.lexerDefinitionErrors.concat((0, lexer_1.validatePatterns)(currModDef, allModeNames));
});

@@ -141,9 +141,8 @@ }

// to performing the analysis anyhow...
if (utils_1.isEmpty(_this.lexerDefinitionErrors)) {
tokens_1.augmentTokenTypes(currModDef);
if ((0, utils_1.isEmpty)(_this.lexerDefinitionErrors)) {
(0, tokens_1.augmentTokenTypes)(currModDef);
var currAnalyzeResult_1;
_this.TRACE_INIT("analyzeTokenTypes", function () {
currAnalyzeResult_1 = lexer_1.analyzeTokenTypes(currModDef, {
lineTerminatorCharacters: _this.config
.lineTerminatorCharacters,
currAnalyzeResult_1 = (0, lexer_1.analyzeTokenTypes)(currModDef, {
lineTerminatorCharacters: _this.config.lineTerminatorCharacters,
positionTracking: config.positionTracking,

@@ -159,3 +158,3 @@ ensureOptimizations: config.ensureOptimizations,

currAnalyzeResult_1.charCodeToPatternIdxToConfig;
_this.emptyGroups = utils_1.merge(_this.emptyGroups, currAnalyzeResult_1.emptyGroups);
_this.emptyGroups = (0, utils_1.merge)(_this.emptyGroups, currAnalyzeResult_1.emptyGroups);
_this.hasCustom = currAnalyzeResult_1.hasCustom || _this.hasCustom;

@@ -168,5 +167,5 @@ _this.canModeBeOptimized[currModName] =

_this.defaultMode = actualDefinition.defaultMode;
if (!utils_1.isEmpty(_this.lexerDefinitionErrors) &&
if (!(0, utils_1.isEmpty)(_this.lexerDefinitionErrors) &&
!_this.config.deferDefinitionErrorsHandling) {
var allErrMessages = utils_1.map(_this.lexerDefinitionErrors, function (error) {
var allErrMessages = (0, utils_1.map)(_this.lexerDefinitionErrors, function (error) {
return error.message;

@@ -178,4 +177,4 @@ });

// Only print warnings if there are no errors; this avoids polluting the output with warnings when errors will be thrown anyway.
utils_1.forEach(_this.lexerDefinitionWarning, function (warningDescriptor) {
utils_1.PRINT_WARNING(warningDescriptor.message);
(0, utils_1.forEach)(_this.lexerDefinitionWarning, function (warningDescriptor) {
(0, utils_1.PRINT_WARNING)(warningDescriptor.message);
});

@@ -225,3 +224,3 @@ _this.TRACE_INIT("Choosing sub-methods implementations", function () {

_this.TRACE_INIT("Failed Optimization Warnings", function () {
var unOptimizedModes = utils_1.reduce(_this.canModeBeOptimized, function (cannotBeOptimized, canBeOptimized, modeName) {
var unOptimizedModes = (0, utils_1.reduce)(_this.canModeBeOptimized, function (cannotBeOptimized, canBeOptimized, modeName) {
if (canBeOptimized === false) {

@@ -232,3 +231,3 @@ cannotBeOptimized.push(modeName);

}, []);
if (config.ensureOptimizations && !utils_1.isEmpty(unOptimizedModes)) {
if (config.ensureOptimizations && !(0, utils_1.isEmpty)(unOptimizedModes)) {
throw Error("Lexer Modes: < " + unOptimizedModes.join(", ") + " > cannot be optimized.\n" +

@@ -240,6 +239,6 @@ '\t Disable the "ensureOptimizations" lexer config flag to silently ignore this and run the lexer in an un-optimized mode.\n' +

_this.TRACE_INIT("clearRegExpParserCache", function () {
reg_exp_parser_1.clearRegExpParserCache();
(0, reg_exp_parser_1.clearRegExpParserCache)();
});
_this.TRACE_INIT("toFastProperties", function () {
utils_1.toFastProperties(_this);
(0, utils_1.toFastProperties)(_this);
});

@@ -250,4 +249,4 @@ });

if (initialMode === void 0) { initialMode = this.defaultMode; }
if (!utils_1.isEmpty(this.lexerDefinitionErrors)) {
var allErrMessages = utils_1.map(this.lexerDefinitionErrors, function (error) {
if (!(0, utils_1.isEmpty)(this.lexerDefinitionErrors)) {
var allErrMessages = (0, utils_1.map)(this.lexerDefinitionErrors, function (error) {
return error.message;

@@ -266,3 +265,3 @@ });

var _this = this;
var i, j, matchAltImage, longerAltIdx, matchedImage, payload, altPayload, imageLength, group, tokType, newToken, errLength, droppedChar, msg, match;
var i, j, k, matchAltImage, longerAlt, matchedImage, payload, altPayload, imageLength, group, tokType, newToken, errLength, droppedChar, msg, match;
var orgText = text;

@@ -283,3 +282,3 @@ var orgLength = orgText.length;

var column = this.trackStartLines ? 1 : undefined;
var groups = lexer_1.cloneEmptyGroups(this.emptyGroups);
var groups = (0, lexer_1.cloneEmptyGroups)(this.emptyGroups);
var trackLines = this.trackStartLines;

@@ -298,3 +297,3 @@ var lineTerminatorPattern = this.config.lineTerminatorsPattern;

function getPossiblePatternsOptimized(charCode) {
var optimizedCharIdx = lexer_1.charCodeToOptimizedIndex(charCode);
var optimizedCharIdx = (0, lexer_1.charCodeToOptimizedIndex)(charCode);
var possiblePatterns = currCharCodeToPatternIdxToConfig[optimizedCharIdx];

@@ -329,5 +328,6 @@ if (possiblePatterns === undefined) {

modeStack.pop();
var newMode = utils_1.last(modeStack);
var newMode = (0, utils_1.last)(modeStack);
patternIdxToConfig = _this.patternIdxToConfig[newMode];
currCharCodeToPatternIdxToConfig = _this.charCodeToPatternIdxToConfig[newMode];
currCharCodeToPatternIdxToConfig =
_this.charCodeToPatternIdxToConfig[newMode];
currModePatternsLength = patternIdxToConfig.length;

@@ -345,3 +345,4 @@ var modeCanBeOptimized = _this.canModeBeOptimized[newMode] && _this.config.safeMode === false;

modeStack.push(newMode);
currCharCodeToPatternIdxToConfig = this.charCodeToPatternIdxToConfig[newMode];
currCharCodeToPatternIdxToConfig =
this.charCodeToPatternIdxToConfig[newMode];
patternIdxToConfig = this.patternIdxToConfig[newMode];

@@ -398,32 +399,38 @@ currModePatternsLength = patternIdxToConfig.length;

// this can be used to prioritize keywords over identifiers
longerAltIdx = currConfig.longerAlt;
if (longerAltIdx !== undefined) {
longerAlt = currConfig.longerAlt;
if (longerAlt !== undefined) {
// TODO: micro optimize, avoid extra prop access
// by saving/linking longerAlt on the original config?
var longerAltConfig = patternIdxToConfig[longerAltIdx];
var longerAltPattern = longerAltConfig.pattern;
altPayload = null;
// single Char can never be a longer alt so no need to test it.
// manually in-lined because > 600 chars won't be in-lined in V8
if (longerAltConfig.isCustom === true) {
match = longerAltPattern.exec(orgText, offset, matchedTokens, groups);
if (match !== null) {
matchAltImage = match[0];
if (match.payload !== undefined) {
altPayload = match.payload;
var longerAltLength = longerAlt.length;
for (k = 0; k < longerAltLength; k++) {
var longerAltConfig = patternIdxToConfig[longerAlt[k]];
var longerAltPattern = longerAltConfig.pattern;
altPayload = null;
// single Char can never be a longer alt so no need to test it.
// manually in-lined because > 600 chars won't be in-lined in V8
if (longerAltConfig.isCustom === true) {
match = longerAltPattern.exec(orgText, offset, matchedTokens, groups);
if (match !== null) {
matchAltImage = match[0];
if (match.payload !== undefined) {
altPayload = match.payload;
}
}
else {
matchAltImage = null;
}
}
else {
matchAltImage = null;
this.updateLastIndex(longerAltPattern, offset);
matchAltImage = this.match(longerAltPattern, text, offset);
}
if (matchAltImage && matchAltImage.length > matchedImage.length) {
matchedImage = matchAltImage;
payload = altPayload;
currConfig = longerAltConfig;
// Exit the loop early after matching one of the longer alternatives
// The first matched alternative takes precedence
break;
}
}
else {
this.updateLastIndex(longerAltPattern, offset);
matchAltImage = this.match(longerAltPattern, text, offset);
}
if (matchAltImage && matchAltImage.length > matchedImage.length) {
matchedImage = matchAltImage;
payload = altPayload;
currConfig = longerAltConfig;
}
}
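This rewritten block is the substantive change in the hunk: longerAlt is now an array of pattern indices tried in order, and the first alternative producing a longer image wins (hence the early break). That matches the LONGER_ALT array handling added in analyzeTokenTypes below; a hedged sketch:

const { createToken } = require("chevrotain");
const Identifier = createToken({ name: "Identifier", pattern: /[a-zA-Z]\w*/ });
const While = createToken({
  name: "While",
  pattern: /while/,
  // 9.0.x accepted a single token type; 9.1.0 also accepts an array, tried in order,
  // so "whileFoo" lexes as one Identifier rather than While followed by Identifier
  longer_alt: [Identifier]
});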

@@ -666,3 +673,3 @@ break;

}
var _a = utils_1.timer(phaseImpl), time = _a.time, value = _a.value;
var _a = (0, utils_1.timer)(phaseImpl), time = _a.time, value = _a.value;
/* istanbul ignore next - Difficult to reproduce specific performance behavior (>10ms) in tests */

@@ -669,0 +676,0 @@ var traceMethod = time > 10 ? console.warn : console.log;

@@ -37,3 +37,3 @@ "use strict";

function analyzeTokenTypes(tokenTypes, options) {
options = utils_1.defaults(options, {
options = (0, utils_1.defaults)(options, {
useSticky: exports.SUPPORT_STICKY,

@@ -52,3 +52,3 @@ debug: false,

tracer("Reject Lexer.NA", function () {
onlyRelevantTypes = utils_1.reject(tokenTypes, function (currType) {
onlyRelevantTypes = (0, utils_1.reject)(tokenTypes, function (currType) {
return currType[PATTERN] === lexer_public_1.Lexer.NA;

@@ -61,6 +61,6 @@ });

hasCustom = false;
allTransformedPatterns = utils_1.map(onlyRelevantTypes, function (currType) {
allTransformedPatterns = (0, utils_1.map)(onlyRelevantTypes, function (currType) {
var currPattern = currType[PATTERN];
/* istanbul ignore else */
if (utils_1.isRegExp(currPattern)) {
if ((0, utils_1.isRegExp)(currPattern)) {
var regExpSource = currPattern.source;

@@ -78,3 +78,3 @@ if (regExpSource.length === 1 &&

// not a meta character
!utils_1.contains([
!(0, utils_1.contains)([
"d",

@@ -108,3 +108,3 @@ "D",

}
else if (utils_1.isFunction(currPattern)) {
else if ((0, utils_1.isFunction)(currPattern)) {
hasCustom = true;

@@ -114,3 +114,3 @@ // CustomPatternMatcherFunc - custom patterns do not require any transformations, only wrapping in a RegExp Like object

}
else if (utils_1.has(currPattern, "exec")) {
else if ((0, utils_1.has)(currPattern, "exec")) {
hasCustom = true;

@@ -139,8 +139,8 @@ // ICustomPattern

var patternIdxToGroup;
var patternIdxToLongerAltIdx;
var patternIdxToLongerAltIdxArr;
var patternIdxToPushMode;
var patternIdxToPopMode;
tracer("misc mapping", function () {
patternIdxToType = utils_1.map(onlyRelevantTypes, function (currType) { return currType.tokenTypeIdx; });
patternIdxToGroup = utils_1.map(onlyRelevantTypes, function (clazz) {
patternIdxToType = (0, utils_1.map)(onlyRelevantTypes, function (currType) { return currType.tokenTypeIdx; });
patternIdxToGroup = (0, utils_1.map)(onlyRelevantTypes, function (clazz) {
var groupName = clazz.GROUP;

@@ -151,6 +151,6 @@ /* istanbul ignore next */

}
else if (utils_1.isString(groupName)) {
else if ((0, utils_1.isString)(groupName)) {
return groupName;
}
else if (utils_1.isUndefined(groupName)) {
else if ((0, utils_1.isUndefined)(groupName)) {
return false;

@@ -162,12 +162,14 @@ }

});
patternIdxToLongerAltIdx = utils_1.map(onlyRelevantTypes, function (clazz) {
patternIdxToLongerAltIdxArr = (0, utils_1.map)(onlyRelevantTypes, function (clazz) {
var longerAltType = clazz.LONGER_ALT;
if (longerAltType) {
var longerAltIdx = utils_1.indexOf(onlyRelevantTypes, longerAltType);
return longerAltIdx;
var longerAltIdxArr = (0, utils_1.isArray)(longerAltType)
? (0, utils_1.map)(longerAltType, function (type) { return (0, utils_1.indexOf)(onlyRelevantTypes, type); })
: [(0, utils_1.indexOf)(onlyRelevantTypes, longerAltType)];
return longerAltIdxArr;
}
});
patternIdxToPushMode = utils_1.map(onlyRelevantTypes, function (clazz) { return clazz.PUSH_MODE; });
patternIdxToPopMode = utils_1.map(onlyRelevantTypes, function (clazz) {
return utils_1.has(clazz, "POP_MODE");
patternIdxToPushMode = (0, utils_1.map)(onlyRelevantTypes, function (clazz) { return clazz.PUSH_MODE; });
patternIdxToPopMode = (0, utils_1.map)(onlyRelevantTypes, function (clazz) {
return (0, utils_1.has)(clazz, "POP_MODE");
});

@@ -178,6 +180,6 @@ });

var lineTerminatorCharCodes = getCharCodes(options.lineTerminatorCharacters);
patternIdxToCanLineTerminator = utils_1.map(onlyRelevantTypes, function (tokType) { return false; });
patternIdxToCanLineTerminator = (0, utils_1.map)(onlyRelevantTypes, function (tokType) { return false; });
if (options.positionTracking !== "onlyOffset") {
patternIdxToCanLineTerminator = utils_1.map(onlyRelevantTypes, function (tokType) {
if (utils_1.has(tokType, "LINE_BREAKS")) {
patternIdxToCanLineTerminator = (0, utils_1.map)(onlyRelevantTypes, function (tokType) {
if ((0, utils_1.has)(tokType, "LINE_BREAKS")) {
return tokType.LINE_BREAKS;

@@ -187,3 +189,3 @@ }

if (checkLineBreaksIssues(tokType, lineTerminatorCharCodes) === false) {
return reg_exp_1.canMatchCharCode(lineTerminatorCharCodes, tokType.PATTERN);
return (0, reg_exp_1.canMatchCharCode)(lineTerminatorCharCodes, tokType.PATTERN);
}

@@ -199,7 +201,7 @@ }

tracer("Misc Mapping #2", function () {
patternIdxToIsCustom = utils_1.map(onlyRelevantTypes, isCustomPattern);
patternIdxToShort = utils_1.map(allTransformedPatterns, isShortPattern);
emptyGroups = utils_1.reduce(onlyRelevantTypes, function (acc, clazz) {
patternIdxToIsCustom = (0, utils_1.map)(onlyRelevantTypes, isCustomPattern);
patternIdxToShort = (0, utils_1.map)(allTransformedPatterns, isShortPattern);
emptyGroups = (0, utils_1.reduce)(onlyRelevantTypes, function (acc, clazz) {
var groupName = clazz.GROUP;
if (utils_1.isString(groupName) && !(groupName === lexer_public_1.Lexer.SKIPPED)) {
if ((0, utils_1.isString)(groupName) && !(groupName === lexer_public_1.Lexer.SKIPPED)) {
acc[groupName] = [];

@@ -209,6 +211,6 @@ }

}, {});
patternIdxToConfig = utils_1.map(allTransformedPatterns, function (x, idx) {
patternIdxToConfig = (0, utils_1.map)(allTransformedPatterns, function (x, idx) {
return {
pattern: allTransformedPatterns[idx],
longerAlt: patternIdxToLongerAltIdx[idx],
longerAlt: patternIdxToLongerAltIdxArr[idx],
canLineTerminator: patternIdxToCanLineTerminator[idx],

@@ -229,3 +231,3 @@ isCustom: patternIdxToIsCustom[idx],

tracer("First Char Optimization", function () {
charCodeToPatternIdxToConfig = utils_1.reduce(onlyRelevantTypes, function (result, currTokType, idx) {
charCodeToPatternIdxToConfig = (0, utils_1.reduce)(onlyRelevantTypes, function (result, currTokType, idx) {
if (typeof currTokType.PATTERN === "string") {

@@ -236,5 +238,5 @@ var charCode = currTokType.PATTERN.charCodeAt(0);

}
else if (utils_1.isArray(currTokType.START_CHARS_HINT)) {
else if ((0, utils_1.isArray)(currTokType.START_CHARS_HINT)) {
var lastOptimizedIdx_1;
utils_1.forEach(currTokType.START_CHARS_HINT, function (charOrInt) {
(0, utils_1.forEach)(currTokType.START_CHARS_HINT, function (charOrInt) {
var charCode = typeof charOrInt === "string"

@@ -254,7 +256,7 @@ ? charOrInt.charCodeAt(0)

}
else if (utils_1.isRegExp(currTokType.PATTERN)) {
else if ((0, utils_1.isRegExp)(currTokType.PATTERN)) {
if (currTokType.PATTERN.unicode) {
canBeOptimized = false;
if (options.ensureOptimizations) {
utils_1.PRINT_ERROR("" + reg_exp_1.failedOptimizationPrefixMsg +
(0, utils_1.PRINT_ERROR)("" + reg_exp_1.failedOptimizationPrefixMsg +
("\tUnable to analyze < " + currTokType.PATTERN.toString() + " > pattern.\n") +

@@ -267,7 +269,7 @@ "\tThe regexp unicode flag is not currently supported by the regexp-to-ast library.\n" +

else {
var optimizedCodes = reg_exp_1.getOptimizedStartCodesIndices(currTokType.PATTERN, options.ensureOptimizations);
var optimizedCodes = (0, reg_exp_1.getOptimizedStartCodesIndices)(currTokType.PATTERN, options.ensureOptimizations);
/* istanbul ignore if */
// start code will only be empty given an empty regExp or failure of regexp-to-ast library
// the first should be a different validation and the second cannot be tested.
if (utils_1.isEmpty(optimizedCodes)) {
if ((0, utils_1.isEmpty)(optimizedCodes)) {
// we cannot understand what codes may start possible matches

@@ -278,3 +280,3 @@ // The optimization correctness requires knowing start codes for ALL patterns.

}
utils_1.forEach(optimizedCodes, function (code) {
(0, utils_1.forEach)(optimizedCodes, function (code) {
addToMapOfArrays(result, code, patternIdxToConfig[idx]);

@@ -286,3 +288,3 @@ });

if (options.ensureOptimizations) {
utils_1.PRINT_ERROR("" + reg_exp_1.failedOptimizationPrefixMsg +
(0, utils_1.PRINT_ERROR)("" + reg_exp_1.failedOptimizationPrefixMsg +
("\tTokenType: <" + currTokType.name + "> is using a custom token pattern without providing <start_chars_hint> parameter.\n") +

@@ -299,3 +301,3 @@ "\tThis will disable the lexer's first char optimizations.\n" +
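The PRINT_ERROR above fires when ensureOptimizations is set but a custom token pattern gives the optimizer no starting characters to index by; supplying start_chars_hint restores the first-char optimization. A sketch with a hypothetical custom matcher:

const { createToken } = require("chevrotain");
const Comma = createToken({
  name: "Comma",
  // custom matcher: must return an exec-style array or null
  pattern: (text, offset) => (text.charCodeAt(offset) === 44 ? [","] : null),
  start_chars_hint: [","], // lets the lexer index this pattern under ',' (char code 44)
  line_breaks: false // custom patterns should also declare line break behavior explicitly
});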

tracer("ArrayPacking", function () {
charCodeToPatternIdxToConfig = utils_1.packArray(charCodeToPatternIdxToConfig);
charCodeToPatternIdxToConfig = (0, utils_1.packArray)(charCodeToPatternIdxToConfig);
});

@@ -327,4 +329,4 @@ return {

var errors = [];
var withRegExpPatterns = utils_1.filter(tokenTypes, function (currTokType) {
return utils_1.isRegExp(currTokType[PATTERN]);
var withRegExpPatterns = (0, utils_1.filter)(tokenTypes, function (currTokType) {
return (0, utils_1.isRegExp)(currTokType[PATTERN]);
});

@@ -339,6 +341,6 @@ errors = errors.concat(findEndOfInputAnchor(withRegExpPatterns));

function findMissingPatterns(tokenTypes) {
var tokenTypesWithMissingPattern = utils_1.filter(tokenTypes, function (currType) {
return !utils_1.has(currType, PATTERN);
var tokenTypesWithMissingPattern = (0, utils_1.filter)(tokenTypes, function (currType) {
return !(0, utils_1.has)(currType, PATTERN);
});
var errors = utils_1.map(tokenTypesWithMissingPattern, function (currType) {
var errors = (0, utils_1.map)(tokenTypesWithMissingPattern, function (currType) {
return {

@@ -352,3 +354,3 @@ message: "Token Type: ->" +

});
var valid = utils_1.difference(tokenTypes, tokenTypesWithMissingPattern);
var valid = (0, utils_1.difference)(tokenTypes, tokenTypesWithMissingPattern);
return { errors: errors, valid: valid };

@@ -358,10 +360,10 @@ }

function findInvalidPatterns(tokenTypes) {
var tokenTypesWithInvalidPattern = utils_1.filter(tokenTypes, function (currType) {
var tokenTypesWithInvalidPattern = (0, utils_1.filter)(tokenTypes, function (currType) {
var pattern = currType[PATTERN];
return (!utils_1.isRegExp(pattern) &&
!utils_1.isFunction(pattern) &&
!utils_1.has(pattern, "exec") &&
!utils_1.isString(pattern));
return (!(0, utils_1.isRegExp)(pattern) &&
!(0, utils_1.isFunction)(pattern) &&
!(0, utils_1.has)(pattern, "exec") &&
!(0, utils_1.isString)(pattern));
});
var errors = utils_1.map(tokenTypesWithInvalidPattern, function (currType) {
var errors = (0, utils_1.map)(tokenTypesWithInvalidPattern, function (currType) {
return {

@@ -376,3 +378,3 @@ message: "Token Type: ->" +

});
var valid = utils_1.difference(tokenTypes, tokenTypesWithInvalidPattern);
var valid = (0, utils_1.difference)(tokenTypes, tokenTypesWithInvalidPattern);
return { errors: errors, valid: valid };

@@ -395,6 +397,6 @@ }

}(regexp_to_ast_1.BaseRegExpVisitor));
var invalidRegex = utils_1.filter(tokenTypes, function (currType) {
var invalidRegex = (0, utils_1.filter)(tokenTypes, function (currType) {
var pattern = currType[PATTERN];
try {
var regexpAst = reg_exp_parser_1.getRegExpAst(pattern);
var regexpAst = (0, reg_exp_parser_1.getRegExpAst)(pattern);
var endAnchorVisitor = new EndAnchorFinder();

@@ -410,3 +412,3 @@ endAnchorVisitor.visit(regexpAst);

});
var errors = utils_1.map(invalidRegex, function (currType) {
var errors = (0, utils_1.map)(invalidRegex, function (currType) {
return {

@@ -427,7 +429,7 @@ message: "Unexpected RegExp Anchor Error:\n" +

function findEmptyMatchRegExps(tokenTypes) {
var matchesEmptyString = utils_1.filter(tokenTypes, function (currType) {
var matchesEmptyString = (0, utils_1.filter)(tokenTypes, function (currType) {
var pattern = currType[PATTERN];
return pattern.test("");
});
var errors = utils_1.map(matchesEmptyString, function (currType) {
var errors = (0, utils_1.map)(matchesEmptyString, function (currType) {
return {

@@ -458,6 +460,6 @@ message: "Token Type: ->" +

}(regexp_to_ast_1.BaseRegExpVisitor));
var invalidRegex = utils_1.filter(tokenTypes, function (currType) {
var invalidRegex = (0, utils_1.filter)(tokenTypes, function (currType) {
var pattern = currType[PATTERN];
try {
var regexpAst = reg_exp_parser_1.getRegExpAst(pattern);
var regexpAst = (0, reg_exp_parser_1.getRegExpAst)(pattern);
var startAnchorVisitor = new StartAnchorFinder();

@@ -473,3 +475,3 @@ startAnchorVisitor.visit(regexpAst);

});
var errors = utils_1.map(invalidRegex, function (currType) {
var errors = (0, utils_1.map)(invalidRegex, function (currType) {
return {

@@ -490,7 +492,7 @@ message: "Unexpected RegExp Anchor Error:\n" +

function findUnsupportedFlags(tokenTypes) {
var invalidFlags = utils_1.filter(tokenTypes, function (currType) {
var invalidFlags = (0, utils_1.filter)(tokenTypes, function (currType) {
var pattern = currType[PATTERN];
return pattern instanceof RegExp && (pattern.multiline || pattern.global);
});
var errors = utils_1.map(invalidFlags, function (currType) {
var errors = (0, utils_1.map)(invalidFlags, function (currType) {
return {

@@ -510,6 +512,6 @@ message: "Token Type: ->" +

var found = [];
var identicalPatterns = utils_1.map(tokenTypes, function (outerType) {
return utils_1.reduce(tokenTypes, function (result, innerType) {
var identicalPatterns = (0, utils_1.map)(tokenTypes, function (outerType) {
return (0, utils_1.reduce)(tokenTypes, function (result, innerType) {
if (outerType.PATTERN.source === innerType.PATTERN.source &&
!utils_1.contains(found, innerType) &&
!(0, utils_1.contains)(found, innerType) &&
innerType.PATTERN !== lexer_public_1.Lexer.NA) {

@@ -525,11 +527,11 @@ // this avoids duplicates in the result, each Token Type may only appear in one "set"

});
identicalPatterns = utils_1.compact(identicalPatterns);
var duplicatePatterns = utils_1.filter(identicalPatterns, function (currIdenticalSet) {
identicalPatterns = (0, utils_1.compact)(identicalPatterns);
var duplicatePatterns = (0, utils_1.filter)(identicalPatterns, function (currIdenticalSet) {
return currIdenticalSet.length > 1;
});
var errors = utils_1.map(duplicatePatterns, function (setOfIdentical) {
var tokenTypeNames = utils_1.map(setOfIdentical, function (currType) {
var errors = (0, utils_1.map)(duplicatePatterns, function (setOfIdentical) {
var tokenTypeNames = (0, utils_1.map)(setOfIdentical, function (currType) {
return currType.name;
});
var dupPatternSrc = utils_1.first(setOfIdentical).PATTERN;
var dupPatternSrc = (0, utils_1.first)(setOfIdentical).PATTERN;
return {

@@ -546,10 +548,10 @@ message: "The same RegExp pattern ->" + dupPatternSrc + "<-" +

function findInvalidGroupType(tokenTypes) {
var invalidTypes = utils_1.filter(tokenTypes, function (clazz) {
if (!utils_1.has(clazz, "GROUP")) {
var invalidTypes = (0, utils_1.filter)(tokenTypes, function (clazz) {
if (!(0, utils_1.has)(clazz, "GROUP")) {
return false;
}
var group = clazz.GROUP;
return group !== lexer_public_1.Lexer.SKIPPED && group !== lexer_public_1.Lexer.NA && !utils_1.isString(group);
return group !== lexer_public_1.Lexer.SKIPPED && group !== lexer_public_1.Lexer.NA && !(0, utils_1.isString)(group);
});
var errors = utils_1.map(invalidTypes, function (currType) {
var errors = (0, utils_1.map)(invalidTypes, function (currType) {
return {

@@ -567,6 +569,6 @@ message: "Token Type: ->" +

function findModesThatDoNotExist(tokenTypes, validModes) {
var invalidModes = utils_1.filter(tokenTypes, function (clazz) {
return (clazz.PUSH_MODE !== undefined && !utils_1.contains(validModes, clazz.PUSH_MODE));
var invalidModes = (0, utils_1.filter)(tokenTypes, function (clazz) {
return (clazz.PUSH_MODE !== undefined && !(0, utils_1.contains)(validModes, clazz.PUSH_MODE));
});
var errors = utils_1.map(invalidModes, function (tokType) {
var errors = (0, utils_1.map)(invalidModes, function (tokType) {
var msg = "Token Type: ->" + tokType.name + "<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->" + tokType.PUSH_MODE + "<-" +

@@ -585,3 +587,3 @@ "which does not exist";

var errors = [];
var canBeTested = utils_1.reduce(tokenTypes, function (result, tokType, idx) {
var canBeTested = (0, utils_1.reduce)(tokenTypes, function (result, tokType, idx) {
var pattern = tokType.PATTERN;

@@ -593,6 +595,6 @@ if (pattern === lexer_public_1.Lexer.NA) {

// deeper regExp analysis capabilities
if (utils_1.isString(pattern)) {
if ((0, utils_1.isString)(pattern)) {
result.push({ str: pattern, idx: idx, tokenType: tokType });
}
else if (utils_1.isRegExp(pattern) && noMetaChar(pattern)) {
else if ((0, utils_1.isRegExp)(pattern) && noMetaChar(pattern)) {
result.push({ str: pattern.source, idx: idx, tokenType: tokType });

@@ -602,4 +604,4 @@ }

}, []);
utils_1.forEach(tokenTypes, function (tokType, testIdx) {
utils_1.forEach(canBeTested, function (_a) {
(0, utils_1.forEach)(tokenTypes, function (tokType, testIdx) {
(0, utils_1.forEach)(canBeTested, function (_a) {
var str = _a.str, idx = _a.idx, tokenType = _a.tokenType;

@@ -624,11 +626,11 @@ if (testIdx < idx && testTokenType(str, tokType.PATTERN)) {

/* istanbul ignore else */
if (utils_1.isRegExp(pattern)) {
if ((0, utils_1.isRegExp)(pattern)) {
var regExpArray = pattern.exec(str);
return regExpArray !== null && regExpArray.index === 0;
}
else if (utils_1.isFunction(pattern)) {
else if ((0, utils_1.isFunction)(pattern)) {
// maintain the API of custom patterns
return pattern(str, 0, [], {});
}
else if (utils_1.has(pattern, "exec")) {
else if ((0, utils_1.has)(pattern, "exec")) {
// maintain the API of custom patterns

@@ -661,3 +663,3 @@ return pattern.exec(str, 0, [], {});

];
return (utils_1.find(metaChars, function (char) { return regExp.source.indexOf(char) !== -1; }) === undefined);
return ((0, utils_1.find)(metaChars, function (char) { return regExp.source.indexOf(char) !== -1; }) === undefined);
}

@@ -681,3 +683,3 @@ function addStartOfInput(pattern) {

// some run time checks to help the end users.
if (!utils_1.has(lexerDefinition, exports.DEFAULT_MODE)) {
if (!(0, utils_1.has)(lexerDefinition, exports.DEFAULT_MODE)) {
errors.push({

@@ -690,3 +692,3 @@ message: "A MultiMode Lexer cannot be initialized without a <" +

}
if (!utils_1.has(lexerDefinition, exports.MODES)) {
if (!(0, utils_1.has)(lexerDefinition, exports.MODES)) {
errors.push({

@@ -699,5 +701,5 @@ message: "A MultiMode Lexer cannot be initialized without a <" +

}
if (utils_1.has(lexerDefinition, exports.MODES) &&
utils_1.has(lexerDefinition, exports.DEFAULT_MODE) &&
!utils_1.has(lexerDefinition.modes, lexerDefinition.defaultMode)) {
if ((0, utils_1.has)(lexerDefinition, exports.MODES) &&
(0, utils_1.has)(lexerDefinition, exports.DEFAULT_MODE) &&
!(0, utils_1.has)(lexerDefinition.modes, lexerDefinition.defaultMode)) {
errors.push({

@@ -709,6 +711,6 @@ message: "A MultiMode Lexer cannot be initialized with a " + exports.DEFAULT_MODE + ": <" + lexerDefinition.defaultMode + ">" +

}
if (utils_1.has(lexerDefinition, exports.MODES)) {
utils_1.forEach(lexerDefinition.modes, function (currModeValue, currModeName) {
utils_1.forEach(currModeValue, function (currTokType, currIdx) {
if (utils_1.isUndefined(currTokType)) {
if ((0, utils_1.has)(lexerDefinition, exports.MODES)) {
(0, utils_1.forEach)(lexerDefinition.modes, function (currModeValue, currModeName) {
(0, utils_1.forEach)(currModeValue, function (currTokType, currIdx) {
if ((0, utils_1.isUndefined)(currTokType)) {
errors.push({

@@ -729,7 +731,7 @@ message: "A Lexer cannot be initialized using an undefined Token Type. Mode:" +

var hasAnyLineBreak = false;
var allTokenTypes = utils_1.compact(utils_1.flatten(utils_1.mapValues(lexerDefinition.modes, function (tokTypes) { return tokTypes; })));
var concreteTokenTypes = utils_1.reject(allTokenTypes, function (currType) { return currType[PATTERN] === lexer_public_1.Lexer.NA; });
var allTokenTypes = (0, utils_1.compact)((0, utils_1.flatten)((0, utils_1.mapValues)(lexerDefinition.modes, function (tokTypes) { return tokTypes; })));
var concreteTokenTypes = (0, utils_1.reject)(allTokenTypes, function (currType) { return currType[PATTERN] === lexer_public_1.Lexer.NA; });
var terminatorCharCodes = getCharCodes(lineTerminatorCharacters);
if (trackLines) {
utils_1.forEach(concreteTokenTypes, function (tokType) {
(0, utils_1.forEach)(concreteTokenTypes, function (tokType) {
var currIssue = checkLineBreaksIssues(tokType, terminatorCharCodes);

@@ -747,3 +749,3 @@ if (currIssue !== false) {

// we don't want to attempt to scan if the user explicitly specified the line_breaks option.
if (utils_1.has(tokType, "LINE_BREAKS")) {
if ((0, utils_1.has)(tokType, "LINE_BREAKS")) {
if (tokType.LINE_BREAKS === true) {

@@ -754,3 +756,3 @@ hasAnyLineBreak = true;

else {
if (reg_exp_1.canMatchCharCode(terminatorCharCodes, tokType.PATTERN)) {
if ((0, reg_exp_1.canMatchCharCode)(terminatorCharCodes, tokType.PATTERN)) {
hasAnyLineBreak = true;

@@ -777,7 +779,7 @@ }

var clonedResult = {};
var groupKeys = utils_1.keys(emptyGroups);
utils_1.forEach(groupKeys, function (currKey) {
var groupKeys = (0, utils_1.keys)(emptyGroups);
(0, utils_1.forEach)(groupKeys, function (currKey) {
var currGroupValue = emptyGroups[currKey];
/* istanbul ignore else */
if (utils_1.isArray(currGroupValue)) {
if ((0, utils_1.isArray)(currGroupValue)) {
clonedResult[currKey] = [];

@@ -796,14 +798,14 @@ }

/* istanbul ignore else */
if (utils_1.isRegExp(pattern)) {
if ((0, utils_1.isRegExp)(pattern)) {
return false;
}
else if (utils_1.isFunction(pattern)) {
else if ((0, utils_1.isFunction)(pattern)) {
// CustomPatternMatcherFunc - custom patterns do not require any transformations, only wrapping in a RegExp Like object
return true;
}
else if (utils_1.has(pattern, "exec")) {
else if ((0, utils_1.has)(pattern, "exec")) {
// ICustomPattern
return true;
}
else if (utils_1.isString(pattern)) {
else if ((0, utils_1.isString)(pattern)) {
return false;

@@ -817,3 +819,3 @@ }

function isShortPattern(pattern) {
if (utils_1.isString(pattern) && pattern.length === 1) {
if ((0, utils_1.isString)(pattern) && pattern.length === 1) {
return pattern.charCodeAt(0);

@@ -854,3 +856,3 @@ }

function checkLineBreaksIssues(tokType, lineTerminatorCharCodes) {
if (utils_1.has(tokType, "LINE_BREAKS")) {
if ((0, utils_1.has)(tokType, "LINE_BREAKS")) {
// if the user explicitly declared the line_breaks option we will respect their choice

@@ -862,6 +864,6 @@ // and assume it is correct.

/* istanbul ignore else */
if (utils_1.isRegExp(tokType.PATTERN)) {
if ((0, utils_1.isRegExp)(tokType.PATTERN)) {
try {
// TODO: why is the casting suddenly needed?
reg_exp_1.canMatchCharCode(lineTerminatorCharCodes, tokType.PATTERN);
(0, reg_exp_1.canMatchCharCode)(lineTerminatorCharCodes, tokType.PATTERN);
}

@@ -877,3 +879,3 @@ catch (e) {

}
else if (utils_1.isString(tokType.PATTERN)) {
else if ((0, utils_1.isString)(tokType.PATTERN)) {
// string literal patterns can always be analyzed to detect line terminator usage

@@ -910,4 +912,4 @@ return false;

function getCharCodes(charsOrCodes) {
var charCodes = utils_1.map(charsOrCodes, function (numOrString) {
if (utils_1.isString(numOrString) && numOrString.length > 0) {
var charCodes = (0, utils_1.map)(charsOrCodes, function (numOrString) {
if ((0, utils_1.isString)(numOrString) && numOrString.length > 0) {
return numOrString.charCodeAt(0);

@@ -961,3 +963,3 @@ }

function initCharCodeToOptimizedIndexMap() {
if (utils_1.isEmpty(charCodeToOptimizedIdxMap)) {
if ((0, utils_1.isEmpty)(charCodeToOptimizedIdxMap)) {
charCodeToOptimizedIdxMap = new Array(65536);

@@ -964,0 +966,0 @@ for (var i = 0; i < 65536; i++) {

@@ -28,3 +28,3 @@ "use strict";

try {
var ast = reg_exp_parser_1.getRegExpAst(regExp);
var ast = (0, reg_exp_parser_1.getRegExpAst)(regExp);
var firstChars = firstCharOptimizedIndices(ast.value, {}, ast.flags.ignoreCase);

@@ -39,3 +39,3 @@ return firstChars;

if (ensureOptimizations) {
utils_1.PRINT_WARNING("" + exports.failedOptimizationPrefixMsg +
(0, utils_1.PRINT_WARNING)("" + exports.failedOptimizationPrefixMsg +
("\tUnable to optimize: < " + regExp.toString() + " >\n") +

@@ -54,3 +54,3 @@ "\tComplement Sets cannot be automatically optimized.\n" +

}
utils_1.PRINT_ERROR(exports.failedOptimizationPrefixMsg + "\n" +
(0, utils_1.PRINT_ERROR)(exports.failedOptimizationPrefixMsg + "\n" +
("\tFailed parsing: < " + regExp.toString() + " >\n") +

@@ -100,3 +100,3 @@ ("\tUsing the regexp-to-ast library version: " + regexp_to_ast_1.VERSION + "\n") +

}
utils_1.forEach(atom.value, function (code) {
(0, utils_1.forEach)(atom.value, function (code) {
if (typeof code === "number") {

@@ -126,4 +126,4 @@ addOptimizedIdxToResult(code, result, ignoreCase);

var maxUnOptVal = range.to;
var minOptIdx = lexer_1.charCodeToOptimizedIndex(minUnOptVal);
var maxOptIdx = lexer_1.charCodeToOptimizedIndex(maxUnOptVal);
var minOptIdx = (0, lexer_1.charCodeToOptimizedIndex)(minUnOptVal);
var maxOptIdx = (0, lexer_1.charCodeToOptimizedIndex)(maxUnOptVal);
for (var currOptIdx = minOptIdx; currOptIdx <= maxOptIdx; currOptIdx++) {

@@ -161,7 +161,7 @@ result[currOptIdx] = currOptIdx;

// console.log(Object.keys(result).length)
return utils_1.values(result);
return (0, utils_1.values)(result);
}
exports.firstCharOptimizedIndices = firstCharOptimizedIndices;
function addOptimizedIdxToResult(code, result, ignoreCase) {
var optimizedCharIdx = lexer_1.charCodeToOptimizedIndex(code);
var optimizedCharIdx = (0, lexer_1.charCodeToOptimizedIndex)(code);
result[optimizedCharIdx] = optimizedCharIdx;

@@ -177,3 +177,3 @@ if (ignoreCase === true) {

if (upperChar !== char) {
var optimizedCharIdx = lexer_1.charCodeToOptimizedIndex(upperChar.charCodeAt(0));
var optimizedCharIdx = (0, lexer_1.charCodeToOptimizedIndex)(upperChar.charCodeAt(0));
result[optimizedCharIdx] = optimizedCharIdx;

@@ -184,3 +184,3 @@ }

if (lowerChar !== char) {
var optimizedCharIdx = lexer_1.charCodeToOptimizedIndex(lowerChar.charCodeAt(0));
var optimizedCharIdx = (0, lexer_1.charCodeToOptimizedIndex)(lowerChar.charCodeAt(0));
result[optimizedCharIdx] = optimizedCharIdx;

@@ -191,5 +191,5 @@ }

function findCode(setNode, targetCharCodes) {
return utils_1.find(setNode.value, function (codeOrRange) {
return (0, utils_1.find)(setNode.value, function (codeOrRange) {
if (typeof codeOrRange === "number") {
return utils_1.contains(targetCharCodes, codeOrRange);
return (0, utils_1.contains)(targetCharCodes, codeOrRange);
}

@@ -199,3 +199,3 @@ else {

var range_1 = codeOrRange;
return (utils_1.find(targetCharCodes, function (targetCode) { return range_1.from <= targetCode && targetCode <= range_1.to; }) !== undefined);
return ((0, utils_1.find)(targetCharCodes, function (targetCode) { return range_1.from <= targetCode && targetCode <= range_1.to; }) !== undefined);
}

@@ -211,4 +211,4 @@ });

}
return utils_1.isArray(ast.value)
? utils_1.every(ast.value, isWholeOptional)
return (0, utils_1.isArray)(ast.value)
? (0, utils_1.every)(ast.value, isWholeOptional)
: isWholeOptional(ast.value);

@@ -242,3 +242,3 @@ }

CharCodeFinder.prototype.visitCharacter = function (node) {
if (utils_1.contains(this.targetCharCodes, node.value)) {
if ((0, utils_1.contains)(this.targetCharCodes, node.value)) {
this.found = true;

@@ -263,3 +263,3 @@ }

if (pattern instanceof RegExp) {
var ast = reg_exp_parser_1.getRegExpAst(pattern);
var ast = (0, reg_exp_parser_1.getRegExpAst)(pattern);
var charCodeFinder = new CharCodeFinder(charCodes);

@@ -270,4 +270,4 @@ charCodeFinder.visit(ast);

else {
return (utils_1.find(pattern, function (char) {
return utils_1.contains(charCodes, char.charCodeAt(0));
return ((0, utils_1.find)(pattern, function (char) {
return (0, utils_1.contains)(charCodes, char.charCodeAt(0));
}) !== undefined);

@@ -274,0 +274,0 @@ }

@@ -21,3 +21,3 @@ "use strict";

function hasTokenLabel(obj) {
return utils_1.isString(obj.LABEL) && obj.LABEL !== "";
return (0, utils_1.isString)(obj.LABEL) && obj.LABEL !== "";
}

@@ -42,33 +42,33 @@ exports.hasTokenLabel = hasTokenLabel;

tokenType.name = config.name;
if (!utils_1.isUndefined(pattern)) {
if (!(0, utils_1.isUndefined)(pattern)) {
tokenType.PATTERN = pattern;
}
if (utils_1.has(config, PARENT)) {
if ((0, utils_1.has)(config, PARENT)) {
throw ("The parent property is no longer supported.\n" +
"See: https://github.com/chevrotain/chevrotain/issues/564#issuecomment-349062346 for details.");
}
if (utils_1.has(config, CATEGORIES)) {
if ((0, utils_1.has)(config, CATEGORIES)) {
// casting to ANY as this will be fixed inside `augmentTokenTypes`
tokenType.CATEGORIES = config[CATEGORIES];
}
tokens_1.augmentTokenTypes([tokenType]);
if (utils_1.has(config, LABEL)) {
(0, tokens_1.augmentTokenTypes)([tokenType]);
if ((0, utils_1.has)(config, LABEL)) {
tokenType.LABEL = config[LABEL];
}
if (utils_1.has(config, GROUP)) {
if ((0, utils_1.has)(config, GROUP)) {
tokenType.GROUP = config[GROUP];
}
if (utils_1.has(config, POP_MODE)) {
if ((0, utils_1.has)(config, POP_MODE)) {
tokenType.POP_MODE = config[POP_MODE];
}
if (utils_1.has(config, PUSH_MODE)) {
if ((0, utils_1.has)(config, PUSH_MODE)) {
tokenType.PUSH_MODE = config[PUSH_MODE];
}
if (utils_1.has(config, LONGER_ALT)) {
if ((0, utils_1.has)(config, LONGER_ALT)) {
tokenType.LONGER_ALT = config[LONGER_ALT];
}
if (utils_1.has(config, LINE_BREAKS)) {
if ((0, utils_1.has)(config, LINE_BREAKS)) {
tokenType.LINE_BREAKS = config[LINE_BREAKS];
}
if (utils_1.has(config, START_CHARS_HINT)) {
if ((0, utils_1.has)(config, START_CHARS_HINT)) {
tokenType.START_CHARS_HINT = config[START_CHARS_HINT];

@@ -79,3 +79,3 @@ }

exports.EOF = createToken({ name: "EOF", pattern: lexer_public_1.Lexer.NA });
tokens_1.augmentTokenTypes([exports.EOF]);
(0, tokens_1.augmentTokenTypes)([exports.EOF]);
function createTokenInstance(tokType, image, startOffset, endOffset, startLine, endLine, startColumn, endColumn) {

@@ -96,5 +96,5 @@ return {

function tokenMatcher(token, tokType) {
return tokens_1.tokenStructuredMatcher(token, tokType);
return (0, tokens_1.tokenStructuredMatcher)(token, tokType);
}
exports.tokenMatcher = tokenMatcher;
//# sourceMappingURL=tokens_public.js.map
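createToken above copies each optional config key (LABEL, GROUP, PUSH_MODE, POP_MODE, LONGER_ALT, LINE_BREAKS, START_CHARS_HINT) onto the token type it builds. A combined sketch of the common options (the mode name is hypothetical):

const { createToken, Lexer } = require("chevrotain");
const WS = createToken({
  name: "WS",
  pattern: /[ \t\r\n]+/,
  group: Lexer.SKIPPED, // matched and consumed, but not emitted to the token stream
  line_breaks: true
});
const EnterString = createToken({ name: "EnterString", pattern: /"/, push_mode: "string_mode" });
const ExitString = createToken({ name: "ExitString", pattern: /"/, pop_mode: true });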

@@ -32,3 +32,3 @@ "use strict";

assignCategoriesTokensProp(tokenTypesAndParents);
utils_1.forEach(tokenTypesAndParents, function (tokType) {
(0, utils_1.forEach)(tokenTypesAndParents, function (tokType) {
tokType.isParent = tokType.categoryMatches.length > 0;

@@ -39,10 +39,10 @@ });

function expandCategories(tokenTypes) {
var result = utils_1.cloneArr(tokenTypes);
var result = (0, utils_1.cloneArr)(tokenTypes);
var categories = tokenTypes;
var searching = true;
while (searching) {
categories = utils_1.compact(utils_1.flatten(utils_1.map(categories, function (currTokType) { return currTokType.CATEGORIES; })));
var newCategories = utils_1.difference(categories, result);
categories = (0, utils_1.compact)((0, utils_1.flatten)((0, utils_1.map)(categories, function (currTokType) { return currTokType.CATEGORIES; })));
var newCategories = (0, utils_1.difference)(categories, result);
result = result.concat(newCategories);
if (utils_1.isEmpty(newCategories)) {
if ((0, utils_1.isEmpty)(newCategories)) {
searching = false;

@@ -58,3 +58,3 @@ }

function assignTokenDefaultProps(tokenTypes) {
utils_1.forEach(tokenTypes, function (currTokType) {
(0, utils_1.forEach)(tokenTypes, function (currTokType) {
if (!hasShortKeyProperty(currTokType)) {

@@ -66,3 +66,3 @@ exports.tokenIdxToClass[exports.tokenShortNameIdx] = currTokType;

if (hasCategoriesProperty(currTokType) &&
!utils_1.isArray(currTokType.CATEGORIES)
!(0, utils_1.isArray)(currTokType.CATEGORIES)
// &&

@@ -86,6 +86,6 @@ // !isUndefined(currTokType.CATEGORIES.PATTERN)

function assignCategoriesTokensProp(tokenTypes) {
utils_1.forEach(tokenTypes, function (currTokType) {
(0, utils_1.forEach)(tokenTypes, function (currTokType) {
// avoid duplications
currTokType.categoryMatches = [];
utils_1.forEach(currTokType.categoryMatchesMap, function (val, key) {
(0, utils_1.forEach)(currTokType.categoryMatchesMap, function (val, key) {
currTokType.categoryMatches.push(exports.tokenIdxToClass[key].tokenTypeIdx);

@@ -97,3 +97,3 @@ });

function assignCategoriesMapProp(tokenTypes) {
utils_1.forEach(tokenTypes, function (currTokType) {
(0, utils_1.forEach)(tokenTypes, function (currTokType) {
singleAssignCategoriesToksMap([], currTokType);

@@ -104,9 +104,9 @@ });

function singleAssignCategoriesToksMap(path, nextNode) {
utils_1.forEach(path, function (pathNode) {
(0, utils_1.forEach)(path, function (pathNode) {
nextNode.categoryMatchesMap[pathNode.tokenTypeIdx] = true;
});
utils_1.forEach(nextNode.CATEGORIES, function (nextCategory) {
(0, utils_1.forEach)(nextNode.CATEGORIES, function (nextCategory) {
var newPath = path.concat(nextNode);
// avoids infinite loops due to cyclic categories.
if (!utils_1.contains(newPath, nextCategory)) {
if (!(0, utils_1.contains)(newPath, nextCategory)) {
singleAssignCategoriesToksMap(newPath, nextCategory);

@@ -118,21 +118,21 @@ }

function hasShortKeyProperty(tokType) {
return utils_1.has(tokType, "tokenTypeIdx");
return (0, utils_1.has)(tokType, "tokenTypeIdx");
}
exports.hasShortKeyProperty = hasShortKeyProperty;
function hasCategoriesProperty(tokType) {
return utils_1.has(tokType, "CATEGORIES");
return (0, utils_1.has)(tokType, "CATEGORIES");
}
exports.hasCategoriesProperty = hasCategoriesProperty;
function hasExtendingTokensTypesProperty(tokType) {
return utils_1.has(tokType, "categoryMatches");
return (0, utils_1.has)(tokType, "categoryMatches");
}
exports.hasExtendingTokensTypesProperty = hasExtendingTokensTypesProperty;
function hasExtendingTokensTypesMapProperty(tokType) {
return utils_1.has(tokType, "categoryMatchesMap");
return (0, utils_1.has)(tokType, "categoryMatchesMap");
}
exports.hasExtendingTokensTypesMapProperty = hasExtendingTokensTypesMapProperty;
function isTokenType(tokType) {
return utils_1.has(tokType, "tokenTypeIdx");
return (0, utils_1.has)(tokType, "tokenTypeIdx");
}
exports.isTokenType = isTokenType;
//# sourceMappingURL=tokens.js.map
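
The functions above compute the transitive closure of each TokenType's CATEGORIES (with cycle protection in singleAssignCategoriesToksMap); from the public API side, this closure is what makes category matching work through chains, as in this illustrative sketch:

// Categories are transitive: a token matches every category in its chain.
const Keyword = createToken({ name: "Keyword", pattern: Lexer.NA })
const ReservedKeyword = createToken({
  name: "ReservedKeyword",
  pattern: Lexer.NA,
  categories: [Keyword]
})
const While = createToken({
  name: "While",
  pattern: /while/,
  categories: [ReservedKeyword]
})

// After augmentTokenTypes runs (createToken invokes it internally):
// tokenMatcher(whileToken, ReservedKeyword) === true
// tokenMatcher(whileToken, Keyword) === true  (transitive, via expandCategories)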

@@ -7,3 +7,3 @@ "use strict";

// A separate file avoids cyclic dependencies and webpack errors.
exports.VERSION = "9.0.2";
exports.VERSION = "9.1.0";
//# sourceMappingURL=version.js.map
{
"name": "chevrotain",
"version": "9.0.2",
"version": "9.1.0",
"description": "Chevrotain is a high performance fault tolerant javascript parsing DSL for building recursive decent parsers",

@@ -74,22 +74,22 @@ "keywords": [

"dependencies": {
"@chevrotain/types": "^9.0.2",
"@chevrotain/utils": "^9.0.2",
"@chevrotain/types": "^9.1.0",
"@chevrotain/utils": "^9.1.0",
"regexp-to-ast": "0.5.0"
},
"devDependencies": {
"@types/sinon-chai": "^3.2.0",
"error-stack-parser": "^2.0.6",
"esbuild": "^0.12.8",
"gen-esm-wrapper": "^1.1.2",
"gitty": "^3.6.0",
"jsdom": "16.6.0",
"jsonfile": "^6.0.1",
"require-from-string": "^2.0.2",
"sinon": "^11.1.1",
"sinon-chai": "^3.0.0",
"webpack": "5.38.1",
"webpack-cli": "^4.1.0",
"xregexp": "^5.0.1"
"@types/sinon-chai": "3.2.5",
"error-stack-parser": "2.0.6",
"esbuild": "0.13.4",
"gen-esm-wrapper": "1.1.3",
"gitty": "3.7.2",
"jsdom": "18.0.0",
"jsonfile": "6.1.0",
"require-from-string": "2.0.2",
"sinon": "11.1.2",
"sinon-chai": "3.7.0",
"webpack": "5.58.1",
"webpack-cli": "4.9.0",
"xregexp": "5.1.0"
},
"gitHead": "027c35d42c4976fa7ef438c3af8690783989cfb6"
"gitHead": "45c6c5bc0ea2cf95e16640124e1deb4ece25f4a4"
}

@@ -108,45 +108,47 @@ import { hasTokenLabel, tokenLabel } from "../scan/tokens_public"

export const defaultGrammarResolverErrorProvider: IGrammarResolverErrorMessageProvider = {
buildRuleNotFoundError(
topLevelRule: Rule,
undefinedRule: NonTerminal
): string {
const msg =
"Invalid grammar, reference to a rule which is not defined: ->" +
undefinedRule.nonTerminalName +
"<-\n" +
"inside top level rule: ->" +
topLevelRule.name +
"<-"
return msg
export const defaultGrammarResolverErrorProvider: IGrammarResolverErrorMessageProvider =
{
buildRuleNotFoundError(
topLevelRule: Rule,
undefinedRule: NonTerminal
): string {
const msg =
"Invalid grammar, reference to a rule which is not defined: ->" +
undefinedRule.nonTerminalName +
"<-\n" +
"inside top level rule: ->" +
topLevelRule.name +
"<-"
return msg
}
}
}
export const defaultGrammarValidatorErrorProvider: IGrammarValidatorErrorMessageProvider = {
buildDuplicateFoundError(
topLevelRule: Rule,
duplicateProds: IProductionWithOccurrence[]
): string {
function getExtraProductionArgument(
prod: IProductionWithOccurrence
export const defaultGrammarValidatorErrorProvider: IGrammarValidatorErrorMessageProvider =
{
buildDuplicateFoundError(
topLevelRule: Rule,
duplicateProds: IProductionWithOccurrence[]
): string {
if (prod instanceof Terminal) {
return prod.terminalType.name
} else if (prod instanceof NonTerminal) {
return prod.nonTerminalName
} else {
return ""
function getExtraProductionArgument(
prod: IProductionWithOccurrence
): string {
if (prod instanceof Terminal) {
return prod.terminalType.name
} else if (prod instanceof NonTerminal) {
return prod.nonTerminalName
} else {
return ""
}
}
}
const topLevelName = topLevelRule.name
const duplicateProd = first(duplicateProds)
const index = duplicateProd.idx
const dslName = getProductionDslName(duplicateProd)
const extraArgument = getExtraProductionArgument(duplicateProd)
const topLevelName = topLevelRule.name
const duplicateProd = first(duplicateProds)
const index = duplicateProd.idx
const dslName = getProductionDslName(duplicateProd)
const extraArgument = getExtraProductionArgument(duplicateProd)
const hasExplicitIndex = index > 0
let msg = `->${dslName}${hasExplicitIndex ? index : ""}<- ${
extraArgument ? `with argument: ->${extraArgument}<-` : ""
}
const hasExplicitIndex = index > 0
let msg = `->${dslName}${hasExplicitIndex ? index : ""}<- ${
extraArgument ? `with argument: ->${extraArgument}<-` : ""
}
appears more than once (${

@@ -158,167 +160,169 @@ duplicateProds.length

// white space trimming time! better to trim afterwards as it allows using WELL formatted multi line template strings...
msg = msg.replace(/[ \t]+/g, " ")
msg = msg.replace(/\s\s+/g, "\n")
// white space trimming time! better to trim afterwards as it allows using WELL formatted multi line template strings...
msg = msg.replace(/[ \t]+/g, " ")
msg = msg.replace(/\s\s+/g, "\n")
return msg
},
return msg
},
buildNamespaceConflictError(rule: Rule): string {
const errMsg =
`Namespace conflict found in grammar.\n` +
`The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <${rule.name}>.\n` +
`To resolve this make sure all Terminal and Non-Terminal names are unique\n` +
`This is easy to accomplish by using the convention that Terminal names start with an uppercase letter\n` +
`and Non-Terminal names start with a lower case letter.`
buildNamespaceConflictError(rule: Rule): string {
const errMsg =
`Namespace conflict found in grammar.\n` +
`The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <${rule.name}>.\n` +
`To resolve this make sure all Terminal and Non-Terminal names are unique\n` +
`This is easy to accomplish by using the convention that Terminal names start with an uppercase letter\n` +
`and Non-Terminal names start with a lower case letter.`
return errMsg
},
return errMsg
},
buildAlternationPrefixAmbiguityError(options: {
topLevelRule: Rule
prefixPath: TokenType[]
ambiguityIndices: number[]
alternation: Alternation
}): string {
const pathMsg = map(options.prefixPath, (currTok) =>
tokenLabel(currTok)
).join(", ")
const occurrence =
options.alternation.idx === 0 ? "" : options.alternation.idx
const errMsg =
`Ambiguous alternatives: <${options.ambiguityIndices.join(
" ,"
)}> due to common lookahead prefix\n` +
`in <OR${occurrence}> inside <${options.topLevelRule.name}> Rule,\n` +
`<${pathMsg}> may appear as a prefix path in all these alternatives.\n` +
`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX\n` +
`For further details.`
buildAlternationPrefixAmbiguityError(options: {
topLevelRule: Rule
prefixPath: TokenType[]
ambiguityIndices: number[]
alternation: Alternation
}): string {
const pathMsg = map(options.prefixPath, (currTok) =>
tokenLabel(currTok)
).join(", ")
const occurrence =
options.alternation.idx === 0 ? "" : options.alternation.idx
const errMsg =
`Ambiguous alternatives: <${options.ambiguityIndices.join(
" ,"
)}> due to common lookahead prefix\n` +
`in <OR${occurrence}> inside <${options.topLevelRule.name}> Rule,\n` +
`<${pathMsg}> may appear as a prefix path in all these alternatives.\n` +
`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX\n` +
`For further details.`
return errMsg
},
return errMsg
},
buildAlternationAmbiguityError(options: {
topLevelRule: Rule
prefixPath: TokenType[]
ambiguityIndices: number[]
alternation: Alternation
}): string {
const pathMsg = map(options.prefixPath, (currtok) =>
tokenLabel(currtok)
).join(", ")
const occurrence =
options.alternation.idx === 0 ? "" : options.alternation.idx
let currMessage =
`Ambiguous Alternatives Detected: <${options.ambiguityIndices.join(
" ,"
)}> in <OR${occurrence}>` +
` inside <${options.topLevelRule.name}> Rule,\n` +
`<${pathMsg}> may appear as a prefix path in all these alternatives.\n`
buildAlternationAmbiguityError(options: {
topLevelRule: Rule
prefixPath: TokenType[]
ambiguityIndices: number[]
alternation: Alternation
}): string {
const pathMsg = map(options.prefixPath, (currtok) =>
tokenLabel(currtok)
).join(", ")
const occurrence =
options.alternation.idx === 0 ? "" : options.alternation.idx
let currMessage =
`Ambiguous Alternatives Detected: <${options.ambiguityIndices.join(
" ,"
)}> in <OR${occurrence}>` +
` inside <${options.topLevelRule.name}> Rule,\n` +
`<${pathMsg}> may appear as a prefix path in all these alternatives.\n`
currMessage =
currMessage +
`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES\n` +
`For further details.`
return currMessage
},
currMessage =
currMessage +
`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES\n` +
`For further details.`
return currMessage
},
buildEmptyRepetitionError(options: {
topLevelRule: Rule
repetition: IProductionWithOccurrence
}): string {
let dslName = getProductionDslName(options.repetition)
if (options.repetition.idx !== 0) {
dslName += options.repetition.idx
}
buildEmptyRepetitionError(options: {
topLevelRule: Rule
repetition: IProductionWithOccurrence
}): string {
let dslName = getProductionDslName(options.repetition)
if (options.repetition.idx !== 0) {
dslName += options.repetition.idx
}
const errMsg =
`The repetition <${dslName}> within Rule <${options.topLevelRule.name}> can never consume any tokens.\n` +
`This could lead to an infinite loop.`
const errMsg =
`The repetition <${dslName}> within Rule <${options.topLevelRule.name}> can never consume any tokens.\n` +
`This could lead to an infinite loop.`
return errMsg
},
return errMsg
},
// TODO: remove - `errors_public` from nyc.config.js exclude
// once this method is fully removed from this file
buildTokenNameError(options: {
tokenType: TokenType
expectedPattern: RegExp
}): string {
/* istanbul ignore next */
return "deprecated"
},
// TODO: remove - `errors_public` from nyc.config.js exclude
// once this method is fully removed from this file
buildTokenNameError(options: {
tokenType: TokenType
expectedPattern: RegExp
}): string {
/* istanbul ignore next */
return "deprecated"
},
buildEmptyAlternationError(options: {
topLevelRule: Rule
alternation: Alternation
emptyChoiceIdx: number
}): string {
const errMsg =
`Ambiguous empty alternative: <${options.emptyChoiceIdx + 1}>` +
` in <OR${options.alternation.idx}> inside <${options.topLevelRule.name}> Rule.\n` +
`Only the last alternative may be an empty alternative.`
buildEmptyAlternationError(options: {
topLevelRule: Rule
alternation: Alternation
emptyChoiceIdx: number
}): string {
const errMsg =
`Ambiguous empty alternative: <${options.emptyChoiceIdx + 1}>` +
` in <OR${options.alternation.idx}> inside <${options.topLevelRule.name}> Rule.\n` +
`Only the last alternative may be an empty alternative.`
return errMsg
},
return errMsg
},
buildTooManyAlternativesError(options: {
topLevelRule: Rule
alternation: Alternation
}): string {
const errMsg =
`An Alternation cannot have more than 256 alternatives:\n` +
`<OR${options.alternation.idx}> inside <${
options.topLevelRule.name
}> Rule.\n has ${options.alternation.definition.length + 1} alternatives.`
buildTooManyAlternativesError(options: {
topLevelRule: Rule
alternation: Alternation
}): string {
const errMsg =
`An Alternation cannot have more than 256 alternatives:\n` +
`<OR${options.alternation.idx}> inside <${
options.topLevelRule.name
}> Rule.\n has ${
options.alternation.definition.length + 1
} alternatives.`
return errMsg
},
return errMsg
},
buildLeftRecursionError(options: {
topLevelRule: Rule
leftRecursionPath: Rule[]
}): string {
const ruleName = options.topLevelRule.name
const pathNames = utils.map(
options.leftRecursionPath,
(currRule) => currRule.name
)
const leftRecursivePath = `${ruleName} --> ${pathNames
.concat([ruleName])
.join(" --> ")}`
const errMsg =
`Left Recursion found in grammar.\n` +
`rule: <${ruleName}> can be invoked from itself (directly or indirectly)\n` +
`without consuming any Tokens. The grammar path that causes this is: \n ${leftRecursivePath}\n` +
` To fix this refactor your grammar to remove the left recursion.\n` +
`see: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`
buildLeftRecursionError(options: {
topLevelRule: Rule
leftRecursionPath: Rule[]
}): string {
const ruleName = options.topLevelRule.name
const pathNames = utils.map(
options.leftRecursionPath,
(currRule) => currRule.name
)
const leftRecursivePath = `${ruleName} --> ${pathNames
.concat([ruleName])
.join(" --> ")}`
const errMsg =
`Left Recursion found in grammar.\n` +
`rule: <${ruleName}> can be invoked from itself (directly or indirectly)\n` +
`without consuming any Tokens. The grammar path that causes this is: \n ${leftRecursivePath}\n` +
` To fix this refactor your grammar to remove the left recursion.\n` +
`see: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`
return errMsg
},
return errMsg
},
// TODO: remove - `errors_public` from nyc.config.js exclude
// once this method is fully removed from this file
buildInvalidRuleNameError(options: {
topLevelRule: Rule
expectedPattern: RegExp
}): string {
/* istanbul ignore next */
return "deprecated"
},
// TODO: remove - `errors_public` from nyc.config.js exclude
// once this method is fully removed from this file
buildInvalidRuleNameError(options: {
topLevelRule: Rule
expectedPattern: RegExp
}): string {
/* istanbul ignore next */
return "deprecated"
},
buildDuplicateRuleNameError(options: {
topLevelRule: Rule | string
grammarName: string
}): string {
let ruleName
if (options.topLevelRule instanceof Rule) {
ruleName = options.topLevelRule.name
} else {
ruleName = options.topLevelRule
}
buildDuplicateRuleNameError(options: {
topLevelRule: Rule | string
grammarName: string
}): string {
let ruleName
if (options.topLevelRule instanceof Rule) {
ruleName = options.topLevelRule.name
} else {
ruleName = options.topLevelRule
}
const errMsg = `Duplicate definition, rule: ->${ruleName}<- is already defined in the grammar: ->${options.grammarName}<-`
const errMsg = `Duplicate definition, rule: ->${ruleName}<- is already defined in the grammar: ->${options.grammarName}<-`
return errMsg
return errMsg
}
}
}
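
For example, a rule that can invoke itself before consuming any token surfaces the left-recursion message above during self-analysis; a minimal illustrative sketch:

// Illustrative grammar that triggers buildLeftRecursionError.
const { createToken, CstParser } = require("chevrotain")
const Plus = createToken({ name: "Plus", pattern: /\+/ })
const Int = createToken({ name: "Int", pattern: /\d+/ })

class LeftRecursive extends CstParser {
  constructor() {
    super([Plus, Int])
    const $ = this
    $.RULE("expr", () => {
      $.SUBRULE($.expr) // "expr" invokes itself without consuming any token
      $.CONSUME(Plus)
      $.CONSUME(Int)
    })
    // Throws: "Left Recursion found in grammar. rule: <expr> can be invoked
    // from itself (directly or indirectly) without consuming any Tokens..."
    this.performSelfAnalysis()
  }
}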

@@ -30,3 +30,4 @@ import { contains } from "@chevrotain/utils"

extends Error
implements IRecognitionException {
implements IRecognitionException
{
context: IRecognizerContext

@@ -33,0 +34,0 @@ resyncedTokens = []

@@ -19,3 +19,4 @@ import {

export abstract class AbstractProduction<T extends IProduction = IProduction>
implements IProduction {
implements IProduction
{
public get definition(): T[] {

@@ -40,3 +41,4 @@ return this._definition

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public nonTerminalName: string

@@ -111,3 +113,4 @@ public label?: string

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public idx: number = 1

@@ -131,3 +134,4 @@ public maxLookahead?: number

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public idx: number = 1

@@ -151,3 +155,4 @@ public maxLookahead?: number

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public separator: TokenType

@@ -171,3 +176,4 @@ public idx: number = 1

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public separator: TokenType

@@ -192,3 +198,4 @@ public idx: number = 1

extends AbstractProduction
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public separator: TokenType

@@ -212,3 +219,4 @@ public idx: number = 1

extends AbstractProduction<Alternative>
implements IProductionWithOccurrence {
implements IProductionWithOccurrence
{
public idx: number = 1

@@ -215,0 +223,0 @@ public ignoreAmbiguities: boolean = false

@@ -163,5 +163,3 @@ import { contains, every, has, some } from "@chevrotain/utils"

const collectorVisitor = new DslMethodsCollectorVisitor()
export function collectMethods(
rule: Rule
): {
export function collectMethods(rule: Rule): {
option: Option[]

@@ -168,0 +166,0 @@ alternation: Alternation[]

@@ -644,8 +644,7 @@ import {

if (contains(this.definedRulesNames, name)) {
const errMsg = defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError(
{
const errMsg =
defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({
topLevelRule: name,
grammarName: this.className
}
)
})

@@ -652,0 +651,0 @@ const error = {
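
The guard above records a buildDuplicateRuleNameError when two rules share a name; an illustrative sketch:

// Defining the same rule name twice records a duplicate-definition error,
// which surfaces when performSelfAnalysis() validates the grammar.
class Dup extends CstParser {
  constructor() {
    super([])
    const $ = this
    $.RULE("stmt", () => {})
    $.RULE("stmt", () => {}) // "Duplicate definition, rule: ->stmt<- ..."
    this.performSelfAnalysis()
  }
}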

@@ -266,5 +266,4 @@ import {

if (this.outputCst) {
const partialCstResult: any = this.CST_STACK[
this.CST_STACK.length - 1
]
const partialCstResult: any =
this.CST_STACK[this.CST_STACK.length - 1]
partialCstResult.recoveredNode = true

@@ -277,5 +276,4 @@ return partialCstResult

if (this.outputCst) {
const partialCstResult: any = this.CST_STACK[
this.CST_STACK.length - 1
]
const partialCstResult: any =
this.CST_STACK[this.CST_STACK.length - 1]
partialCstResult.recoveredNode = true

@@ -282,0 +280,0 @@ recogError.partialCstResult = partialCstResult

@@ -419,6 +419,4 @@ import { createTokenInstance, EOF } from "../../../scan/tokens_public"

const ruleGrammar = this.getGAstProductions()[currRuleName]
const walker: AbstractNextTerminalAfterProductionWalker = new nextToksWalker(
ruleGrammar,
prodOccurrence
)
const walker: AbstractNextTerminalAfterProductionWalker =
new nextToksWalker(ruleGrammar, prodOccurrence)
firstAfterRepInfo = walker.startWalking()

@@ -425,0 +423,0 @@ this.firstAfterRepMap[key] = firstAfterRepInfo

@@ -80,3 +80,4 @@ import {

this.cstPostRule = NOOP
this.setInitialNodeLocation = this.setInitialNodeLocationOnlyOffsetRecovery
this.setInitialNodeLocation =
this.setInitialNodeLocationOnlyOffsetRecovery
} else {

@@ -86,3 +87,4 @@ this.setNodeLocationFromToken = NOOP

this.cstPostRule = this.cstPostRuleOnlyOffset
this.setInitialNodeLocation = this.setInitialNodeLocationOnlyOffsetRegular
this.setInitialNodeLocation =
this.setInitialNodeLocationOnlyOffsetRegular
}

@@ -89,0 +91,0 @@ } else if (/none/i.test(this.nodeLocationTracking)) {

@@ -248,4 +248,4 @@ import {

currAnalyzeResult = analyzeTokenTypes(currModDef, {
lineTerminatorCharacters: this.config
.lineTerminatorCharacters,
lineTerminatorCharacters:
this.config.lineTerminatorCharacters,
positionTracking: config.positionTracking,

@@ -405,4 +405,5 @@ ensureOptimizations: config.ensureOptimizations,

j,
k,
matchAltImage,
longerAltIdx,
longerAlt,
matchedImage,

@@ -473,5 +474,6 @@ payload,

// thus the pop is ignored, an error will be created and the lexer will continue parsing in the previous mode.
const msg = this.config.errorMessageProvider.buildUnableToPopLexerModeMessage(
popToken
)
const msg =
this.config.errorMessageProvider.buildUnableToPopLexerModeMessage(
popToken
)

@@ -493,5 +495,4 @@ errors.push({

patternIdxToConfig = this.patternIdxToConfig[newMode]
currCharCodeToPatternIdxToConfig = this.charCodeToPatternIdxToConfig[
newMode
]
currCharCodeToPatternIdxToConfig =
this.charCodeToPatternIdxToConfig[newMode]
currModePatternsLength = patternIdxToConfig.length

@@ -511,5 +512,4 @@ const modeCanBeOptimized =

modeStack.push(newMode)
currCharCodeToPatternIdxToConfig = this.charCodeToPatternIdxToConfig[
newMode
]
currCharCodeToPatternIdxToConfig =
this.charCodeToPatternIdxToConfig[newMode]

@@ -573,36 +573,42 @@ patternIdxToConfig = this.patternIdxToConfig[newMode]

// this can be used to prioritize keywords over identifiers
longerAltIdx = currConfig.longerAlt
if (longerAltIdx !== undefined) {
longerAlt = currConfig.longerAlt
if (longerAlt !== undefined) {
// TODO: micro optimize, avoid extra prop access
// by saving/linking longerAlt on the original config?
const longerAltConfig = patternIdxToConfig[longerAltIdx]
const longerAltPattern = longerAltConfig.pattern
altPayload = null
const longerAltLength = longerAlt.length
for (k = 0; k < longerAltLength; k++) {
const longerAltConfig = patternIdxToConfig[longerAlt[k]]
const longerAltPattern = longerAltConfig.pattern
altPayload = null
// single Char can never be a longer alt so no need to test it.
// manually in-lined because > 600 chars won't be in-lined in V8
if (longerAltConfig.isCustom === true) {
match = longerAltPattern.exec(
orgText,
offset,
matchedTokens,
groups
)
if (match !== null) {
matchAltImage = match[0]
if (match.payload !== undefined) {
altPayload = match.payload
// single Char can never be a longer alt so no need to test it.
// manually in-lined because > 600 chars won't be in-lined in V8
if (longerAltConfig.isCustom === true) {
match = longerAltPattern.exec(
orgText,
offset,
matchedTokens,
groups
)
if (match !== null) {
matchAltImage = match[0]
if (match.payload !== undefined) {
altPayload = match.payload
}
} else {
matchAltImage = null
}
} else {
matchAltImage = null
this.updateLastIndex(longerAltPattern, offset)
matchAltImage = this.match(longerAltPattern, text, offset)
}
} else {
this.updateLastIndex(longerAltPattern, offset)
matchAltImage = this.match(longerAltPattern, text, offset)
}
if (matchAltImage && matchAltImage.length > matchedImage.length) {
matchedImage = matchAltImage
payload = altPayload
currConfig = longerAltConfig
if (matchAltImage && matchAltImage.length > matchedImage.length) {
matchedImage = matchAltImage
payload = altPayload
currConfig = longerAltConfig
// Exit the loop early after matching one of the longer alternatives
// The first matched alternative takes precedence
break
}
}

@@ -609,0 +615,0 @@ }
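
This is the substantive change of 9.1.0: LONGER_ALT may now hold several alternatives, tried in order, and the added break keeps the first longer match. A hedged usage sketch with illustrative tokens:

// Since 9.1.0 longer_alt may be an array of token types (sketch).
const Decimal = createToken({ name: "Decimal", pattern: /\d+\.\d+/ })
const Version = createToken({ name: "Version", pattern: /\d+\.\d+\.\d+/ })
const Integer = createToken({
  name: "Integer",
  pattern: /\d+/,
  // Tried in order; the first alternative matching a longer image wins.
  longer_alt: [Version, Decimal]
})
// Lexing "1.2.3": Integer matches "1", then the longer alternatives are tried
// in order and Version ("1.2.3") is kept, being the first longer match.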

@@ -49,3 +49,3 @@ import { BaseRegExpVisitor } from "regexp-to-ast"

pattern: IRegExpExec
longerAlt: number
longerAlt: number[]
canLineTerminator: boolean

@@ -196,3 +196,3 @@ isCustom: boolean

let patternIdxToGroup
let patternIdxToLongerAltIdx
let patternIdxToLongerAltIdxArr
let patternIdxToPushMode

@@ -220,8 +220,10 @@ let patternIdxToPopMode

patternIdxToLongerAltIdx = map(onlyRelevantTypes, (clazz: any) => {
patternIdxToLongerAltIdxArr = map(onlyRelevantTypes, (clazz: any) => {
const longerAltType = clazz.LONGER_ALT
if (longerAltType) {
const longerAltIdx = indexOf(onlyRelevantTypes, longerAltType)
return longerAltIdx
const longerAltIdxArr = isArray(longerAltType)
? map(longerAltType, (type: any) => indexOf(onlyRelevantTypes, type))
: [indexOf(onlyRelevantTypes, longerAltType)]
return longerAltIdxArr
}

@@ -284,3 +286,3 @@ })

pattern: allTransformedPatterns[idx],
longerAlt: patternIdxToLongerAltIdx[idx],
longerAlt: patternIdxToLongerAltIdxArr[idx],
canLineTerminator: patternIdxToCanLineTerminator[idx],

@@ -868,4 +870,3 @@ isCustom: patternIdxToIsCustom[idx],

`which does not exist\n`,
type:
LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST
type: LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST
})

@@ -882,4 +883,3 @@ }

`<${currModeName}> at index: <${currIdx}>\n`,
type:
LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED
type: LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED
})

@@ -886,0 +886,0 @@ }

// needs a separate module as this is required inside chevrotain productive code
// and also in the entry point for webpack(api.ts).
// A separate file avoids cyclic dependencies and webpack errors.
export const VERSION = "9.0.2"
export const VERSION = "9.1.0"
