
antlr4-c3 - npm Package Compare versions

Comparing version 1.1.13 to 1.1.14

.eslintrc.json


contributors.txt

@@ -58,2 +58,3 @@ ANTLR4-C3 Project Contributors Certification of Origin and Rights
 2020/02/10, kyle-painter, Kyle Painter, kpainter101@gmail.com
 2020/08/06, tamcgoey, Thomas McGoey-Smith, thomas@sourdough.dev
+2020/11/24, alessiostalla, Alessio Stalla, alessiostalla@gmail.com

CodeCompletionCore.js

@@ -69,13 +69,12 @@ 'use strict';
 let tokenStream = this.parser.inputStream;
 let currentIndex = tokenStream.index;
 tokenStream.seek(this.tokenStartIndex);
 this.tokens = [];
-let offset = 1;
+let offset = this.tokenStartIndex;
 while (true) {
-    let token = tokenStream.LT(offset++);
-    this.tokens.push(token);
+    const token = tokenStream.get(offset++);
+    if (token.channel === antlr4ts_1.Token.DEFAULT_CHANNEL) {
+        this.tokens.push(token);
+    }
     if (token.tokenIndex >= caretTokenIndex || token.type == antlr4ts_1.Token.EOF)
         break;
 }
 tokenStream.seek(currentIndex);
 let callStack = [];

@@ -94,3 +93,3 @@ let startRule = context ? context.ruleIndex : 0;
 }
-let sortedTokens = new Set();
+const sortedTokens = new Set();
 for (let token of this.candidates.tokens) {

@@ -114,4 +113,5 @@ let value = this.vocabulary.getDisplayName(token[0]);
 translateStackToRuleIndex(ruleWithStartTokenList) {
-    if (this.preferredRules.size == 0)
+    if (this.preferredRules.size == 0) {
         return false;
+    }
     if (this.translateRulesTopDown) {

@@ -136,7 +136,8 @@ for (let i = ruleWithStartTokenList.length - 1; i >= 0; i--) {
 if (this.preferredRules.has(ruleIndex)) {
-    let path = ruleWithStartTokenList.slice(0, i).map(({ ruleIndex }) => ruleIndex);
+    const path = ruleWithStartTokenList.slice(0, i).map(({ ruleIndex }) => ruleIndex);
     let addNew = true;
     for (let rule of this.candidates.rules) {
-        if (rule[0] != ruleIndex || rule[1].ruleList.length != path.length)
+        if (rule[0] != ruleIndex || rule[1].ruleList.length != path.length) {
             continue;
+        }
         if (path.every((v, j) => v === rule[1].ruleList[j])) {

@@ -152,4 +153,5 @@ addNew = false;
 });
-if (this.showDebugOutput)
+if (this.showDebugOutput) {
     console.log("=====> collected: ", this.ruleNames[ruleIndex]);
+}
 }

@@ -161,7 +163,6 @@ return true;
 getFollowingTokens(transition) {
-    let result = [];
-    let seen = [];
-    let pipeline = [transition.target];
+    const result = [];
+    const pipeline = [transition.target];
     while (pipeline.length > 0) {
-        let state = pipeline.pop();
+        const state = pipeline.pop();
         for (let transition of state.getTransitions()) {

@@ -185,12 +186,13 @@ if (transition.serializationType == 5) {
 determineFollowSets(start, stop) {
-    let result = [];
-    let seen = new Set();
-    let ruleStack = [];
-    this.collectFollowSets(start, stop, result, seen, ruleStack);
+    const result = [];
+    const stateStack = [];
+    const ruleStack = [];
+    this.collectFollowSets(start, stop, result, stateStack, ruleStack);
     return result;
 }
-collectFollowSets(s, stopState, followSets, seen, ruleStack) {
-    if (seen.has(s))
+collectFollowSets(s, stopState, followSets, stateStack, ruleStack) {
+    if (stateStack.find(x => x == s)) {
         return;
-    seen.add(s);
+    }
+    stateStack.push(s);
     if (s == stopState || s.stateType == atn_1.ATNStateType.RULE_STOP) {

@@ -201,2 +203,3 @@ let set = new FollowSetWithPath();
 followSets.push(set);
+stateStack.pop();
 return;

@@ -207,14 +210,16 @@ }
 let ruleTransition = transition;
-if (ruleStack.indexOf(ruleTransition.target.ruleIndex) != -1)
+if (ruleStack.indexOf(ruleTransition.target.ruleIndex) != -1) {
     continue;
+}
 ruleStack.push(ruleTransition.target.ruleIndex);
-this.collectFollowSets(transition.target, stopState, followSets, seen, ruleStack);
+this.collectFollowSets(transition.target, stopState, followSets, stateStack, ruleStack);
 ruleStack.pop();
 }
 else if (transition.serializationType == 4) {
-    if (this.checkPredicate(transition))
-        this.collectFollowSets(transition.target, stopState, followSets, seen, ruleStack);
+    if (this.checkPredicate(transition)) {
+        this.collectFollowSets(transition.target, stopState, followSets, stateStack, ruleStack);
+    }
 }
 else if (transition.isEpsilon) {
-    this.collectFollowSets(transition.target, stopState, followSets, seen, ruleStack);
+    this.collectFollowSets(transition.target, stopState, followSets, stateStack, ruleStack);
 }

@@ -233,3 +238,3 @@ else if (transition.serializationType == 9) {
 }
-let set = new FollowSetWithPath();
+const set = new FollowSetWithPath();
 set.intervals = label;

@@ -242,2 +247,3 @@ set.path = ruleStack.slice();
 }
+stateStack.pop();
 }

@@ -258,3 +264,3 @@ processRule(startState, tokenListIndex, callStack, precedence, indentation) {
 }
-let result = new Set();
+const result = new Set();
 let setsPerState = CodeCompletionCore.followSetsByATN.get(this.parser.constructor.name);

@@ -272,4 +278,5 @@ if (!setsPerState) {
 let combined = new IntervalSet_1.IntervalSet();
-for (let set of followSets.sets)
+for (let set of followSets.sets) {
     combined.addAll(set.intervals);
+}
 followSets.combined = combined;

@@ -288,3 +295,3 @@ }
 for (let set of followSets.sets) {
-    let fullPath = callStack.slice();
+    const fullPath = callStack.slice();
     const followSetPath = set.path.map(path => ({

@@ -301,7 +308,9 @@ startTokenIndex,
 }
-if (!this.candidates.tokens.has(symbol))
+if (!this.candidates.tokens.has(symbol)) {
     this.candidates.tokens.set(symbol, set.following);
+}
 else {
-    if (this.candidates.tokens.get(symbol) != set.following)
+    if (this.candidates.tokens.get(symbol) != set.following) {
         this.candidates.tokens.set(symbol, []);
+    }
 }

@@ -316,3 +325,3 @@ }
 else {
-    let currentSymbol = this.tokens[tokenListIndex].type;
+    const currentSymbol = this.tokens[tokenListIndex].type;
     if (!followSets.combined.contains(antlr4ts_1.Token.EPSILON) && !followSets.combined.contains(currentSymbol)) {

@@ -326,3 +335,3 @@ callStack.pop();
 }
-let statePipeline = [];
+const statePipeline = [];
 let currentEntry;

@@ -333,4 +342,4 @@ statePipeline.push({ state: startState, tokenListIndex: tokenListIndex });
 ++this.statesProcessed;
-let currentSymbol = this.tokens[currentEntry.tokenListIndex].type;
-let atCaret = currentEntry.tokenListIndex >= this.tokens.length - 1;
+const currentSymbol = this.tokens[currentEntry.tokenListIndex].type;
+const atCaret = currentEntry.tokenListIndex >= this.tokens.length - 1;
 if (this.showDebugOutput) {

@@ -345,10 +354,13 @@ this.printDescription(indentation, currentEntry.state, this.generateBaseDescription(currentEntry.state), currentEntry.tokenListIndex);
 }
-let transitions = currentEntry.state.getTransitions();
+const transitions = currentEntry.state.getTransitions();
 for (let transition of transitions) {
     switch (transition.serializationType) {
         case 3: {
-            let ruleTransition = transition;
-            let endStatus = this.processRule(transition.target, currentEntry.tokenListIndex, callStack, ruleTransition.precedence, indentation + 1);
+            const ruleTransition = transition;
+            const endStatus = this.processRule(transition.target, currentEntry.tokenListIndex, callStack, ruleTransition.precedence, indentation + 1);
             for (let position of endStatus) {
-                statePipeline.push({ state: transition.followState, tokenListIndex: position });
+                statePipeline.push({
+                    state: transition.followState,
+                    tokenListIndex: position
+                });
             }

@@ -359,3 +371,6 @@ break;
 if (this.checkPredicate(transition))
-    statePipeline.push({ state: transition.target, tokenListIndex: currentEntry.tokenListIndex });
+    statePipeline.push({
+        state: transition.target,
+        tokenListIndex: currentEntry.tokenListIndex
+    });
 break;

@@ -366,3 +381,6 @@ }
 if (predTransition.precedence >= this.precedenceStack[this.precedenceStack.length - 1])
-    statePipeline.push({ state: transition.target, tokenListIndex: currentEntry.tokenListIndex });
+    statePipeline.push({
+        state: transition.target,
+        tokenListIndex: currentEntry.tokenListIndex
+    });
 break;

@@ -375,4 +393,5 @@ }
 .toArray()) {
-    if (!this.ignoredTokens.has(token))
+    if (!this.ignoredTokens.has(token)) {
         this.candidates.tokens.set(token, []);
+    }
 }

@@ -382,3 +401,6 @@ }
 else {
-    statePipeline.push({ state: transition.target, tokenListIndex: currentEntry.tokenListIndex + 1 });
+    statePipeline.push({
+        state: transition.target,
+        tokenListIndex: currentEntry.tokenListIndex + 1
+    });
 }

@@ -389,3 +411,6 @@ break;
 if (transition.isEpsilon) {
-    statePipeline.push({ state: transition.target, tokenListIndex: currentEntry.tokenListIndex });
+    statePipeline.push({
+        state: transition.target,
+        tokenListIndex: currentEntry.tokenListIndex
+    });
     continue;

@@ -404,8 +429,11 @@ }
 if (!this.ignoredTokens.has(symbol)) {
-    if (this.showDebugOutput)
+    if (this.showDebugOutput) {
         console.log("=====> collected: ", this.vocabulary.getDisplayName(symbol));
-    if (addFollowing)
+    }
+    if (addFollowing) {
         this.candidates.tokens.set(symbol, this.getFollowingTokens(transition));
-    else
+    }
+    else {
         this.candidates.tokens.set(symbol, []);
+    }
 }

@@ -416,4 +444,5 @@ }
 if (set.contains(currentSymbol)) {
-    if (this.showDebugOutput)
+    if (this.showDebugOutput) {
         console.log("=====> consumed: ", this.vocabulary.getDisplayName(currentSymbol));
+    }
     statePipeline.push({

@@ -438,4 +467,5 @@ state: transition.target,
 generateBaseDescription(state) {
-    let stateValue = state.stateNumber == atn_1.ATNState.INVALID_STATE_NUMBER ? "Invalid" : state.stateNumber;
-    return "[" + stateValue + " " + this.atnStateTypeMap[state.stateType] + "] in " + this.ruleNames[state.ruleIndex];
+    const stateValue = state.stateNumber == atn_1.ATNState.INVALID_STATE_NUMBER ? "Invalid" : state.stateNumber;
+    return "[" + stateValue + " " + this.atnStateTypeMap[state.stateType] + "] in " +
+        this.ruleNames[state.ruleIndex];
 }

@@ -456,9 +486,11 @@ printDescription(indentation, state, baseDescription, tokenIndex) {
 for (let symbol of symbols) {
-    if (labels.length > 0)
+    if (labels.length > 0) {
         labels += ", ";
+    }
     labels += this.vocabulary.getDisplayName(symbol);
 }
 }
-if (labels.length == 0)
+if (labels.length == 0) {
     labels = "ε";
+}
 transitionDescription += "\n" + indent + "\t(" + labels + ") " + "[" +

@@ -469,6 +501,8 @@ transition.target.stateNumber + " " + this.atnStateTypeMap[transition.target.stateType] + "] in " +
 }
-if (tokenIndex >= this.tokens.length - 1)
+if (tokenIndex >= this.tokens.length - 1) {
     output += "<<" + this.tokenStartIndex + tokenIndex + ">> ";
-else
+}
+else {
     output += "<" + this.tokenStartIndex + tokenIndex + "> ";
+}
 console.log(output + "Current state: " + baseDescription + transitionDescription);

@@ -481,4 +515,5 @@ }
 }
-for (let rule of stack)
+for (let rule of stack) {
     console.log(this.ruleNames[rule.ruleIndex]);
+}
 }


package.json

 {
     "name": "antlr4-c3",
-    "version": "1.1.13",
+    "version": "1.1.14",
     "description": "A code completion core implementation for ANTLR4 based parsers",


README.md

@@ -23,3 +23,3 @@ [![NPM](https://nodei.co/npm/antlr4-c3.png?downloads=true&downloadRank=true)](https://nodei.co/npm/antlr4-c3/)
-While the symbol table provides symbols of a given type, we need to find out which type is actually required. This is the task of the c3 engine. In its simplest setup it will return only keywords (and other lexer symbols) that are allowed by the grammar for a given position (which is of course the same position used to find the context for a symbol lookup in your symbol table). Keywords are a fixed set of words (or word sequences) that usually don't live in a symbol table. You can get the actual text strings directly from the parser's vocabulary. The c3 engine only returns the lexer tokens for them.
+While the symbol table provides symbols of a given type, we need to find out which type is actually required. This is the task of the c3 engine. In its simplest setup it will return only keywords (and other lexer symbols) that are allowed by the grammar for a given position (which is of course the same position used to find the context for a symbol lookup in your symbol table). Keywords are a fixed set of words (or word sequences) that usually don't live in a symbol table. You can get the actual text strings directly from the parser vocabulary. The c3 engine only returns the lexer tokens for them.
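To make the keyword case concrete, here is a minimal sketch of collecting keyword candidates with the engine. `MyLexer`, `MyParser`, its `expression` entry rule, and the caret token index are hypothetical stand-ins for your own ANTLR4-generated classes and editor integration; `CodeCompletionCore` and `collectCandidates` are the entry points this README describes.

```ts
import { CharStreams, CommonTokenStream } from "antlr4ts";
import { CodeCompletionCore } from "antlr4-c3";
// Hypothetical ANTLR4-generated lexer/parser for your grammar.
import { MyLexer } from "./generated/MyLexer";
import { MyParser } from "./generated/MyParser";

const lexer = new MyLexer(CharStreams.fromString("var x = "));
const parser = new MyParser(new CommonTokenStream(lexer));
parser.expression(); // parse with your grammar's entry rule (hypothetical)

const core = new CodeCompletionCore(parser);
const caretTokenIndex = 3; // token index under the caret, computed by your editor layer
const candidates = core.collectCandidates(caretTokenIndex);

// The engine returns lexer token types; resolve their display text
// through the parser vocabulary for the completion UI.
for (const [tokenType] of candidates.tokens) {
    console.log(parser.vocabulary.getDisplayName(tokenType));
}
```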

@@ -120,5 +120,5 @@ In order to also get other types like variables or class names you have to do 2 steps:
 where the map keys are the lexer tokens and the rule indices, respectively. Both can come with additional values, which you may or may not use for your implementation.
 For parser rules the value includes a `startTokenIndex`, which reflects the index of the starting token within the evaluated rule. This allows consumers to determine the range of tokens that should be replaced or matched against when resolving symbols for your rule. The value also contains a rule list which represents the call stack at which the given rule was found during evaluation. This allows consumers to determine a context for rules that are used in different places.
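As an illustration, here is a sketch of reading both result maps, continuing the hypothetical grammar and `core`/`caretTokenIndex` setup from the sketch above; `startTokenIndex` and `ruleList` are the fields this paragraph describes.

```ts
const candidates = core.collectCandidates(caretTokenIndex);

// Lexer tokens: token type -> token types that directly follow it (if any).
for (const [tokenType, following] of candidates.tokens) {
    console.log("token:", parser.vocabulary.getDisplayName(tokenType), following);
}

// Parser rules: rule index -> { startTokenIndex, ruleList }.
for (const [ruleIndex, rule] of candidates.rules) {
    // Tokens from startTokenIndex up to the caret are the text to replace
    // or match when resolving symbols for this rule.
    // ruleList is the call stack under which the rule was reached, which
    // disambiguates rules that are used in several places.
    console.log("rule:", parser.ruleNames[ruleIndex], rule.startTokenIndex, rule.ruleList);
}
```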

@@ -165,3 +165,3 @@ For the lexer tokens the list consists of further token ids which directly follow the given token in the grammar (if any). This allows you to show **token sequences** if they are always used together. For example consider this SQL rule:
 // Finally combine all found lists into one for the UI.
-// We do that in separate steps so that you can apply some ordering to each of your sublists.
+// We do that in separate steps so that you can apply some ordering to each of your sub lists.
 // Then you also can order symbols groups as a whole depending their importance.
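A minimal sketch of the combining step those comments describe, with hypothetical sublists standing in for the keyword, function, and variable lists the full README example builds:

```ts
// Hypothetical sublists collected earlier from candidates.tokens and candidates.rules.
const keywords = ["FROM", "SELECT", "WHERE"];
const functions = ["count", "max", "min"];
const variables = ["myColumn", "myTable"];

// Sort each sublist on its own, then append the groups in order of importance.
const candidateStrings: string[] = [
    ...keywords.sort(),
    ...functions.sort(),
    ...variables.sort(),
];
```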

@@ -189,3 +189,3 @@ let candidates: string[] = [];
 ## Preferred Rules
-As mentioned already the `preferredRules` field is an essential part for getting more than just keywords. It lets you specify the parser rules that are interesting for you and should include the rule indexes for the entities we talked about in the code completion breakdown paragraph above. Whenever the c3 engine hits a lexer token when collecting candidates from a specific ATN state it will check the call stack for it and, if that contains any of the preferred rules, will select that instead of the lexer token. This transformation ensures that the engine returns contextual informations which can actually be used to look up symbols.
+As mentioned already the `preferredRules` field is an essential part for getting more than just keywords. It lets you specify the parser rules that are interesting for you and should include the rule indexes for the entities we talked about in the code completion breakdown paragraph above. Whenever the c3 engine hits a lexer token when collecting candidates from a specific ATN state it will check the call stack for it and, if that contains any of the preferred rules, will select that instead of the lexer token. This transformation ensures that the engine returns contextual information which can actually be used to look up symbols.
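A short sketch of that setup, again with the hypothetical `MyParser` from above and `RULE_*` index constants as generated by ANTLR4:

```ts
const core = new CodeCompletionCore(parser);
// Report these parser rules instead of the lexer tokens reached inside them.
core.preferredRules = new Set([
    MyParser.RULE_identifier, // hypothetical rule index constants
    MyParser.RULE_typeName,
]);

const candidates = core.collectCandidates(caretTokenIndex);
if (candidates.rules.has(MyParser.RULE_identifier)) {
    // An identifier is valid at the caret: look up visible symbols in your
    // symbol table instead of offering the raw IDENTIFIER token.
    console.log("complete identifiers here");
}
```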

@@ -220,2 +220,5 @@ ## Constraining the Search Space
+### 1.1.14
+- Unreleased version
 ### 1.1.13

@@ -229,3 +232,3 @@ - Added a C# port of the library (thanks to Jonathan Philipps)
 - Better handling of recursive rules in code completion (via precedence).
-- Updated to latest antl4ts.
+- Updated to latest antlr4ts.


