New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

antlr4-c3

Package Overview
Dependencies
Maintainers
1
Versions
57
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

antlr4-c3 - npm Package Compare versions

Comparing version 1.1.0 to 1.1.1

97

out/src/CodeCompletionCore.js

@@ -197,3 +197,2 @@ 'use strict';

processRule(startState, tokenIndex, callStack, indentation) {
let result = new Set();
let positionMap = this.shortcutMap.get(startState.ruleIndex);

@@ -212,2 +211,3 @@ if (!positionMap) {

}
let result = new Set();
let setsPerState = CodeCompletionCore.followSetsByATN.get(this.parser.constructor.name);

@@ -289,56 +289,61 @@ if (!setsPerState) {

let transitions = currentEntry.state.getTransitions();
for (let i = transitions.length - 1; i >= 0; --i) {
let transition = transitions[i];
if (transition.serializationType == 3) {
let endStatus = this.processRule(transition.target, currentEntry.tokenIndex, callStack, indentation);
for (let position of endStatus) {
statePipeline.push({ state: transition.followState, tokenIndex: position });
}
}
else if (transition.serializationType == 4) {
if (this.checkPredicate(transition))
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex });
}
else if (transition.isEpsilon) {
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex });
}
else if (transition.serializationType == 9) {
if (atCaret) {
if (!this.translateToRuleIndex(callStack)) {
for (let token of misc_1.IntervalSet.of(antlr4ts_1.Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType).toList())
if (!this.ignoredTokens.has(token))
this.candidates.tokens.set(token, []);
for (let transition of transitions) {
switch (transition.serializationType) {
case 3: {
let endStatus = this.processRule(transition.target, currentEntry.tokenIndex, callStack, indentation);
for (let position of endStatus) {
statePipeline.push({ state: transition.followState, tokenIndex: position });
}
break;
}
else {
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex + 1 });
case 4: {
if (this.checkPredicate(transition))
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex });
break;
}
}
else {
let set = transition.label;
if (set && set.size > 0) {
if (transition.serializationType == 8) {
set = set.complement(misc_1.IntervalSet.of(antlr4ts_1.Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType));
}
case 9: {
if (atCaret) {
if (!this.translateToRuleIndex(callStack)) {
let list = set.toList();
let addFollowing = list.length == 1;
for (let symbol of list)
if (!this.ignoredTokens.has(symbol)) {
if (this.showDebugOutput)
console.log("=====> collected: ", this.vocabulary.getDisplayName(symbol));
if (addFollowing)
this.candidates.tokens.set(symbol, this.getFollowingTokens(transition));
else
this.candidates.tokens.set(symbol, []);
}
for (let token of misc_1.IntervalSet.of(antlr4ts_1.Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType).toList())
if (!this.ignoredTokens.has(token))
this.candidates.tokens.set(token, []);
}
}
else {
if (set.contains(currentSymbol)) {
if (this.showDebugOutput)
console.log("=====> consumed: ", this.vocabulary.getDisplayName(currentSymbol));
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex + 1 });
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex + 1 });
}
break;
}
default: {
if (transition.isEpsilon) {
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex });
continue;
}
let set = transition.label;
if (set && set.size > 0) {
if (transition.serializationType == 8) {
set = set.complement(misc_1.IntervalSet.of(antlr4ts_1.Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType));
}
if (atCaret) {
if (!this.translateToRuleIndex(callStack)) {
let list = set.toList();
let addFollowing = list.length == 1;
for (let symbol of list)
if (!this.ignoredTokens.has(symbol)) {
if (this.showDebugOutput)
console.log("=====> collected: ", this.vocabulary.getDisplayName(symbol));
if (addFollowing)
this.candidates.tokens.set(symbol, this.getFollowingTokens(transition));
else
this.candidates.tokens.set(symbol, []);
}
}
}
else {
if (set.contains(currentSymbol)) {
if (this.showDebugOutput)
console.log("=====> consumed: ", this.vocabulary.getDisplayName(currentSymbol));
statePipeline.push({ state: transition.target, tokenIndex: currentEntry.tokenIndex + 1 });
}
}
}

@@ -345,0 +350,0 @@ }

{
"name": "antlr4-c3",
"version": "1.1.0",
"version": "1.1.1",
"description": "A code completion core implementation for ANTLR4 based parsers",

@@ -8,5 +8,4 @@ "main": "out/index.js",

"scripts": {
"prepare": "tsc",
"prepublish": "check-node-version --npm \">=4\" || npm run prepare",
"test": "tsc && mocha out/test"
"prepublishOnly": "npm run test",
"test": "npm install typescript && tsc && npm install mocha && mocha out/test"
},

@@ -21,20 +20,19 @@ "repository": {

"auto completion",
"grammar"
"grammar",
"parser"
],
"dependencies": {
"antlr4ts": "^0.4.0-alpha.4"
"antlr4ts": "^0.4.0-burt.2"
},
"devDependencies": {
"@types/chai": "^3.4.34",
"@types/fs-extra": "^3.0.3",
"@types/mocha": "^2.2.32",
"@types/node": "^6.0.40",
"@types/fs-extra": "^3.0.3",
"@types/node": "^6.0.78",
"antlr4ts-cli": "^0.4.0-alpha.4",
"chai": "^3.5.0",
"fs-extra": "^3.0.1",
"mocha": "^2.5.3",
"typescript": "^2.0.3",
"ts-node": "^1.7.3",
"fs-extra": "^3.0.1",
"path": "^0.12.7",
"check-node-version": "^1.1.2"
"typescript": "^2.3.4"
},

@@ -41,0 +39,0 @@ "author": "Mike Lischke",

@@ -30,33 +30,37 @@ [![NPM](https://nodei.co/npm/antlr4-c3.png?downloads=true&downloadRank=true)](https://nodei.co/npm/antlr4-c3/)

var a = b + c()
```typescript
var a = b + c()
```
Such a grammar could look like:
grammar Expr;
expression: assignment | simpleExpression;
```antlr
grammar Expr;
expression: assignment | simpleExpression;
assignment: (VAR | LET) ID EQUAL simpleExpression;
assignment: (VAR | LET) ID EQUAL simpleExpression;
simpleExpression
: simpleExpression (PLUS | MINUS) simpleExpression
| simpleExpression (MULTIPLY | DIVIDE) simpleExpression
| variableRef
| functionRef
;
simpleExpression
: simpleExpression (PLUS | MINUS) simpleExpression
| simpleExpression (MULTIPLY | DIVIDE) simpleExpression
| variableRef
| functionRef
;
variableRef: ID;
functionRef: ID OPEN_PAR CLOSE_PAR;
variableRef: ID;
functionRef: ID OPEN_PAR CLOSE_PAR;
VAR: [vV] [aA] [rR];
LET: [lL] [eE] [tT];
VAR: [vV] [aA] [rR];
LET: [lL] [eE] [tT];
PLUS: '+';
MINUS: '-';
MULTIPLY: '*';
DIVIDE: '/';
EQUAL: '=';
OPEN_PAR: '(';
CLOSE_PAR: ')';
ID: [a-zA-Z] [a-zA-Z0-9_]*;
WS: [ \n\r\t] -> channel(HIDDEN);
PLUS: '+';
MINUS: '-';
MULTIPLY: '*';
DIVIDE: '/';
EQUAL: '=';
OPEN_PAR: '(';
CLOSE_PAR: ')';
ID: [a-zA-Z] [a-zA-Z0-9_]*;
WS: [ \n\r\t] -> channel(HIDDEN);
```

@@ -67,8 +71,12 @@ You can see the 2 special rules `variableRef` and `functionRef`, which mostly consist of the `ID` lexer rule. We could have instead used a single `ID` reference in the `simpleExpression` rule. However, this is where your domain knowledge about the language comes in. By making the two use cases explicit you can now exactly tell what to query from your symbol table. As you see we are using parser rules to denote entity types, which is half of the magic here.

dropTable: DROP TABLE tableRef;
tableRef: ID;
```mysql
dropTable: DROP TABLE tableRef;
tableRef: ID;
```
instead of:
dropTable: DROP TABLE ID;
```mysql
dropTable: DROP TABLE ID;
```

@@ -80,13 +88,15 @@ Then tell the c3 engine that you want to get back `tableRef` if it is a valid candidate at a given position.

let inputStream = new ANTLRInputStream("var c = a + b()");
let lexer = new ExprLexer(inputStream);
let tokenStream = new CommonTokenStream(lexer);
```typescript
let inputStream = new ANTLRInputStream("var c = a + b()");
let lexer = new ExprLexer(inputStream);
let tokenStream = new CommonTokenStream(lexer);
let parser = new ExprParser(tokenStream);
let errorListener = new ErrorListener();
parser.addErrorListener(errorListener);
let tree = parser.expression();
let parser = new ExprParser(tokenStream);
let errorListener = new ErrorListener();
parser.addErrorListener(errorListener);
let tree = parser.expression();
let core = new c3.CodeCompletionCore(parser);
let candidates = core.collectCandidates(0);
let core = new c3.CodeCompletionCore(parser);
let candidates = core.collectCandidates(0);
```

@@ -103,10 +113,14 @@ This is a pretty standard parser setup here. It's not even necessary to actually parse the input. But the c3 engine needs a few things for its work:

class CandidatesCollection {
public tokens: Map<number, TokenList>;
public rules: RuleList;
};
```typescript
class CandidatesCollection {
public tokens: Map<number, TokenList>;
public rules: RuleList;
};
```
For the lexer tokens there can be a list of extra tokens which directly follow the given token in the grammar (if any). That's quite a neat additional feature which allows you to show token sequences to the user if they are always used together. For example consider this SQL rule:
createTable: CREATE TABLE (IF NOT EXISTS)? ...;
```typescript
createTable: CREATE TABLE (IF NOT EXISTS)? ...;
```

@@ -119,37 +133,38 @@ Here, if a possible candidate is the `IF` keyword, you can also show the entire `IF NOT EXISTS` sequence to the user (and let him complete all 3 words in one go in the source code). The engine will return a candidate entry for `IF` with a token list containing `NOT` and `EXISTS`. This list will of course update properly when the user comes to `NOT`. Then you will get a candidate entry for `NOT` and an additional list of just `EXISTS`.

let keywords: string[] = [];
for (let candidate of candidates.tokens) {
keywords.push(parser.vocabulary.getDisplayName(candidate[0]));
```typescript
let keywords: string[] = [];
for (let candidate of candidates.tokens) {
keywords.push(parser.vocabulary.getDisplayName(candidate[0]));
}
let symbol = ...; // Find the symbol that covers your caret position.
let functionNames: string[] = [];
let variableNames: string[] = [];
for (let candidate of candidates.rules) {
switch (candidate[0]) {
case ExprParser.RULE_functionRef: {
let functions = symbol.getSymbolsOfType(c3.FunctionSymbol);
for (const fn of functions)
functionNames.push(fn.name);
break;
}
let symbol = ...; // Find the symbol that covers your caret position.
let functionNames: string[] = [];
let variableNames: string[] = [];
for (let candidate of candidates.rules) {
switch (candidate[0]) {
case ExprParser.RULE_functionRef: {
let functions = symbol.getSymbolsOfType(c3.FunctionSymbol);
for (const fn of functions)
functionNames.push(fn.name);
break;
}
case ExprParser.RULE_variableRef: {
let variables = symbol.getSymbolsOfType(c3.VariableSymbol);
for (const variable of variables)
variableNames.push(variable.name);
break;
}
}
case ExprParser.RULE_variableRef: {
let variables = symbol.getSymbolsOfType(c3.VariableSymbol);
for (const variable of variables)
variableNames.push(variable.name);
break;
}
}
}
// Finally combine all found lists into one for the UI.
// We do that in separate steps so that you can apply some ordering to each of your sublists.
// Then you can also order symbol groups as a whole depending on their importance.
let candidates: string[] = [];
candidates.push(...keywords);
candidates.push(...functionNames);
candidates.push(...variableNames);
// Finally combine all found lists into one for the UI.
// We do that in separate steps so that you can apply some ordering to each of your sublists.
// Then you can also order symbol groups as a whole depending on their importance.
let candidates: string[] = [];
candidates.push(...keywords);
candidates.push(...functionNames);
candidates.push(...variableNames);
```
# Fine Tuning

@@ -159,9 +174,11 @@ ## Ignored Tokens

core.ignoredTokens = new Set([
ExprLexer.ID,
ExprLexer.PLUS, ExprLexer.MINUS,
ExprLexer.MULTIPLY, ExprLexer.DIVIDE,
ExprLexer.EQUAL,
ExprLexer.OPEN_PAR, ExprLexer.CLOSE_PAR,
]);
```typescript
core.ignoredTokens = new Set([
ExprLexer.ID,
ExprLexer.PLUS, ExprLexer.MINUS,
ExprLexer.MULTIPLY, ExprLexer.DIVIDE,
ExprLexer.EQUAL,
ExprLexer.OPEN_PAR, ExprLexer.CLOSE_PAR,
]);
```

@@ -168,0 +185,0 @@ ## Preferred Rules

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc