New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in | Demo | Install
Socket

antlr4ts

Package Overview
Dependencies
Maintainers
2
Versions
17
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

antlr4ts - npm Package Compare versions

Comparing version 0.5.0-alpha.8565fd2b to 0.5.0-dev

atn.js

14

dist/ANTLRErrorListener.d.ts

@@ -8,3 +8,3 @@ /*!

import { RecognitionException } from "./RecognitionException";
export interface ANTLRErrorListener<Symbol> {
export interface ANTLRErrorListener<TSymbol> {
/**

@@ -17,6 +17,6 @@ * Upon syntax error, notify any interested parties. This is not how to

*
* <p>The {@link RecognitionException} is non-null for all syntax errors except
* The {@link RecognitionException} is non-`undefined` for all syntax errors except
* when we discover mismatched token errors that we can recover from
* in-line, without returning from the surrounding rule (via the single
* token insertion and deletion mechanism).</p>
* token insertion and deletion mechanism).
*

@@ -29,4 +29,4 @@ * @param recognizer

* The offending token in the input token
* stream, unless recognizer is a lexer (then it's null). If
* no viable alternative error, {@code e} has token at which we
* stream, unless recognizer is a lexer (then it's `undefined`). If
* no viable alternative error, `e` has token at which we
* started production for the decision.

@@ -41,7 +41,7 @@ * @param line

* The exception generated by the parser that led to
* the reporting of an error. It is null in the case where
* the reporting of an error. It is `undefined` in the case where
* the parser was able to recover in line without exiting the
* surrounding rule.
*/
syntaxError?: <T extends Symbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined) => void;
syntaxError?: <T extends TSymbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined) => void;
}

@@ -5,2 +5,5 @@ /*!

*/
import { Parser } from "./Parser";
import { Token } from "./Token";
import { RecognitionException } from "./RecognitionException";
/**

@@ -11,8 +14,6 @@ * The interface for defining strategies to deal with syntax errors encountered

*
* <ul>
* <li>The parser could not figure out which path to take in the ATN (none of
* the available alternatives could possibly match)</li>
* <li>The current input does not match what we were looking for</li>
* <li>A predicate evaluated to false</li>
* </ul>
* * The parser could not figure out which path to take in the ATN (none of
* the available alternatives could possibly match)
* * The current input does not match what we were looking for
* * A predicate evaluated to false
*

@@ -22,10 +23,7 @@ * Implementations of this interface report syntax errors by calling

*
* <p>TODO: what to do about lexers</p>
* TODO: what to do about lexers
*/
import { Parser } from "./Parser";
import { Token } from "./Token";
import { RecognitionException } from "./RecognitionException";
export interface ANTLRErrorStrategy {
/**
* Reset the error handler state for the specified {@code recognizer}.
* Reset the error handler state for the specified `recognizer`.
* @param recognizer the parser instance

@@ -41,8 +39,8 @@ */

*
* <p>This method handles the consumption of any tokens - the caller should
* <em>not</em> call {@link Parser#consume} after a successful recovery.</p>
* This method handles the consumption of any tokens - the caller should
* *not* call {@link Parser#consume} after a successful recovery.
*
* <p>Note that the calling code will not report an error if this method
* Note that the calling code will not report an error if this method
* returns successfully. The error strategy implementation is responsible
* for calling {@link Parser#notifyErrorListeners} as appropriate.</p>
* for calling {@link Parser#notifyErrorListeners} as appropriate.
*

@@ -55,3 +53,3 @@ * @param recognizer the parser instance

/**
* This method is called to recover from exception {@code e}. This method is
* This method is called to recover from exception `e`. This method is
* called after {@link #reportError} by the default exception handler

@@ -73,8 +71,8 @@ * generated for a rule method.

*
* <p>The generated code currently contains calls to {@link #sync} after
* entering the decision state of a closure block ({@code (...)*} or
* {@code (...)+}).</p>
* The generated code currently contains calls to {@link #sync} after
* entering the decision state of a closure block (`(...)*` or
* `(...)+`).
*
* <p>For an implementation based on Jim Idle's "magic sync" mechanism, see
* {@link DefaultErrorStrategy#sync}.</p>
* For an implementation based on Jim Idle's "magic sync" mechanism, see
* {@link DefaultErrorStrategy#sync}.
*

@@ -90,11 +88,12 @@ * @see DefaultErrorStrategy#sync

/**
* Tests whether or not {@code recognizer} is in the process of recovering
* Tests whether or not `recognizer` is in the process of recovering
* from an error. In error recovery mode, {@link Parser#consume} adds
* symbols to the parse tree by calling
* {@link ParserRuleContext#addErrorNode(Token)} instead of
* {@link ParserRuleContext#addChild(Token)}.
* {@link Parser#createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
* {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
*
* @param recognizer the parser instance
* @return {@code true} if the parser is currently recovering from a parse
* error, otherwise {@code false}
* @returns `true` if the parser is currently recovering from a parse
* error, otherwise `false`
*/

@@ -101,0 +100,0 @@ inErrorRecoveryMode(recognizer: Parser): boolean;

@@ -7,2 +7,11 @@ /*!

import { Interval } from "./misc/Interval";
/**
* Vacuum all input from a {@link Reader}/{@link InputStream} and then treat it
* like a `char[]` buffer. Can also pass in a {@link String} or
* `char[]` to use.
*
* If you need encoding, pass in stream/reader with correct encoding.
*
* @deprecated as of 4.7, please use `CharStreams` interface.
*/
export declare class ANTLRInputStream implements CharStream {

@@ -9,0 +18,0 @@ /** The data being scanned */

@@ -5,4 +5,4 @@ /*!

*/
import { ATNState } from './ATNState';
import { Transition } from './Transition';
import { ATNState } from "./ATNState";
import { Transition } from "./Transition";
/**

@@ -9,0 +9,0 @@ *

@@ -5,5 +5,5 @@ /*!

*/
import { ATNState } from './ATNState';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
export declare class ActionTransition extends Transition {

@@ -10,0 +10,0 @@ ruleIndex: number;

@@ -5,6 +5,6 @@ /*!

*/
import { BitSet } from '../misc/BitSet';
import { DecisionEventInfo } from './DecisionEventInfo';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { BitSet } from "../misc/BitSet";
import { DecisionEventInfo } from "./DecisionEventInfo";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -16,3 +16,2 @@ * This class represents profiling event information for an ambiguity.

*
* <p>
* This event may be reported during SLL prediction in cases where the

@@ -23,7 +22,6 @@ * conflicting SLL configuration set provides sufficient information to

* traversed a global follow transition (i.e.
* {@link ATNConfig#getReachesIntoOuterContext} is {@code false} for all
* {@link ATNConfig#getReachesIntoOuterContext} is `false` for all
* configurations), then the result of SLL prediction for that input is known to
* be equivalent to the result of LL prediction for that input.</p>
* be equivalent to the result of LL prediction for that input.
*
* <p>
* In some cases, the minimum represented alternative in the conflicting LL

@@ -34,3 +32,3 @@ * configuration set is not equal to the minimum represented alternative in the

* they cannot use the two-stage parsing strategy to improve parsing performance
* for that input.</p>
* for that input.
*

@@ -37,0 +35,0 @@ * @see ParserATNSimulator#reportAmbiguity

@@ -5,13 +5,13 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNType } from './ATNType';
import { DecisionState } from './DecisionState';
import { DFA } from '../dfa/DFA';
import { IntervalSet } from '../misc/IntervalSet';
import { LexerAction } from './LexerAction';
import { PredictionContext } from './PredictionContext';
import { RuleContext } from '../RuleContext';
import { RuleStartState } from './RuleStartState';
import { RuleStopState } from './RuleStopState';
import { TokensStartState } from './TokensStartState';
import { ATNState } from "./ATNState";
import { ATNType } from "./ATNType";
import { DecisionState } from "./DecisionState";
import { DFA } from "../dfa/DFA";
import { IntervalSet } from "../misc/IntervalSet";
import { LexerAction } from "./LexerAction";
import { PredictionContext } from "./PredictionContext";
import { RuleContext } from "../RuleContext";
import { RuleStartState } from "./RuleStartState";
import { RuleStopState } from "./RuleStopState";
import { TokensStartState } from "./TokensStartState";
/** */

@@ -47,3 +47,3 @@ export declare class ATN {

* {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
* deserialization option was specified; otherwise, this is {@code null}.
* deserialization option was specified; otherwise, this is `undefined`.
*/

@@ -67,10 +67,10 @@ ruleToTokenType: Int32Array;

getDecisionToDFA(): DFA[];
/** Compute the set of valid tokens that can occur starting in state {@code s}.
* If {@code ctx} is {@link PredictionContext#EMPTY_LOCAL}, the set of tokens will not include what can follow
* the rule surrounding {@code s}. In other words, the set will be
* restricted to tokens reachable staying within {@code s}'s rule.
/** Compute the set of valid tokens that can occur starting in state `s`.
* If `ctx` is {@link PredictionContext#EMPTY_LOCAL}, the set of tokens will not include what can follow
* the rule surrounding `s`. In other words, the set will be
* restricted to tokens reachable staying within `s`'s rule.
*/
nextTokens(s: ATNState, ctx: PredictionContext): IntervalSet;
/**
* Compute the set of valid tokens that can occur starting in {@code s} and
* Compute the set of valid tokens that can occur starting in `s` and
* staying in same rule. {@link Token#EPSILON} is in set if we reach end of

@@ -88,3 +88,3 @@ * rule.

* Computes the set of input symbols which could follow ATN state number
* {@code stateNumber} in the specified full {@code context}. This method
* `stateNumber` in the specified full `context`. This method
* considers the complete parser context, but does not evaluate semantic

@@ -96,28 +96,28 @@ * predicates (i.e. all predicates encountered during the calculation are

*
* <p>If {@code context} is {@code null}, it is treated as
* {@link ParserRuleContext#EMPTY}.</p>
* If `context` is `undefined`, it is treated as
* {@link ParserRuleContext#EMPTY}.
*
* <p>Note that this does NOT give you the set of all tokens that could
* Note that this does NOT give you the set of all tokens that could
* appear at a given token position in the input phrase. In other words, it
* does not answer:</p>
* does not answer:
*
* <quote>"Given a specific partial input phrase, return the set of all
* tokens that can follow the last token in the input phrase."</quote>
* > Given a specific partial input phrase, return the set of all
* > tokens that can follow the last token in the input phrase.
*
* <p>The big difference is that with just the input, the parser could land
* The big difference is that with just the input, the parser could land
* right in the middle of a lookahead decision. Getting all
* <em>possible</em> tokens given a partial input stream is a separate
* computation. See https://github.com/antlr/antlr4/issues/1428</p>
* *possible* tokens given a partial input stream is a separate
* computation. See https://github.com/antlr/antlr4/issues/1428
*
* <p>For this function, we are specifying an ATN state and call stack to
* For this function, we are specifying an ATN state and call stack to
* compute what token(s) can come next and specifically: outside of a
* lookahead decision. That is what you want for error reporting and
* recovery upon parse error.</p>
* recovery upon parse error.
*
* @param stateNumber the ATN state number
* @param context the full parse context
* @return The set of potentially valid input symbols which could follow the
* @returns The set of potentially valid input symbols which could follow the
* specified state in the specified context.
* @ if the ATN does not contain a state with
* number {@code stateNumber}
* number `stateNumber`
*/

@@ -124,0 +124,0 @@ getExpectedTokens(stateNumber: number, context: RuleContext | undefined): IntervalSet;

@@ -5,9 +5,9 @@ /*!

*/
import { ATNState } from './ATNState';
import { Equatable } from '../misc/Stubs';
import { LexerActionExecutor } from './LexerActionExecutor';
import { PredictionContext } from './PredictionContext';
import { PredictionContextCache } from './PredictionContextCache';
import { Recognizer } from '../Recognizer';
import { SemanticContext } from './SemanticContext';
import { ATNState } from "./ATNState";
import { Equatable } from "../misc/Stubs";
import { LexerActionExecutor } from "./LexerActionExecutor";
import { PredictionContext } from "./PredictionContext";
import { PredictionContextCache } from "./PredictionContextCache";
import { Recognizer } from "../Recognizer";
import { SemanticContext } from "./SemanticContext";
/**

@@ -48,7 +48,5 @@ * Represents a location with context in an ATN. The location is identified by the following values:

*
* <ul>
* <li>0x00FFFFFF: Alternative</li>
* <li>0x7F000000: Outer context depth</li>
* <li>0x80000000: Suppress precedence filter</li>
* </ul>
* * 0x00FFFFFF: Alternative
* * 0x7F000000: Outer context depth
* * 0x80000000: Suppress precedence filter
*/

@@ -79,6 +77,5 @@ private altAndOuterContextDepth;

*
* <p>
* closure() tracks the depth of how far we dip into the outer context:
* depth &gt; 0. Note that it may not be totally accurate depth since I
* don't ever decrement. TODO: make it a boolean then</p>
* don't ever decrement. TODO: make it a boolean then
*/

@@ -121,3 +118,3 @@ outerContextDepth: number;

* @param subconfig The sub configuration.
* @return `true` if this configuration contains `subconfig`; otherwise, `false`.
* @returns `true` if this configuration contains `subconfig`; otherwise, `false`.
*/

@@ -136,3 +133,3 @@ contains(subconfig: ATNConfig): boolean;

*
* @return A Graphviz graph representing the current `ATNConfig`.
* @returns A Graphviz graph representing the current `ATNConfig`.
*

@@ -139,0 +136,0 @@ * @see http://www.graphviz.org/

@@ -5,11 +5,10 @@ /*!

*/
import { Array2DHashSet } from '../misc/Array2DHashSet';
import { ATNConfig } from './ATNConfig';
import { ATNSimulator } from './ATNSimulator';
import { ATNState } from './ATNState';
import { BitSet } from '../misc/BitSet';
import { Collection, JavaIterator } from '../misc/Stubs';
import { ConflictInfo } from './ConflictInfo';
import { JavaSet } from '../misc/Stubs';
import { PredictionContextCache } from './PredictionContextCache';
import { Array2DHashSet } from "../misc/Array2DHashSet";
import { ATNConfig } from "./ATNConfig";
import { ATNSimulator } from "./ATNSimulator";
import { ATNState } from "./ATNState";
import { BitSet } from "../misc/BitSet";
import { ConflictInfo } from "./ConflictInfo";
import { JavaSet } from "../misc/Stubs";
import { PredictionContextCache } from "./PredictionContextCache";
/**

@@ -30,8 +29,8 @@ * Represents a set of ATN configurations (see `ATNConfig`). As configurations are added to the set, they are merged

* the {@link ATNConfig#getSemanticContext} of the value, which is only a problem if a single
* {@code ATNConfigSet} contains two configs with the same state and alternative
* `ATNConfigSet` contains two configs with the same state and alternative
* but different semantic contexts. When this case arises, the first config
* added to this map stays, and the remaining configs are placed in {@link #unmerged}.
* <p>
*
* This map is only used for optimizing the process of adding configs to the set,
* and is {@code null} for read-only sets stored in the DFA.
* and is `undefined` for read-only sets stored in the DFA.
*/

@@ -44,5 +43,5 @@ private mergedConfigs?;

* different semantic contexts.
* <p>
*
* This list is only used for optimizing the process of adding configs to the set,
* and is {@code null} for read-only sets stored in the DFA.
* and is `undefined` for read-only sets stored in the DFA.
*/

@@ -59,8 +58,8 @@ private unmerged?;

/**
* When {@code true}, this config set represents configurations where the entire
* When `true`, this config set represents configurations where the entire
* outer context has been consumed by the ATN interpreter. This prevents the
* {@link ParserATNSimulator#closure} from pursuing the global FOLLOW when a
* rule stop state is reached with an empty prediction context.
* <p>
* Note: {@code outermostConfigSet} and {@link #dipsIntoOuterContext} should never
*
* Note: `outermostConfigSet` and {@link #dipsIntoOuterContext} should never
* be true at the same time.

@@ -85,5 +84,4 @@ */

contains(o: any): boolean;
iterator(): JavaIterator<ATNConfig>;
[Symbol.iterator](): IterableIterator<ATNConfig>;
toArray(): ATNConfig[];
toArray(a?: ATNConfig[]): ATNConfig[];
add(e: ATNConfig): boolean;

@@ -101,7 +99,5 @@ add(e: ATNConfig, contextCache: PredictionContextCache | undefined): boolean;

};
containsAll(c: Collection<any>): boolean;
addAll(c: Collection<ATNConfig>): boolean;
addAll(c: Collection<ATNConfig>, contextCache: PredictionContextCache): boolean;
retainAll(c: Collection<any>): boolean;
removeAll(c: Collection<any>): boolean;
containsAll(c: Iterable<any>): boolean;
addAll(c: Iterable<ATNConfig>): boolean;
addAll(c: Iterable<ATNConfig>, contextCache: PredictionContextCache): boolean;
clear(): void;

@@ -119,5 +115,3 @@ equals(obj: any): boolean;

get(index: number): ATNConfig;
remove(o: any): boolean;
remove(index: number): void;
protected ensureWritable(): void;
}

@@ -5,12 +5,12 @@ /*!

*/
import { ATN } from './ATN';
import { ATNDeserializationOptions } from './ATNDeserializationOptions';
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { IntervalSet } from '../misc/IntervalSet';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { UUID } from '../misc/UUID';
import { ATN } from "./ATN";
import { ATNDeserializationOptions } from "./ATNDeserializationOptions";
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
import { IntervalSet } from "../misc/IntervalSet";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
import { UUID } from "../misc/UUID";
/**

@@ -58,10 +58,10 @@ *

* currently being deserialized.
* @return {@code true} if the {@code actualUuid} value represents a
* serialized ATN at or after the feature identified by {@code feature} was
* introduced; otherwise, {@code false}.
* @returns `true` if the `actualUuid` value represents a
* serialized ATN at or after the feature identified by `feature` was
* introduced; otherwise, `false`.
*/
protected isFeatureSupported(feature: UUID, actualUuid: UUID): boolean;
static toCharArray(str: string): Uint16Array;
deserialize(data: Uint16Array | string): ATN;
private readSets(data, p, sets, read32);
protected static isFeatureSupported(feature: UUID, actualUuid: UUID): boolean;
private static getUnicodeDeserializer(mode);
deserialize(data: Uint16Array): ATN;
private deserializeSets(data, p, sets, unicodeDeserializer);
/**

@@ -68,0 +68,0 @@ * Analyze the {@link StarLoopEntryState} states in the specified ATN to set

@@ -5,4 +5,4 @@ /*!

*/
import { ATN } from './ATN';
import { DFAState } from '../dfa/DFAState';
import { ATN } from "./ATN";
import { DFAState } from "../dfa/DFAState";
export declare abstract class ATNSimulator {

@@ -9,0 +9,0 @@ /** Must distinguish between missing edge and edge we know leads nowhere */

@@ -5,6 +5,6 @@ /*!

*/
import { ATN } from './ATN';
import { ATNStateType } from './ATNStateType';
import { IntervalSet } from '../misc/IntervalSet';
import { Transition } from './Transition';
import { ATN } from "./ATN";
import { ATNStateType } from "./ATNStateType";
import { IntervalSet } from "../misc/IntervalSet";
import { Transition } from "./Transition";
/**

@@ -14,20 +14,16 @@ * The following images show the relation of states and

*
* <ul>
* * Solid edges marked with an &#0949; indicate a required
* {@link EpsilonTransition}.
*
* <li>Solid edges marked with an &#0949; indicate a required
* {@link EpsilonTransition}.</li>
* * Dashed edges indicate locations where any transition derived from
* {@link Transition} might appear.
*
* <li>Dashed edges indicate locations where any transition derived from
* {@link Transition} might appear.</li>
* * Dashed nodes are place holders for either a sequence of linked
* {@link BasicState} states or the inclusion of a block representing a nested
* construct in one of the forms below.
*
* <li>Dashed nodes are place holders for either a sequence of linked
* {@link BasicState} states or the inclusion of a block representing a nested
* construct in one of the forms below.</li>
* * Nodes showing multiple outgoing alternatives with a `...` support
* any number of alternatives (one or more). Nodes without the `...` only
* support the exact number of alternatives shown in the diagram.
*
* <li>Nodes showing multiple outgoing alternatives with a {@code ...} support
* any number of alternatives (one or more). Nodes without the {@code ...} only
* support the exact number of alternatives shown in the diagram.</li>
*
* </ul>
*
* <h2>Basic Blocks</h2>

@@ -45,11 +41,11 @@ *

*
* <h3>Greedy Closure: {@code (...)*}</h3>
* <h3>Greedy Closure: `(...)*`</h3>
*
* <embed src="images/ClosureGreedy.svg" type="image/svg+xml"/>
*
* <h3>Greedy Positive Closure: {@code (...)+}</h3>
* <h3>Greedy Positive Closure: `(...)+`</h3>
*
* <embed src="images/PositiveClosureGreedy.svg" type="image/svg+xml"/>
*
* <h3>Greedy Optional: {@code (...)?}</h3>
* <h3>Greedy Optional: `(...)?`</h3>
*

@@ -60,11 +56,11 @@ * <embed src="images/OptionalGreedy.svg" type="image/svg+xml"/>

*
* <h3>Non-Greedy Closure: {@code (...)*?}</h3>
* <h3>Non-Greedy Closure: `(...)*?`</h3>
*
* <embed src="images/ClosureNonGreedy.svg" type="image/svg+xml"/>
*
* <h3>Non-Greedy Positive Closure: {@code (...)+?}</h3>
* <h3>Non-Greedy Positive Closure: `(...)+?`</h3>
*
* <embed src="images/PositiveClosureNonGreedy.svg" type="image/svg+xml"/>
*
* <h3>Non-Greedy Optional: {@code (...)??}</h3>
* <h3>Non-Greedy Optional: `(...)??`</h3>
*

@@ -87,3 +83,3 @@ * <embed src="images/OptionalNonGreedy.svg" type="image/svg+xml"/>

*
* @return the state number
* @returns the state number
*/

@@ -95,3 +91,3 @@ getStateNumber(): number;

*
* @return -1 for {@link RuleStopState}, otherwise the state number
* @returns -1 for {@link RuleStopState}, otherwise the state number
*/

@@ -98,0 +94,0 @@ readonly nonStopStateNumber: number;

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { IntervalSet } from '../misc/IntervalSet';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { IntervalSet } from "../misc/IntervalSet";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
/** TODO: make all transitions sets? no, should remove set edges */

@@ -11,0 +11,0 @@ export declare class AtomTransition extends Transition {

@@ -5,4 +5,4 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { BlockStartState } from './BlockStartState';
import { ATNStateType } from "./ATNStateType";
import { BlockStartState } from "./BlockStartState";
/**

@@ -9,0 +9,0 @@ *

@@ -5,4 +5,4 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
/**

@@ -9,0 +9,0 @@ *

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { BlockStartState } from './BlockStartState';
/** Terminal node of a simple {@code (a|b|c)} block. */
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
import { BlockStartState } from "./BlockStartState";
/** Terminal node of a simple `(a|b|c)` block. */
export declare class BlockEndState extends ATNState {

@@ -11,0 +11,0 @@ startState: BlockStartState;

@@ -5,7 +5,7 @@ /*!

*/
import { BlockEndState } from './BlockEndState';
import { DecisionState } from './DecisionState';
/** The start of a regular {@code (...)} block. */
import { BlockEndState } from "./BlockEndState";
import { DecisionState } from "./DecisionState";
/** The start of a regular `(...)` block. */
export declare abstract class BlockStartState extends DecisionState {
endState: BlockEndState;
}

@@ -5,3 +5,3 @@ /*!

*/
import { BitSet } from '../misc/BitSet';
import { BitSet } from "../misc/BitSet";
/**

@@ -29,6 +29,5 @@ * This class stores information about a configuration conflict.

*
* <p>
* For the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction mode,
* accept states are conflicting but not exact are treated as non-accept
* states.</p>
* states.
*/

@@ -35,0 +34,0 @@ readonly isExact: boolean;

@@ -5,5 +5,5 @@ /*!

*/
import { DecisionEventInfo } from './DecisionEventInfo';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { DecisionEventInfo } from "./DecisionEventInfo";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -14,3 +14,2 @@ * This class represents profiling event information for a context sensitivity.

*
* <p>
* In some cases, the unique alternative identified by LL prediction is not

@@ -21,3 +20,3 @@ * equal to the minimum represented alternative in the conflicting SLL

* the two-stage parsing strategy to improve parsing performance for that
* input.</p>
* input.
*

@@ -24,0 +23,0 @@ * @see ParserATNSimulator#reportContextSensitivity

@@ -5,4 +5,4 @@ /*!

*/
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -32,3 +32,3 @@ * This is the base class for gathering detailed information about prediction

* The simulator state containing additional information relevant to the
* prediction state when the current event occurred, or {@code null} if no
* prediction state when the current event occurred, or `undefined` if no
* additional information is relevant or available.

@@ -51,4 +51,4 @@ */

/**
* {@code true} if the current event occurred during LL prediction;
* otherwise, {@code false} if the input occurred during SLL prediction.
* `true` if the current event occurred during LL prediction;
* otherwise, `false` if the input occurred during SLL prediction.
*/

@@ -55,0 +55,0 @@ fullCtx: boolean;

@@ -5,11 +5,10 @@ /*!

*/
import { AmbiguityInfo } from './AmbiguityInfo';
import { ContextSensitivityInfo } from './ContextSensitivityInfo';
import { ErrorInfo } from './ErrorInfo';
import { LookaheadEventInfo } from './LookaheadEventInfo';
import { PredicateEvalInfo } from './PredicateEvalInfo';
import { AmbiguityInfo } from "./AmbiguityInfo";
import { ContextSensitivityInfo } from "./ContextSensitivityInfo";
import { ErrorInfo } from "./ErrorInfo";
import { LookaheadEventInfo } from "./LookaheadEventInfo";
import { PredicateEvalInfo } from "./PredicateEvalInfo";
/**
* This class contains profiling gathered for a particular decision.
*
* <p>
* Parsing performance in ANTLR 4 is heavily influenced by both static factors

@@ -20,3 +19,3 @@ * (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the

* statistics from a large sample of inputs representing the inputs expected in
* production before using the results to make changes in the grammar.</p>
* production before using the results to make changes in the grammar.
*

@@ -39,3 +38,2 @@ * @since 4.3

*
* <p>
* The value of this field contains the sum of differential results obtained

@@ -48,3 +46,3 @@ * by {@link System#nanoTime()}, and is not adjusted to compensate for JIT

* call {@link ATNSimulator#clearDFA} to reset the DFA cache to its initial
* state before starting the profiling measurement pass.</p>
* state before starting the profiling measurement pass.
*/

@@ -139,7 +137,6 @@ timeInPrediction: number;

*
* <p>
* If DFA caching of SLL transitions is employed by the implementation, ATN
* computation may cache the computed edge for efficient lookup during
* future parsing of this decision. Otherwise, the SLL parsing algorithm
* will use ATN transitions exclusively.</p>
* will use ATN transitions exclusively.
*

@@ -155,4 +152,4 @@ * @see #SLL_ATNTransitions

*
* <p>If the ATN simulator implementation does not use DFA caching for SLL
* transitions, this value will be 0.</p>
* If the ATN simulator implementation does not use DFA caching for SLL
* transitions, this value will be 0.
*

@@ -167,3 +164,3 @@ * @see ParserATNSimulator#getExistingTargetState

*
* <p>Note that this value is not related to whether or not
* Note that this value is not related to whether or not
* {@link PredictionMode#SLL} may be used successfully with a particular

@@ -173,3 +170,3 @@ * grammar. If the ambiguity resolution algorithm applied to the SLL

* this decision, {@link PredictionMode#SLL} would produce the same overall
* parsing result as {@link PredictionMode#LL}.</p>
* parsing result as {@link PredictionMode#LL}.
*/

@@ -183,7 +180,6 @@ LL_Fallback: number;

*
* <p>
* If DFA caching of LL transitions is employed by the implementation, ATN
* computation may cache the computed edge for efficient lookup during
* future parsing of this decision. Otherwise, the LL parsing algorithm will
* use ATN transitions exclusively.</p>
* use ATN transitions exclusively.
*

@@ -199,4 +195,4 @@ * @see #LL_DFATransitions

*
* <p>If the ATN simulator implementation does not use DFA caching for LL
* transitions, this value will be 0.</p>
* If the ATN simulator implementation does not use DFA caching for LL
* transitions, this value will be 0.
*

@@ -203,0 +199,0 @@ * @see ParserATNSimulator#getExistingTargetState

@@ -5,3 +5,3 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNState } from "./ATNState";
export declare abstract class DecisionState extends ATNState {

@@ -8,0 +8,0 @@ decision: number;

@@ -5,5 +5,5 @@ /*!

*/
import { ATNState } from './ATNState';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
export declare class EpsilonTransition extends Transition {

@@ -13,3 +13,3 @@ private _outermostPrecedenceReturn;

/**
* @return the rule index of a precedence rule for which this transition is
* @returns the rule index of a precedence rule for which this transition is
* returning from, where the precedence value is 0; otherwise, -1.

@@ -16,0 +16,0 @@ *

@@ -5,5 +5,5 @@ /*!

*/
import { DecisionEventInfo } from './DecisionEventInfo';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { DecisionEventInfo } from "./DecisionEventInfo";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -10,0 +10,0 @@ * This class represents profiling event information for a syntax error

@@ -5,69 +5,70 @@ /*!

*/
export * from './AbstractPredicateTransition';
export * from './ActionTransition';
export * from './AmbiguityInfo';
export * from './ATN';
export * from './ATNConfig';
export * from './ATNConfigSet';
export * from './ATNDeserializationOptions';
export * from './ATNDeserializer';
export * from './ATNSimulator';
export * from './ATNState';
export * from './ATNStateType';
export * from './ATNType';
export * from './AtomTransition';
export * from './BasicBlockStartState';
export * from './BasicState';
export * from './BlockEndState';
export * from './BlockStartState';
export * from './ConflictInfo';
export * from './ContextSensitivityInfo';
export * from './DecisionEventInfo';
export * from './DecisionInfo';
export * from './DecisionState';
export * from './EpsilonTransition';
export * from './ErrorInfo';
export * from './InvalidState';
export * from './LexerAction';
export * from './LexerActionExecutor';
export * from './LexerActionType';
export * from './LexerATNSimulator';
export * from './LexerChannelAction';
export * from './LexerCustomAction';
export * from './LexerIndexedCustomAction';
export * from './LexerModeAction';
export * from './LexerMoreAction';
export * from './LexerPopModeAction';
export * from './LexerPushModeAction';
export * from './LexerSkipAction';
export * from './LexerTypeAction';
export * from './LL1Analyzer';
export * from './LookaheadEventInfo';
export * from './LoopEndState';
export * from './NotSetTransition';
export * from './OrderedATNConfigSet';
export * from './ParseInfo';
export * from './ParserATNSimulator';
export * from './PlusBlockStartState';
export * from './PlusLoopbackState';
export * from './PrecedencePredicateTransition';
export * from './PredicateEvalInfo';
export * from './PredicateTransition';
export * from './PredictionContext';
export * from './PredictionContextCache';
export * from './PredictionMode';
export * from './ProfilingATNSimulator';
export * from './RangeTransition';
export * from './RuleStartState';
export * from './RuleStopState';
export * from './RuleTransition';
export * from './SemanticContext';
export * from './SetTransition';
export * from './SimulatorState';
export * from './StarBlockStartState';
export * from './StarLoopbackState';
export * from './StarLoopEntryState';
export * from './TokensStartState';
export * from './Transition';
export * from './TransitionType';
export * from './WildcardTransition';
export * from "./AbstractPredicateTransition";
export * from "./ActionTransition";
export * from "./AmbiguityInfo";
export * from "./ATN";
export * from "./ATNConfig";
export * from "./ATNConfigSet";
export * from "./ATNDeserializationOptions";
export * from "./ATNDeserializer";
export * from "./ATNSimulator";
export * from "./ATNState";
export * from "./ATNStateType";
export * from "./ATNType";
export * from "./AtomTransition";
export * from "./BasicBlockStartState";
export * from "./BasicState";
export * from "./BlockEndState";
export * from "./BlockStartState";
export * from "./CodePointTransitions";
export * from "./ConflictInfo";
export * from "./ContextSensitivityInfo";
export * from "./DecisionEventInfo";
export * from "./DecisionInfo";
export * from "./DecisionState";
export * from "./EpsilonTransition";
export * from "./ErrorInfo";
export * from "./InvalidState";
export * from "./LexerAction";
export * from "./LexerActionExecutor";
export * from "./LexerActionType";
export * from "./LexerATNSimulator";
export * from "./LexerChannelAction";
export * from "./LexerCustomAction";
export * from "./LexerIndexedCustomAction";
export * from "./LexerModeAction";
export * from "./LexerMoreAction";
export * from "./LexerPopModeAction";
export * from "./LexerPushModeAction";
export * from "./LexerSkipAction";
export * from "./LexerTypeAction";
export * from "./LL1Analyzer";
export * from "./LookaheadEventInfo";
export * from "./LoopEndState";
export * from "./NotSetTransition";
export * from "./OrderedATNConfigSet";
export * from "./ParseInfo";
export * from "./ParserATNSimulator";
export * from "./PlusBlockStartState";
export * from "./PlusLoopbackState";
export * from "./PrecedencePredicateTransition";
export * from "./PredicateEvalInfo";
export * from "./PredicateTransition";
export * from "./PredictionContext";
export * from "./PredictionContextCache";
export * from "./PredictionMode";
export * from "./ProfilingATNSimulator";
export * from "./RangeTransition";
export * from "./RuleStartState";
export * from "./RuleStopState";
export * from "./RuleTransition";
export * from "./SemanticContext";
export * from "./SetTransition";
export * from "./SimulatorState";
export * from "./StarBlockStartState";
export * from "./StarLoopbackState";
export * from "./StarLoopEntryState";
export * from "./TokensStartState";
export * from "./Transition";
export * from "./TransitionType";
export * from "./WildcardTransition";

@@ -5,4 +5,4 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { BasicState } from './BasicState';
import { ATNStateType } from "./ATNStateType";
import { BasicState } from "./BasicState";
/**

@@ -9,0 +9,0 @@ *

@@ -5,5 +5,5 @@ /*!

*/
import { Equatable } from '../misc/Stubs';
import { Lexer } from '../Lexer';
import { LexerActionType } from './LexerActionType';
import { Equatable } from "../misc/Stubs";
import { Lexer } from "../Lexer";
import { LexerActionType } from "./LexerActionType";
/**

@@ -21,3 +21,3 @@ * Represents a single action which can be executed following the successful

*
* @return The serialization type of the lexer action.
* @returns The serialization type of the lexer action.
*/

@@ -30,10 +30,10 @@ readonly actionType: LexerActionType;

*
* <p>Many lexer commands, including {@code type}, {@code skip}, and
* {@code more}, do not check the input index during their execution.
* Many lexer commands, including `type`, `skip`, and
* `more`, do not check the input index during their execution.
* Actions like this are position-independent, and may be stored more
* efficiently as part of the `ATNConfig.lexerActionExecutor`.</p>
* efficiently as part of the `ATNConfig.lexerActionExecutor`.
*
* @return {@code true} if the lexer action semantics can be affected by the
* @returns `true` if the lexer action semantics can be affected by the
* position of the input {@link CharStream} at the time it is executed;
* otherwise, {@code false}.
* otherwise, `false`.
*/

@@ -44,4 +44,4 @@ readonly isPositionDependent: boolean;

*
* <p>For position-dependent actions, the input stream must already be
* positioned correctly prior to calling this method.</p>
* For position-dependent actions, the input stream must already be
* positioned correctly prior to calling this method.
*

@@ -48,0 +48,0 @@ * @param lexer The lexer instance.

@@ -5,5 +5,5 @@ /*!

*/
import { CharStream } from '../CharStream';
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { CharStream } from "../CharStream";
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
/**

@@ -13,5 +13,5 @@ * Represents an executor for a sequence of lexer actions which traversed during

*
* <p>The executor tracks position information for position-dependent lexer actions
* The executor tracks position information for position-dependent lexer actions
* efficiently, ensuring that actions appearing only at the end of the rule do
* not cause bloating of the {@link DFA} created for the lexer.</p>
* not cause bloating of the {@link DFA} created for the lexer.
*

@@ -35,14 +35,14 @@ * @author Sam Harwell

* Creates a {@link LexerActionExecutor} which executes the actions for
* the input {@code lexerActionExecutor} followed by a specified
* {@code lexerAction}.
* the input `lexerActionExecutor` followed by a specified
* `lexerAction`.
*
* @param lexerActionExecutor The executor for actions already traversed by
* the lexer while matching a token within a particular
* {@link ATNConfig}. If this is {@code null}, the method behaves as though
* {@link ATNConfig}. If this is `undefined`, the method behaves as though
* it were an empty executor.
* @param lexerAction The lexer action to execute after the actions
* specified in {@code lexerActionExecutor}.
* specified in `lexerActionExecutor`.
*
* @return A {@link LexerActionExecutor} for executing the combine actions
* of {@code lexerActionExecutor} and {@code lexerAction}.
* @returns A {@link LexerActionExecutor} for executing the combine actions
* of `lexerActionExecutor` and `lexerAction`.
*/

@@ -54,11 +54,11 @@ static append(lexerActionExecutor: LexerActionExecutor | undefined, lexerAction: LexerAction): LexerActionExecutor;

*
* <p>Normally, when the executor encounters lexer actions where
* {@link LexerAction#isPositionDependent} returns {@code true}, it calls
* Normally, when the executor encounters lexer actions where
* {@link LexerAction#isPositionDependent} returns `true`, it calls
* {@link IntStream#seek} on the input {@link CharStream} to set the input
* position to the <em>end</em> of the current token. This behavior provides
* position to the *end* of the current token. This behavior provides
* for efficient DFA representation of lexer actions which appear at the end
* of a lexer rule, even when the lexer rule matches a variable number of
* characters.</p>
* characters.
*
* <p>Prior to traversing a match transition in the ATN, the current offset
* Prior to traversing a match transition in the ATN, the current offset
* from the token start index is assigned to all position-dependent lexer

@@ -69,6 +69,6 @@ * actions which have not already been assigned a fixed offset. By storing

* to sharing among tokens of the same length, regardless of their absolute
* position in the input stream.</p>
* position in the input stream.
*
* <p>If the current executor already has offsets assigned to all
* position-dependent lexer actions, the method returns {@code this}.</p>
* If the current executor already has offsets assigned to all
* position-dependent lexer actions, the method returns `this`.
*

@@ -78,3 +78,3 @@ * @param offset The current offset to assign to all position-dependent

*
* @return A {@link LexerActionExecutor} which stores input stream offsets
* @returns A {@link LexerActionExecutor} which stores input stream offsets
* for all position-dependent lexer actions.

@@ -85,3 +85,3 @@ */

* Gets the lexer actions to be executed by this executor.
* @return The lexer actions to be executed by this executor.
* @returns The lexer actions to be executed by this executor.
*/

@@ -93,7 +93,7 @@ readonly lexerActions: LexerAction[];

*
* <p>This method calls {@link IntStream#seek} to set the position of the
* {@code input} {@link CharStream} prior to calling
* This method calls {@link IntStream#seek} to set the position of the
* `input` {@link CharStream} prior to calling
* {@link LexerAction#execute} on a position-dependent action. Before the
* method returns, the input position will be restored to the same position
* it was in when the method was invoked.</p>
* it was in when the method was invoked.
*

@@ -103,6 +103,6 @@ * @param lexer The lexer instance.

* When this method is called, the current {@link IntStream#index} for
* {@code input} should be the start of the following token, i.e. 1
* `input` should be the start of the following token, i.e. 1
* character past the end of the current token.
* @param startIndex The token start index. This value may be passed to
* {@link IntStream#seek} to set the {@code input} position to the beginning
* {@link IntStream#seek} to set the `input` position to the beginning
* of the token.

@@ -109,0 +109,0 @@ */

@@ -5,13 +5,13 @@ /*!

*/
import { ATN } from './ATN';
import { ATNConfig } from './ATNConfig';
import { ATNConfigSet } from './ATNConfigSet';
import { ATNSimulator } from './ATNSimulator';
import { ATNState } from './ATNState';
import { CharStream } from '../CharStream';
import { DFA } from '../dfa/DFA';
import { DFAState } from '../dfa/DFAState';
import { Lexer } from '../Lexer';
import { LexerActionExecutor } from './LexerActionExecutor';
import { Transition } from './Transition';
import { ATN } from "./ATN";
import { ATNConfig } from "./ATNConfig";
import { ATNConfigSet } from "./ATNConfigSet";
import { ATNSimulator } from "./ATNSimulator";
import { ATNState } from "./ATNState";
import { CharStream } from "../CharStream";
import { DFA } from "../dfa/DFA";
import { DFAState } from "../dfa/DFAState";
import { Lexer } from "../Lexer";
import { LexerActionExecutor } from "./LexerActionExecutor";
import { Transition } from "./Transition";
/** "dup" of ParserInterpreter */

@@ -45,8 +45,8 @@ export declare class LexerATNSimulator extends ATNSimulator {

* for the edge has not yet been computed or is otherwise not available,
* this method returns {@code null}.
* this method returns `undefined`.
*
* @param s The current DFA state
* @param t The next input symbol
* @return The existing target DFA state for the given input symbol
* {@code t}, or {@code null} if the target state for this edge is not
* @returns The existing target DFA state for the given input symbol
* `t`, or `undefined` if the target state for this edge is not
* already cached

@@ -63,4 +63,4 @@ */

*
* @return The computed target DFA state for the given input symbol
* {@code t}. If {@code t} does not lead to a valid DFA state, this method
* @returns The computed target DFA state for the given input symbol
* `t`. If `t` does not lead to a valid DFA state, this method
* returns {@link #ERROR}.

@@ -71,3 +71,3 @@ */

/** Given a starting configuration set, figure out all ATN configurations
* we can reach upon input {@code t}. Parameter {@code reach} is a return
* we can reach upon input `t`. Parameter `reach` is a return
* parameter.

@@ -83,7 +83,7 @@ */

* state is reached. After the first accept state is reached by depth-first
* search from {@code config}, all other (potentially reachable) states for
* search from `config`, all other (potentially reachable) states for
* this rule would have a lower priority.
*
* @return {@code true} if an accept state is reached, otherwise
* {@code false}.
* @returns `true` if an accept state is reached, otherwise
* `false`.
*/

@@ -95,3 +95,3 @@ protected closure(input: CharStream, config: ATNConfig, configs: ATNConfigSet, currentAltReachedAcceptState: boolean, speculative: boolean, treatEofAsEpsilon: boolean): boolean;

*
* <p>If {@code speculative} is {@code true}, this method was called before
* If `speculative` is `true`, this method was called before
* {@link #consume} for the matched character. This method should call

@@ -101,5 +101,5 @@ * {@link #consume} before evaluating the predicate to ensure position

* and {@link Lexer#getCharPositionInLine}, properly reflect the current
* lexer state. This method should restore {@code input} and the simulator
* lexer state. This method should restore `input` and the simulator
* to the original state before returning (i.e. undo the actions made by the
* call to {@link #consume}.</p>
* call to {@link #consume}.
*

@@ -109,7 +109,7 @@ * @param input The input stream.

* @param predIndex The index of the predicate within the rule.
* @param speculative {@code true} if the current index in {@code input} is
* @param speculative `true` if the current index in `input` is
* one character before the predicate's location.
*
* @return {@code true} if the specified predicate evaluates to
* {@code true}.
* @returns `true` if the specified predicate evaluates to
* `true`.
*/

@@ -121,5 +121,5 @@ protected evaluatePredicate(input: CharStream, ruleIndex: number, predIndex: number, speculative: boolean): boolean;

/** Add a new DFA state if there isn't one with this set of
configurations already. This method also detects the first
configuration containing an ATN rule stop state. Later, when
traversing the DFA, we will know which rule to accept.
* configurations already. This method also detects the first
* configuration containing an ATN rule stop state. Later, when
* traversing the DFA, we will know which rule to accept.
*/

@@ -147,3 +147,3 @@ protected addDFAState(configs: ATNConfigSet): DFAState;

*
* <p>We track these variables separately for the DFA and ATN simulation
* We track these variables separately for the DFA and ATN simulation
* because the DFA simulation often has to fail over to the ATN

@@ -153,3 +153,3 @@ * simulation. If the ATN simulation fails, we need the DFA to fall

* then the ATN does the accept and the DFA simulator that invoked it
* can simply return the predicted token type.</p>
* can simply return the predicted token type.
*/

@@ -156,0 +156,0 @@ class SimState {

@@ -5,7 +5,7 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code channel} lexer action by calling
* Implements the `channel` lexer action by calling
* {@link Lexer#setChannel} with the assigned channel.

@@ -19,3 +19,3 @@ *

/**
* Constructs a new {@code channel} action with the specified channel value.
* Constructs a new `channel` action with the specified channel value.
* @param channel The channel value to pass to {@link Lexer#setChannel}.

@@ -27,3 +27,3 @@ */

*
* @return The channel to use for the {@link Token} created by the lexer.
* @returns The channel to use for the {@link Token} created by the lexer.
*/

@@ -33,3 +33,3 @@ readonly channel: number;

* {@inheritDoc}
* @return This method returns {@link LexerActionType#CHANNEL}.
* @returns This method returns {@link LexerActionType#CHANNEL}.
*/

@@ -39,3 +39,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -46,4 +46,4 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#setChannel} with the
* value provided by {@link #getChannel}.</p>
* This action is implemented by calling {@link Lexer#setChannel} with the
* value provided by {@link #getChannel}.
*/

@@ -50,0 +50,0 @@ execute(lexer: Lexer): void;

@@ -5,5 +5,5 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**

@@ -15,5 +15,5 @@ * Executes a custom lexer action by calling {@link Recognizer#action} with the

*
* <p>This class may represent embedded actions created with the <code>{...}</code>
* This class may represent embedded actions created with the `{...}`
* syntax in ANTLR 4, as well as actions created for lexer commands where the
* command argument could not be evaluated when the grammar was compiled.</p>
* command argument could not be evaluated when the grammar was compiled.
*

@@ -39,3 +39,3 @@ * @author Sam Harwell

*
* @return The rule index for the custom action.
* @returns The rule index for the custom action.
*/

@@ -46,3 +46,3 @@ readonly ruleIndex: number;

*
* @return The action index for the custom action.
* @returns The action index for the custom action.
*/

@@ -53,3 +53,3 @@ readonly actionIndex: number;

*
* @return This method returns {@link LexerActionType#CUSTOM}.
* @returns This method returns {@link LexerActionType#CUSTOM}.
*/

@@ -62,7 +62,7 @@ readonly actionType: LexerActionType;

*
* <p>Custom actions are position-dependent since they may represent a
* Custom actions are position-dependent since they may represent a
* user-defined embedded action which makes calls to methods like
* {@link Lexer#getText}.</p>
* {@link Lexer#getText}.
*
* @return This method returns {@code true}.
* @returns This method returns `true`.
*/

@@ -73,4 +73,4 @@ readonly isPositionDependent: boolean;

*
* <p>Custom actions are implemented by calling {@link Lexer#action} with the
* appropriate rule and action indexes.</p>
* Custom actions are implemented by calling {@link Lexer#action} with the
* appropriate rule and action indexes.
*/

@@ -77,0 +77,0 @@ execute(lexer: Lexer): void;

@@ -5,5 +5,5 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**

@@ -13,7 +13,7 @@ * This implementation of {@link LexerAction} is used for tracking input offsets

*
* <p>This action is not serialized as part of the ATN, and is only required for
* This action is not serialized as part of the ATN, and is only required for
* position-dependent lexer actions which appear at a location other than the
* end of a rule. For more information about DFA optimizations employed for
* lexer actions, see {@link LexerActionExecutor#append} and
* {@link LexerActionExecutor#fixOffsetBeforeMatch}.</p>
* {@link LexerActionExecutor#fixOffsetBeforeMatch}.
*

@@ -30,4 +30,4 @@ * @author Sam Harwell

*
* <p>Note: This class is only required for lexer actions for which
* {@link LexerAction#isPositionDependent} returns {@code true}.</p>
* Note: This class is only required for lexer actions for which
* {@link LexerAction#isPositionDependent} returns `true`.
*

@@ -46,3 +46,3 @@ * @param offset The offset into the input {@link CharStream}, relative to

*
* @return The location in the input {@link CharStream} at which the lexer
* @returns The location in the input {@link CharStream} at which the lexer
* action should be executed.

@@ -54,3 +54,3 @@ */

*
* @return A {@link LexerAction} object which executes the lexer action.
* @returns A {@link LexerAction} object which executes the lexer action.
*/

@@ -61,3 +61,3 @@ readonly action: LexerAction;

*
* @return This method returns the result of calling {@link #getActionType}
* @returns This method returns the result of calling {@link #getActionType}
* on the {@link LexerAction} returned by {@link #getAction}.

@@ -68,3 +68,3 @@ */

* {@inheritDoc}
* @return This method returns {@code true}.
* @returns This method returns `true`.
*/

@@ -75,4 +75,4 @@ readonly isPositionDependent: boolean;

*
* <p>This method calls {@link #execute} on the result of {@link #getAction}
* using the provided {@code lexer}.</p>
* This method calls {@link #execute} on the result of {@link #getAction}
* using the provided `lexer`.
*/

@@ -79,0 +79,0 @@ execute(lexer: Lexer): void;

@@ -5,7 +5,7 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
* Implements the `mode` lexer action by calling {@link Lexer#mode} with
* the assigned mode.

@@ -19,3 +19,3 @@ *

/**
* Constructs a new {@code mode} action with the specified mode value.
* Constructs a new `mode` action with the specified mode value.
* @param mode The mode value to pass to {@link Lexer#mode}.

@@ -27,3 +27,3 @@ */

*
* @return The lexer mode for this {@code mode} command.
* @returns The lexer mode for this `mode` command.
*/

@@ -33,3 +33,3 @@ readonly mode: number;

* {@inheritDoc}
* @return This method returns {@link LexerActionType#MODE}.
* @returns This method returns {@link LexerActionType#MODE}.
*/

@@ -39,3 +39,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -46,4 +46,4 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#mode} with the
* value provided by {@link #getMode}.</p>
* This action is implemented by calling {@link Lexer#mode} with the
* value provided by {@link #getMode}.
*/

@@ -50,0 +50,0 @@ execute(lexer: Lexer): void;

@@ -5,10 +5,10 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code more} lexer action by calling {@link Lexer#more}.
* Implements the `more` lexer action by calling {@link Lexer#more}.
*
* <p>The {@code more} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
* The `more` command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*

@@ -20,3 +20,3 @@ * @author Sam Harwell

/**
* Constructs the singleton instance of the lexer {@code more} command.
* Constructs the singleton instance of the lexer `more` command.
*/

@@ -26,3 +26,3 @@ constructor();

* {@inheritDoc}
* @return This method returns {@link LexerActionType#MORE}.
* @returns This method returns {@link LexerActionType#MORE}.
*/

@@ -32,3 +32,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -39,3 +39,3 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#more}.</p>
* This action is implemented by calling {@link Lexer#more}.
*/

@@ -42,0 +42,0 @@ execute(lexer: Lexer): void;

@@ -5,10 +5,10 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
* Implements the `popMode` lexer action by calling {@link Lexer#popMode}.
*
* <p>The {@code popMode} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
* The `popMode` command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*

@@ -20,3 +20,3 @@ * @author Sam Harwell

/**
* Constructs the singleton instance of the lexer {@code popMode} command.
* Constructs the singleton instance of the lexer `popMode` command.
*/

@@ -26,3 +26,3 @@ constructor();

* {@inheritDoc}
* @return This method returns {@link LexerActionType#POP_MODE}.
* @returns This method returns {@link LexerActionType#POP_MODE}.
*/

@@ -32,3 +32,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -39,3 +39,3 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#popMode}.</p>
* This action is implemented by calling {@link Lexer#popMode}.
*/

@@ -42,0 +42,0 @@ execute(lexer: Lexer): void;

@@ -5,7 +5,7 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code pushMode} lexer action by calling
* Implements the `pushMode` lexer action by calling
* {@link Lexer#pushMode} with the assigned mode.

@@ -19,3 +19,3 @@ *

/**
* Constructs a new {@code pushMode} action with the specified mode value.
* Constructs a new `pushMode` action with the specified mode value.
* @param mode The mode value to pass to {@link Lexer#pushMode}.

@@ -27,3 +27,3 @@ */

*
* @return The lexer mode for this {@code pushMode} command.
* @returns The lexer mode for this `pushMode` command.
*/

@@ -33,3 +33,3 @@ readonly mode: number;

* {@inheritDoc}
* @return This method returns {@link LexerActionType#PUSH_MODE}.
* @returns This method returns {@link LexerActionType#PUSH_MODE}.
*/

@@ -39,3 +39,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -46,4 +46,4 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#pushMode} with the
* value provided by {@link #getMode}.</p>
* This action is implemented by calling {@link Lexer#pushMode} with the
* value provided by {@link #getMode}.
*/

@@ -50,0 +50,0 @@ execute(lexer: Lexer): void;

@@ -5,10 +5,10 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
* Implements the `skip` lexer action by calling {@link Lexer#skip}.
*
* <p>The {@code skip} command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
* The `skip` command does not have any parameters, so this action is
* implemented as a singleton instance exposed by {@link #INSTANCE}.
*

@@ -20,3 +20,3 @@ * @author Sam Harwell

/**
* Constructs the singleton instance of the lexer {@code skip} command.
* Constructs the singleton instance of the lexer `skip` command.
*/

@@ -26,3 +26,3 @@ constructor();

* {@inheritDoc}
* @return This method returns {@link LexerActionType#SKIP}.
* @returns This method returns {@link LexerActionType#SKIP}.
*/

@@ -32,3 +32,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -39,3 +39,3 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by calling {@link Lexer#skip}.</p>
* This action is implemented by calling {@link Lexer#skip}.
*/

@@ -42,0 +42,0 @@ execute(lexer: Lexer): void;

@@ -5,7 +5,7 @@ /*!

*/
import { Lexer } from '../Lexer';
import { LexerAction } from './LexerAction';
import { LexerActionType } from './LexerActionType';
import { Lexer } from "../Lexer";
import { LexerAction } from "./LexerAction";
import { LexerActionType } from "./LexerActionType";
/**
* Implements the {@code type} lexer action by setting `Lexer.type`
* Implements the `type` lexer action by setting `Lexer.type`
* with the assigned type.

@@ -19,3 +19,3 @@ *

/**
* Constructs a new {@code type} action with the specified token type value.
* Constructs a new `type` action with the specified token type value.
* @param type The type to assign to the token using `Lexer.type`.

@@ -26,3 +26,3 @@ */

* Gets the type to assign to a token created by the lexer.
* @return The type to assign to a token created by the lexer.
* @returns The type to assign to a token created by the lexer.
*/

@@ -32,3 +32,3 @@ readonly type: number;

* {@inheritDoc}
* @return This method returns {@link LexerActionType#TYPE}.
* @returns This method returns {@link LexerActionType#TYPE}.
*/

@@ -38,3 +38,3 @@ readonly actionType: LexerActionType;

* {@inheritDoc}
* @return This method returns {@code false}.
* @returns This method returns `false`.
*/

@@ -45,4 +45,4 @@ readonly isPositionDependent: boolean;

*
* <p>This action is implemented by setting `Lexer.type` with the
* value provided by `type`.</p>
* This action is implemented by setting `Lexer.type` with the
* value provided by `type`.
*/

@@ -49,0 +49,0 @@ execute(lexer: Lexer): void;

@@ -5,12 +5,12 @@ /*!

*/
import { Array2DHashSet } from '../misc/Array2DHashSet';
import { ATN } from './ATN';
import { ATNConfig } from './ATNConfig';
import { ATNState } from './ATNState';
import { BitSet } from '../misc/BitSet';
import { IntervalSet } from '../misc/IntervalSet';
import { PredictionContext } from './PredictionContext';
import { Array2DHashSet } from "../misc/Array2DHashSet";
import { ATN } from "./ATN";
import { ATNConfig } from "./ATNConfig";
import { ATNState } from "./ATNState";
import { BitSet } from "../misc/BitSet";
import { IntervalSet } from "../misc/IntervalSet";
import { PredictionContext } from "./PredictionContext";
export declare class LL1Analyzer {
/** Special value added to the lookahead sets to indicate that we hit
* a predicate during analysis if {@code seeThruPreds==false}.
* a predicate during analysis if `seeThruPreds==false`.
*/

@@ -23,35 +23,35 @@ static readonly HIT_PRED: number;

* of an {@link ATNState}. The returned array has one element for each
* outgoing transition in {@code s}. If the closure from transition
* <em>i</em> leads to a semantic predicate before matching a symbol, the
* element at index <em>i</em> of the result will be {@code null}.
* outgoing transition in `s`. If the closure from transition
* *i* leads to a semantic predicate before matching a symbol, the
* element at index *i* of the result will be `undefined`.
*
* @param s the ATN state
* @return the expected symbols for each outgoing transition of {@code s}.
* @returns the expected symbols for each outgoing transition of `s`.
*/
getDecisionLookahead(s: ATNState | undefined): (IntervalSet | undefined)[] | undefined;
getDecisionLookahead(s: ATNState | undefined): Array<IntervalSet | undefined> | undefined;
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* Compute set of tokens that can follow `s` in the ATN in the
* specified `ctx`.
*
* <p>If {@code ctx} is {@code null} and the end of the rule containing
* {@code s} is reached, {@link Token#EPSILON} is added to the result set.
* If {@code ctx} is not {@code null} and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.</p>
* If `ctx` is `undefined` and the end of the rule containing
* `s` is reached, {@link Token#EPSILON} is added to the result set.
* If `ctx` is not `undefined` and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.
*
* @param s the ATN state
* @param ctx the complete parser context, or {@code null} if the context
* @param ctx the complete parser context, or `undefined` if the context
* should be ignored
*
* @return The set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* @returns The set of tokens that can follow `s` in the ATN in the
* specified `ctx`.
*/
LOOK(s: ATNState, ctx: PredictionContext): IntervalSet;
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* Compute set of tokens that can follow `s` in the ATN in the
* specified `ctx`.
*
* <p>If {@code ctx} is {@code null} and the end of the rule containing
* {@code s} is reached, {@link Token#EPSILON} is added to the result set.
* If {@code ctx} is not {@code PredictionContext#EMPTY_LOCAL} and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.</p>
* If `ctx` is `undefined` and the end of the rule containing
* `s` is reached, {@link Token#EPSILON} is added to the result set.
* If `ctx` is not `PredictionContext#EMPTY_LOCAL` and the end of the outermost rule is
* reached, {@link Token#EOF} is added to the result set.
*

@@ -61,18 +61,18 @@ * @param s the ATN state

* {@link BlockEndState} to detect epsilon paths through a closure.
* @param ctx the complete parser context, or {@code null} if the context
* @param ctx the complete parser context, or `undefined` if the context
* should be ignored
*
* @return The set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* @returns The set of tokens that can follow `s` in the ATN in the
* specified `ctx`.
*/
LOOK(s: ATNState, ctx: PredictionContext, stopState: ATNState | null): IntervalSet;
/**
* Compute set of tokens that can follow {@code s} in the ATN in the
* specified {@code ctx}.
* Compute set of tokens that can follow `s` in the ATN in the
* specified `ctx`.
* <p/>
* If {@code ctx} is {@link PredictionContext#EMPTY_LOCAL} and
* {@code stopState} or the end of the rule containing {@code s} is reached,
* {@link Token#EPSILON} is added to the result set. If {@code ctx} is not
* {@link PredictionContext#EMPTY_LOCAL} and {@code addEOF} is {@code true}
* and {@code stopState} or the end of the outermost rule is reached,
* If `ctx` is {@link PredictionContext#EMPTY_LOCAL} and
* `stopState` or the end of the rule containing `s` is reached,
* {@link Token#EPSILON} is added to the result set. If `ctx` is not
* {@link PredictionContext#EMPTY_LOCAL} and `addEOF` is `true`
* and `stopState` or the end of the outermost rule is reached,
* {@link Token#EOF} is added to the result set.

@@ -88,12 +88,12 @@ *

* from causing a stack overflow. Outside code should pass
* {@code new HashSet<ATNConfig>} for this argument.
* `new HashSet<ATNConfig>` for this argument.
* @param calledRuleStack A set used for preventing left recursion in the
* ATN from causing a stack overflow. Outside code should pass
* {@code new BitSet()} for this argument.
* @param seeThruPreds {@code true} to true semantic predicates as
* implicitly {@code true} and "see through them", otherwise {@code false}
* `new BitSet()` for this argument.
* @param seeThruPreds `true` to true semantic predicates as
* implicitly `true` and "see through them", otherwise `false`
* to treat semantic predicates as opaque and add {@link #HIT_PRED} to the
* result if one is encountered.
* @param addEOF Add {@link Token#EOF} to the result if the end of the
* outermost context is reached. This parameter has no effect if {@code ctx}
* outermost context is reached. This parameter has no effect if `ctx`
* is {@link PredictionContext#EMPTY_LOCAL}.

@@ -100,0 +100,0 @@ */

@@ -5,5 +5,5 @@ /*!

*/
import { DecisionEventInfo } from './DecisionEventInfo';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { DecisionEventInfo } from "./DecisionEventInfo";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -28,3 +28,3 @@ * This class represents profiling event information for tracking the lookahead

* @param state The final simulator state containing the necessary
* information to determine the result of a prediction, or {@code null} if
* information to determine the result of a prediction, or `undefined` if
* the final state is not available

@@ -34,4 +34,4 @@ * @param input The input token stream

* @param stopIndex The index at which the prediction was finally made
* @param fullCtx {@code true} if the current lookahead is part of an LL
* prediction; otherwise, {@code false} if the current lookahead is part of
* @param fullCtx `true` if the current lookahead is part of an LL
* prediction; otherwise, `false` if the current lookahead is part of
* an SLL prediction

@@ -38,0 +38,0 @@ */

@@ -5,4 +5,4 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
/** Mark the end of a * or + loop. */

@@ -9,0 +9,0 @@ export declare class LoopEndState extends ATNState {

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { IntervalSet } from '../misc/IntervalSet';
import { SetTransition } from './SetTransition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { IntervalSet } from "../misc/IntervalSet";
import { SetTransition } from "./SetTransition";
import { TransitionType } from "./TransitionType";
export declare class NotSetTransition extends SetTransition {

@@ -11,0 +11,0 @@ constructor(target: ATNState, set: IntervalSet);

@@ -5,4 +5,4 @@ /*!

*/
import { ATNConfig } from './ATNConfig';
import { ATNConfigSet } from './ATNConfigSet';
import { ATNConfig } from "./ATNConfig";
import { ATNConfigSet } from "./ATNConfigSet";
/**

@@ -9,0 +9,0 @@ *

@@ -5,4 +5,4 @@ /*!

*/
import { DecisionInfo } from './DecisionInfo';
import { ProfilingATNSimulator } from './ProfilingATNSimulator';
import { DecisionInfo } from "./DecisionInfo";
import { ProfilingATNSimulator } from "./ProfilingATNSimulator";
/**

@@ -21,3 +21,3 @@ * This class provides access to specific and aggregate statistics gathered

*
* @return An array of {@link DecisionInfo} instances, indexed by decision
* @returns An array of {@link DecisionInfo} instances, indexed by decision
* number.

@@ -31,3 +31,3 @@ */

*
* @return A list of decision numbers which required one or more
* @returns A list of decision numbers which required one or more
* full-context predictions during parsing.

@@ -68,5 +68,4 @@ */

*
* <p>
* This value is the sum of {@link #getTotalSLLATNLookaheadOps} and
* {@link #getTotalLLATNLookaheadOps}.</p>
* {@link #getTotalLLATNLookaheadOps}.
*/

@@ -73,0 +72,0 @@ getTotalATNLookaheadOps(): number;

@@ -5,31 +5,29 @@ /*!

*/
import { ActionTransition } from './ActionTransition';
import { Array2DHashSet } from '../misc/Array2DHashSet';
import { ATN } from './ATN';
import { ATNConfig } from './ATNConfig';
import { ATNConfigSet } from './ATNConfigSet';
import { ATNSimulator } from './ATNSimulator';
import { ATNState } from './ATNState';
import { BitSet } from '../misc/BitSet';
import { Collection } from '../misc/Stubs';
import { DFA } from '../dfa/DFA';
import { DFAState } from '../dfa/DFAState';
import { IntegerList } from '../misc/IntegerList';
import { NoViableAltException } from '../NoViableAltException';
import { Parser } from '../Parser';
import { ParserRuleContext } from '../ParserRuleContext';
import { PrecedencePredicateTransition } from './PrecedencePredicateTransition';
import { PredicateTransition } from './PredicateTransition';
import { PredictionContextCache } from './PredictionContextCache';
import { PredictionMode } from './PredictionMode';
import { RuleContext } from '../RuleContext';
import { RuleTransition } from './RuleTransition';
import { SemanticContext } from './SemanticContext';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { Transition } from './Transition';
import { ActionTransition } from "./ActionTransition";
import { Array2DHashSet } from "../misc/Array2DHashSet";
import { ATN } from "./ATN";
import { ATNConfig } from "./ATNConfig";
import { ATNConfigSet } from "./ATNConfigSet";
import { ATNSimulator } from "./ATNSimulator";
import { ATNState } from "./ATNState";
import { BitSet } from "../misc/BitSet";
import { DFA } from "../dfa/DFA";
import { DFAState } from "../dfa/DFAState";
import { IntegerList } from "../misc/IntegerList";
import { NoViableAltException } from "../NoViableAltException";
import { Parser } from "../Parser";
import { ParserRuleContext } from "../ParserRuleContext";
import { PrecedencePredicateTransition } from "./PrecedencePredicateTransition";
import { PredicateTransition } from "./PredicateTransition";
import { PredictionContextCache } from "./PredictionContextCache";
import { PredictionMode } from "./PredictionMode";
import { RuleContext } from "../RuleContext";
import { RuleTransition } from "./RuleTransition";
import { SemanticContext } from "./SemanticContext";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
import { Transition } from "./Transition";
/**
* The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
*
* <p>
* The basic complexity of the adaptive strategy makes it harder to understand.

@@ -40,5 +38,4 @@ * We begin with ATN simulation to build paths in a DFA. Subsequent prediction

* complete the DFA path for the current input (until it finds a conflict state
* or uniquely predicting state).</p>
* or uniquely predicting state).
*
* <p>
* All of that is done without using the outer context because we want to create

@@ -53,5 +50,4 @@ * a DFA that is not dependent upon the rule invocation stack when we do a

* don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
* strategy (versus full LL(*)).</p>
* strategy (versus full LL(*)).
*
* <p>
* When SLL yields a configuration set with conflict, we rewind the input and

@@ -64,14 +60,11 @@ * retry the ATN simulation, this time using full outer context without adding

* context. (It is not context-sensitive in the sense of context-sensitive
* grammars.)</p>
* grammars.)
*
* <p>
* The next time we reach this DFA state with an SLL conflict, through DFA
* simulation, we will again retry the ATN simulation using full context mode.
* This is slow because we can't save the results and have to "interpret" the
* ATN each time we get that input.</p>
* ATN each time we get that input.
*
* <p>
* <strong>CACHING FULL CONTEXT PREDICTIONS</strong></p>
* **CACHING FULL CONTEXT PREDICTIONS**
*
* <p>
* We could cache results from full context to predicted alternative easily and

@@ -85,5 +78,4 @@ * that saves a lot of time but doesn't work in presence of predicates. The set

* to keep this algorithm simple. By launching multiple threads, we can improve
* the speed of parsing across a large number of files.</p>
* the speed of parsing across a large number of files.
*
* <p>
* There is no strict ordering between the amount of input used by SLL vs LL,

@@ -99,32 +91,27 @@ * which makes it really hard to build a cache for full context. Let's say that

* input used during the previous prediction. That amounts to a cache that maps
* X to a specific DFA for that context.</p>
* X to a specific DFA for that context.
*
* <p>
* Something should be done for left-recursive expression predictions. They are
* likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry
* with full LL thing Sam does.</p>
* with full LL thing Sam does.
*
* <p>
* <strong>AVOIDING FULL CONTEXT PREDICTION</strong></p>
* **AVOIDING FULL CONTEXT PREDICTION**
*
* <p>
* We avoid doing full context retry when the outer context is empty, we did not
* dip into the outer context by falling off the end of the decision state rule,
* or when we force SLL mode.</p>
* or when we force SLL mode.
*
* <p>
* As an example of the not dip into outer context case, consider as super
* constructor calls versus function calls. One grammar might look like
* this:</p>
* this:
*
* <pre>
* ```antlr
* ctorBody
* : '{' superCall? stat* '}'
* ;
* </pre>
* ```
*
* <p>
* Or, you might see something like</p>
* Or, you might see something like
*
* <pre>
* ```antlr
* stat

@@ -135,14 +122,11 @@ * : superCall ';'

* ;
* </pre>
* ```
*
* <p>
* In both cases I believe that no closure operations will dip into the outer
* context. In the first case ctorBody in the worst case will stop at the '}'.
* In the 2nd case it should stop at the ';'. Both cases should stay within the
* entry rule and not dip into the outer context.</p>
* entry rule and not dip into the outer context.
*
* <p>
* <strong>PREDICATES</strong></p>
* **PREDICATES**
*
* <p>
* Predicates are always evaluated if present in either SLL or LL both. SLL and

@@ -154,5 +138,4 @@ * LL simulation deals with predicates differently. SLL collects predicates as

* closure, the DFA state configuration sets would be different and we couldn't
* build up a suitable DFA.</p>
* build up a suitable DFA.
*
* <p>
* When building a DFA accept state during ATN simulation, we evaluate any

@@ -164,19 +147,15 @@ * predicates and return the sole semantically valid alternative. If there is

* alternatives with false predicates and choose the minimum alternative that
* remains.</p>
* remains.
*
* <p>
* When we start in the DFA and reach an accept state that's predicated, we test
* those and return the minimum semantically viable alternative. If no
* alternatives are viable, we throw an exception.</p>
* alternatives are viable, we throw an exception.
*
* <p>
* During full LL ATN simulation, closure always evaluates predicates and
* on-the-fly. This is crucial to reducing the configuration set size during
* closure. It hits a landmine when parsing with the Java grammar, for example,
* without this on-the-fly evaluation.</p>
* without this on-the-fly evaluation.
*
* <p>
* <strong>SHARING DFA</strong></p>
* **SHARING DFA**
*
* <p>
* All instances of the same parser share the same decision DFAs through a

@@ -187,8 +166,6 @@ * static field. Each instance gets its own ATN simulator but they share the

* {@link PredictionContext} objects are shared among the DFA states. This makes
* a big size difference.</p>
* a big size difference.
*
* <p>
* <strong>THREAD SAFETY</strong></p>
* **THREAD SAFETY**
*
* <p>
* The {@link ParserATNSimulator} locks on the {@link ATN#decisionToDFA} field when

@@ -207,33 +184,29 @@ * it adds a new DFA object to that array. {@link #addDFAEdge}

* safe as long as we can guarantee that all threads referencing
* {@code s.edge[t]} get the same physical target {@link DFAState}, or
* {@code null}. Once into the DFA, the DFA simulation does not reference the
* `s.edge[t]` get the same physical target {@link DFAState}, or
* `undefined`. Once into the DFA, the DFA simulation does not reference the
* {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
* targets. The DFA simulator will either find {@link DFAState#edges} to be
* {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
* {@code dfa.edges[t]} to be non-null. The
* `undefined`, to be non-`undefined` and `dfa.edges[t]` undefined, or
* `dfa.edges[t]` to be non-undefined. The
* {@link #addDFAEdge} method could be racing to set the field
* but in either case the DFA simulator works; if {@code null}, and requests ATN
* simulation. It could also race trying to get {@code dfa.edges[t]}, but either
* way it will work because it's not doing a test and set operation.</p>
* but in either case the DFA simulator works; if `undefined`, and requests ATN
* simulation. It could also race trying to get `dfa.edges[t]`, but either
* way it will work because it's not doing a test and set operation.
*
* <p>
* <strong>Starting with SLL then failing to combined SLL/LL (Two-Stage
* Parsing)</strong></p>
 * **Starting with SLL then failing over to combined SLL/LL (Two-Stage
 * Parsing)**
*
* <p>
* Sam pointed out that if SLL does not give a syntax error, then there is no
* point in doing full LL, which is slower. We only have to try LL if we get a
* syntax error. For maximum speed, Sam starts the parser set to pure SLL
* mode with the {@link BailErrorStrategy}:</p>
* mode with the {@link BailErrorStrategy}:
*
* <pre>
* parser.interpreter.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
* ```
* parser.interpreter.{@link #setPredictionMode setPredictionMode}`(`{@link PredictionMode#SLL}`)`;
* parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
* </pre>
* ```
*
* <p>
* If it does not get a syntax error, then we're done. If it does get a syntax
* error, we need to retry with the combined SLL/LL strategy.</p>
* error, we need to retry with the combined SLL/LL strategy.
*
* <p>
* The reason this works is as follows. If there are no SLL conflicts, then the

@@ -252,17 +225,15 @@ * grammar is SLL (at least for that input set). If there is an SLL conflict,

* alternative because they both choose the minimum of multiple conflicting
* alternatives.</p>
* alternatives.
*
* <p>
* Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
* a smaller LL set called <em>s</em>. If <em>s</em> is {@code {2, 3}}, then SLL
* Let's say we have a set of SLL conflicting alternatives `{1, 2, 3}` and
* a smaller LL set called *s*. If *s* is `{2, 3}`, then SLL
* parsing will get an error because SLL will pursue alternative 1. If
* <em>s</em> is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
* *s* is `{1, 2}` or `{1, 3}` then both SLL and LL will
* choose the same alternative because alternative one is the minimum of either
* set. If <em>s</em> is {@code {2}} or {@code {3}} then SLL will get a syntax
* error. If <em>s</em> is {@code {1}} then SLL will succeed.</p>
* set. If *s* is `{2}` or `{3}` then SLL will get a syntax
* error. If *s* is `{1}` then SLL will succeed.
*
* <p>
* Of course, if the input is invalid, then we will get an error for sure in
* both SLL and LL parsing. Erroneous input will therefore require 2 passes over
* the input.</p>
* the input.
*/

@@ -278,14 +249,12 @@ export declare class ParserATNSimulator extends ATNSimulator {

* Determines whether the DFA is used for full-context predictions. When
* {@code true}, the DFA stores transition information for both full-context
* `true`, the DFA stores transition information for both full-context
* and SLL parsing; otherwise, the DFA only stores SLL transition
* information.
*
* <p>
* For some grammars, enabling the full-context DFA can result in a
* substantial performance improvement. However, this improvement typically
* comes at the expense of memory used for storing the cached DFA states,
* configuration sets, and prediction contexts.</p>
* configuration sets, and prediction contexts.
*
* <p>
* The default value is {@code false}.</p>
* The default value is `false`.
*/

@@ -300,8 +269,8 @@ enable_global_context_dfa: boolean;

/**
* When {@code true}, ambiguous alternatives are reported when they are
* encountered within {@link #execATN}. When {@code false}, these messages
* are suppressed. The default is {@code false}.
* <p>
* When `true`, ambiguous alternatives are reported when they are
* encountered within {@link #execATN}. When `false`, these messages
* are suppressed. The default is `false`.
*
* When messages about ambiguous alternatives are not required, setting this
* to {@code false} enables additional internal optimizations which may lose
* to `false` enables additional internal optimizations which may lose
* this information.

@@ -328,22 +297,20 @@ */

* Determines if a particular DFA state should be treated as an accept state
* for the current prediction mode. In addition to the {@code useContext}
* for the current prediction mode. In addition to the `useContext`
* parameter, the {@link #getPredictionMode()} method provides the
* prediction mode controlling the prediction algorithm as a whole.
*
* <p>
* The default implementation simply returns the value of
* `DFAState.isAcceptState` except for conflict states when
* {@code useContext} is {@code true} and {@link #getPredictionMode()} is
* `useContext` is `true` and {@link #getPredictionMode()} is
* {@link PredictionMode#LL_EXACT_AMBIG_DETECTION}. In that case, only
* conflict states where {@link ATNConfigSet#isExactConflict} is
* {@code true} are considered accept states.
* </p>
* `true` are considered accept states.
*
* @param state The DFA state to check.
* @param useContext {@code true} if the prediction algorithm is currently
* considering the full parser context; otherwise, {@code false} if the
* @param useContext `true` if the prediction algorithm is currently
* considering the full parser context; otherwise, `false` if the
* algorithm is currently performing a local context prediction.
*
* @return {@code true} if the specified {@code state} is an accept state;
* otherwise, {@code false}.
* @returns `true` if the specified `state` is an accept state;
* otherwise, `false`.
*/

@@ -354,41 +321,42 @@ protected isAcceptState(state: DFAState, useContext: boolean): boolean;

* having to traverse the ATN again for the same input sequence.
There are some key conditions we're looking for after computing a new
set of ATN configs (proposed DFA state):
* if the set is empty, there is no viable alternative for current symbol
* does the state uniquely predict an alternative?
* does the state have a conflict that would prevent us from
putting it on the work list?
* if in non-greedy decision is there a config at a rule stop state?
We also have some key operations to do:
* add an edge from previous DFA state to potentially new DFA state, D,
upon current symbol but only if adding to work list, which means in all
cases except no viable alternative (and possibly non-greedy decisions?)
* collecting predicates and adding semantic context to DFA accept states
* adding rule context to context-sensitive DFA accept states
* consuming an input symbol
* reporting a conflict
* reporting an ambiguity
* reporting a context sensitivity
* reporting insufficient predicates
We should isolate those operations, which are side-effecting, to the
main work loop. We can isolate lots of code into other functions, but
they should be side effect free. They can return package that
indicates whether we should report something, whether we need to add a
DFA edge, whether we need to augment accept state with semantic
context or rule invocation context. Actually, it seems like we always
add predicates if they exist, so that can simply be done in the main
loop for any accept state creation or modification request.
cover these cases:
dead end
single alt
single alt + preds
conflict
conflict + preds
TODO: greedy + those
*
* There are some key conditions we're looking for after computing a new
* set of ATN configs (proposed DFA state):
*
* * if the set is empty, there is no viable alternative for current symbol
* * does the state uniquely predict an alternative?
* * does the state have a conflict that would prevent us from
* putting it on the work list?
* * if in non-greedy decision is there a config at a rule stop state?
*
* We also have some key operations to do:
*
* * add an edge from previous DFA state to potentially new DFA state, D,
* upon current symbol but only if adding to work list, which means in all
* cases except no viable alternative (and possibly non-greedy decisions?)
* * collecting predicates and adding semantic context to DFA accept states
* * adding rule context to context-sensitive DFA accept states
* * consuming an input symbol
* * reporting a conflict
* * reporting an ambiguity
* * reporting a context sensitivity
* * reporting insufficient predicates
*
* We should isolate those operations, which are side-effecting, to the
* main work loop. We can isolate lots of code into other functions, but
* they should be side effect free. They can return package that
* indicates whether we should report something, whether we need to add a
* DFA edge, whether we need to augment accept state with semantic
* context or rule invocation context. Actually, it seems like we always
* add predicates if they exist, so that can simply be done in the main
* loop for any accept state creation or modification request.
*
* cover these cases:
* dead end
* single alt
* single alt + preds
* conflict
* conflict + preds
*
* TODO: greedy + those
*/

@@ -402,3 +370,2 @@ protected execATN(dfa: DFA, input: TokenStream, startIndex: number, initialState: SimulatorState): number;

*
* <p>
* The default implementation of this method uses the following

@@ -409,27 +376,22 @@ * algorithm to identify an ATN configuration which successfully parsed the

* and valid, and the syntax error will be reported later at a more
* localized location.</p>
* localized location.
*
* <ul>
* <li>If no configuration in {@code configs} reached the end of the
* decision rule, return {@link ATN#INVALID_ALT_NUMBER}.</li>
* <li>If all configurations in {@code configs} which reached the end of the
* decision rule predict the same alternative, return that alternative.</li>
* <li>If the configurations in {@code configs} which reached the end of the
* decision rule predict multiple alternatives (call this <em>S</em>),
* choose an alternative in the following order.
* <ol>
* <li>Filter the configurations in {@code configs} to only those
* configurations which remain viable after evaluating semantic predicates.
* If the set of these filtered configurations which also reached the end of
* the decision rule is not empty, return the minimum alternative
* represented in this set.</li>
* <li>Otherwise, choose the minimum alternative in <em>S</em>.</li>
* </ol>
* </li>
* </ul>
* * If no configuration in `configs` reached the end of the
* decision rule, return {@link ATN#INVALID_ALT_NUMBER}.
* * If all configurations in `configs` which reached the end of the
* decision rule predict the same alternative, return that alternative.
* * If the configurations in `configs` which reached the end of the
* decision rule predict multiple alternatives (call this *S*),
* choose an alternative in the following order.
*
* <p>
* 1. Filter the configurations in `configs` to only those
* configurations which remain viable after evaluating semantic predicates.
* If the set of these filtered configurations which also reached the end of
* the decision rule is not empty, return the minimum alternative
* represented in this set.
* 1. Otherwise, choose the minimum alternative in *S*.
*
* In some scenarios, the algorithm described above could predict an
* alternative which will result in a {@link FailedPredicateException} in
* parser. Specifically, this could occur if the <em>only</em> configuration
* parser. Specifically, this could occur if the *only* configuration
* capable of successfully parsing to the end of the decision rule is

@@ -443,7 +405,6 @@ * blocked by a semantic predicate. By choosing this alternative within

* in semantic predicates.
* </p>
*
* @param input The input {@link TokenStream}
* @param startIndex The start index for the current prediction, which is
* the input index where any semantic context in {@code configs} should be
* the input index where any semantic context in `configs` should be
* evaluated

@@ -453,3 +414,3 @@ * @param previous The ATN simulation state immediately before the

*
* @return The value to return from {@link #adaptivePredict}, or
* @returns The value to return from {@link #adaptivePredict}, or
* {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not

@@ -463,8 +424,8 @@ * identified and {@link #adaptivePredict} should report an error instead.

* for the edge has not yet been computed or is otherwise not available,
* this method returns {@code null}.
* this method returns `undefined`.
*
* @param s The current DFA state
* @param t The next input symbol
* @return The existing target DFA state for the given input symbol
* {@code t}, or {@code null} if the target state for this edge is not
* @returns The existing target DFA state for the given input symbol
* `t`, or `undefined` if the target state for this edge is not
* already cached

@@ -484,4 +445,4 @@ */

*
* @return The computed target DFA state for the given input symbol
* {@code t}. If {@code t} does not lead to a valid DFA state, this method
* @returns The computed target DFA state for the given input symbol
* `t`. If `t` does not lead to a valid DFA state, this method
* returns {@link #ERROR}.

@@ -492,5 +453,5 @@ */

* Return a configuration set containing only the configurations from
* {@code configs} which are in a {@link RuleStopState}. If all
* configurations in {@code configs} are already in a rule stop state, this
* method simply returns {@code configs}.
* `configs` which are in a {@link RuleStopState}. If all
* configurations in `configs` are already in a rule stop state, this
* method simply returns `configs`.
*

@@ -500,5 +461,5 @@ * @param configs the configuration set to update

*
* @return {@code configs} if all configurations in {@code configs} are in a
* @returns `configs` if all configurations in `configs` are in a
* rule stop state, otherwise return a new configuration set containing only
* the configurations from {@code configs} which are in a rule stop state
* the configurations from `configs` which are in a rule stop state
*/

@@ -514,33 +475,27 @@ protected removeAllConfigsNotInRuleStopState(configs: ATNConfigSet, contextCache: PredictionContextCache): ATNConfigSet;

*
* <ol>
* <li>Evaluate the precedence predicates for each configuration using
* {@link SemanticContext#evalPrecedence}.</li>
* <li>When {@link ATNConfig#isPrecedenceFilterSuppressed} is {@code false},
* remove all configurations which predict an alternative greater than 1,
* for which another configuration that predicts alternative 1 is in the
* same ATN state with the same prediction context. This transformation is
* valid for the following reasons:
* <ul>
* <li>The closure block cannot contain any epsilon transitions which bypass
* the body of the closure, so all states reachable via alternative 1 are
* part of the precedence alternatives of the transformed left-recursive
* rule.</li>
* <li>The "primary" portion of a left recursive rule cannot contain an
* epsilon transition, so the only way an alternative other than 1 can exist
* in a state that is also reachable via alternative 1 is by nesting calls
* to the left-recursive rule, with the outer calls not being at the
* preferred precedence level. The
* {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN
* configurations which do not meet this condition, and therefore are not
* eligible for elimination during the filtering process.</li>
* </ul>
* </li>
* </ol>
* 1. Evaluate the precedence predicates for each configuration using
* {@link SemanticContext#evalPrecedence}.
* 1. When {@link ATNConfig#isPrecedenceFilterSuppressed} is `false`,
* remove all configurations which predict an alternative greater than 1,
* for which another configuration that predicts alternative 1 is in the
* same ATN state with the same prediction context. This transformation is
* valid for the following reasons:
*
* <p>
* * The closure block cannot contain any epsilon transitions which bypass
* the body of the closure, so all states reachable via alternative 1 are
* part of the precedence alternatives of the transformed left-recursive
* rule.
* * The "primary" portion of a left recursive rule cannot contain an
* epsilon transition, so the only way an alternative other than 1 can exist
* in a state that is also reachable via alternative 1 is by nesting calls
* to the left-recursive rule, with the outer calls not being at the
* preferred precedence level. The
* {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN
* configurations which do not meet this condition, and therefore are not
* eligible for elimination during the filtering process.
*
* The prediction context must be considered by this filter to address
* situations like the following.
* </p>
* <code>
* <pre>
*
* ```antlr
* grammar TA;

@@ -550,17 +505,15 @@ * prog: statement* EOF;

* letterA: 'a';
* </pre>
* </code>
* <p>
* ```
*
* If the above grammar, the ATN state immediately before the token
* reference {@code 'a'} in {@code letterA} is reachable from the left edge
* reference `'a'` in `letterA` is reachable from the left edge
* of both the primary and closure blocks of the left-recursive rule
* {@code statement}. The prediction context associated with each of these
* `statement`. The prediction context associated with each of these
* configurations distinguishes between them, and prevents the alternative
* which stepped out to {@code prog} (and then back in to {@code statement}
 * which stepped out to `prog` (and then back in to `statement`)
* from being eliminated by the filter.
* </p>
*
* @param configs The configuration set computed by
* {@link #computeStartState} as the start state for the DFA.
* @return The transformed configuration set representing the start state
* @returns The transformed configuration set representing the start state
* for a precedence DFA at a particular precedence level (determined by

@@ -576,3 +529,3 @@ * calling {@link Parser#getPrecedence}).

/** Look through a list of predicate/alt pairs, returning alts for the
* pairs that win. A {@code null} predicate indicates an alt containing an
* pairs that win. An `undefined` predicate indicates an alt containing an
* unpredicated config which behaves as "always true."

@@ -584,18 +537,15 @@ */

*
* <p>
* This method might not be called for every semantic context evaluated
* during the prediction process. In particular, we currently do not
* evaluate the following but it may change in the future:</p>
* evaluate the following but it may change in the future:
*
* <ul>
* <li>Precedence predicates (represented by
* {@link SemanticContext.PrecedencePredicate}) are not currently evaluated
* through this method.</li>
* <li>Operator predicates (represented by {@link SemanticContext.AND} and
* {@link SemanticContext.OR}) are evaluated as a single semantic
* context, rather than evaluating the operands individually.
* Implementations which require evaluation results from individual
* predicates should override this method to explicitly handle evaluation of
* the operands within operator predicates.</li>
* </ul>
* * Precedence predicates (represented by
* {@link SemanticContext.PrecedencePredicate}) are not currently evaluated
* through this method.
* * Operator predicates (represented by {@link SemanticContext.AND} and
* {@link SemanticContext.OR}) are evaluated as a single semantic
* context, rather than evaluating the operands individually.
* Implementations which require evaluation results from individual
* predicates should override this method to explicitly handle evaluation of
* the operands within operator predicates.
*

@@ -605,3 +555,3 @@ * @param pred The semantic context to evaluate

* semantic context
* @param alt The alternative which is guarded by {@code pred}
* @param alt The alternative which is guarded by `pred`
*

@@ -626,4 +576,4 @@ * @since 4.3

protected noViableAlt(input: TokenStream, outerContext: ParserRuleContext, configs: ATNConfigSet, startIndex: number): NoViableAltException;
protected getUniqueAlt(configs: Collection<ATNConfig>): number;
protected configWithAltAtStopState(configs: Collection<ATNConfig>, alt: number): boolean;
protected getUniqueAlt(configs: Iterable<ATNConfig>): number;
protected configWithAltAtStopState(configs: Iterable<ATNConfig>, alt: number): boolean;
protected addDFAEdge(dfa: DFA, fromState: DFAState, t: number, contextTransitions: IntegerList | undefined, toConfigs: ATNConfigSet, contextCache: PredictionContextCache): DFAState;

@@ -630,0 +580,0 @@ protected setDFAEdge(p: DFAState, t: number, q: DFAState): void;

@@ -5,9 +5,9 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { BlockStartState } from './BlockStartState';
import { PlusLoopbackState } from './PlusLoopbackState';
/** Start of {@code (A|B|...)+} loop. Technically a decision state, but
import { ATNStateType } from "./ATNStateType";
import { BlockStartState } from "./BlockStartState";
import { PlusLoopbackState } from "./PlusLoopbackState";
/** Start of `(A|B|...)+` loop. Technically a decision state, but
* we don't use for code generation; somebody might need it, so I'm defining
* it for completeness. In reality, the {@link PlusLoopbackState} node is the
* real decision-making note for {@code A+}.
 * real decision-making node for `A+`.
*/

@@ -14,0 +14,0 @@ export declare class PlusBlockStartState extends BlockStartState {

@@ -5,5 +5,5 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { DecisionState } from './DecisionState';
/** Decision state for {@code A+} and {@code (A|B)+}. It has two transitions:
import { ATNStateType } from "./ATNStateType";
import { DecisionState } from "./DecisionState";
/** Decision state for `A+` and `(A|B)+`. It has two transitions:
* one to the loop back to start of the block and one to exit.

@@ -10,0 +10,0 @@ */

@@ -5,6 +5,6 @@ /*!

*/
import { AbstractPredicateTransition } from './AbstractPredicateTransition';
import { ATNState } from './ATNState';
import { SemanticContext } from './SemanticContext';
import { TransitionType } from './TransitionType';
import { AbstractPredicateTransition } from "./AbstractPredicateTransition";
import { ATNState } from "./ATNState";
import { SemanticContext } from "./SemanticContext";
import { TransitionType } from "./TransitionType";
/**

@@ -11,0 +11,0 @@ *

@@ -5,6 +5,6 @@ /*!

*/
import { DecisionEventInfo } from './DecisionEventInfo';
import { SemanticContext } from './SemanticContext';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { DecisionEventInfo } from "./DecisionEventInfo";
import { SemanticContext } from "./SemanticContext";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -48,3 +48,3 @@ * This class represents profiling event information for semantic predicate

* @param predictedAlt The alternative number for the decision which is
* guarded by the semantic context {@code semctx}. See {@link #predictedAlt}
* guarded by the semantic context `semctx`. See {@link #predictedAlt}
* for more information.

@@ -51,0 +51,0 @@ *

@@ -5,6 +5,6 @@ /*!

*/
import { AbstractPredicateTransition } from './AbstractPredicateTransition';
import { ATNState } from './ATNState';
import { SemanticContext } from './SemanticContext';
import { TransitionType } from './TransitionType';
import { AbstractPredicateTransition } from "./AbstractPredicateTransition";
import { ATNState } from "./ATNState";
import { SemanticContext } from "./SemanticContext";
import { TransitionType } from "./TransitionType";
/** TODO: this is old comment:

@@ -11,0 +11,0 @@ * A tree of semantic predicates from the grammar AST if label==SEMPRED.

@@ -5,9 +5,9 @@ /*!

*/
import { Array2DHashMap } from '../misc/Array2DHashMap';
import { ATN } from './ATN';
import { EqualityComparator } from '../misc/EqualityComparator';
import { Equatable } from '../misc/Stubs';
import { PredictionContextCache } from './PredictionContextCache';
import { Recognizer } from '../Recognizer';
import { RuleContext } from '../RuleContext';
import { Array2DHashMap } from "../misc/Array2DHashMap";
import { ATN } from "./ATN";
import { EqualityComparator } from "../misc/EqualityComparator";
import { Equatable } from "../misc/Stubs";
import { PredictionContextCache } from "./PredictionContextCache";
import { Recognizer } from "../Recognizer";
import { RuleContext } from "../RuleContext";
export declare abstract class PredictionContext implements Equatable {

@@ -18,18 +18,18 @@ /**

*
* <pre>
* private int referenceHashCode() {
* int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
* ```
* private int referenceHashCode() {
* int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
*
* for (int i = 0; i &lt; this.size; i++) {
* hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
* }
 *     for (int i = 0; i < this.size; i++) {
* hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
* }
*
* for (int i = 0; i &lt; this.size; i++) {
* hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
* }
 *     for (int i = 0; i < this.size; i++) {
* hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
* }
*
* hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2 * this.size);
* return hash;
* }
* </pre>
* hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2 * this.size);
* return hash;
* }
* ```
*/

@@ -36,0 +36,0 @@ private readonly cachedHashCode;

@@ -5,3 +5,3 @@ /*!

*/
import { PredictionContext } from './PredictionContext';
import { PredictionContext } from "./PredictionContext";
/** Used to cache {@link PredictionContext} objects. Its used for the shared

@@ -8,0 +8,0 @@ * context cash associated with contexts in DFA states. This cache

@@ -5,3 +5,3 @@ /*!

*/
import { ATNConfigSet } from './ATNConfigSet';
import { ATNConfigSet } from "./ATNConfigSet";
/**

@@ -21,3 +21,2 @@ * This enumeration defines the prediction modes available in ANTLR 4 along with

*
* <p>
* When using this prediction mode, the parser will either return a correct

@@ -29,7 +28,6 @@ * parse tree (i.e. the same parse tree that would be returned with the

* that the particular combination of grammar and input requires the more
* powerful {@link #LL} prediction abilities to complete successfully.</p>
* powerful {@link #LL} prediction abilities to complete successfully.
*
* <p>
* This prediction mode does not provide any guarantees for prediction
* behavior for syntactically-incorrect inputs.</p>
* behavior for syntactically-incorrect inputs.
*/

@@ -44,12 +42,10 @@ SLL = 0,

*
* <p>
* When using this prediction mode, the parser will make correct decisions
* for all syntactically-correct grammar and input combinations. However, in
* cases where the grammar is truly ambiguous this prediction mode might not
* report a precise answer for <em>exactly which</em> alternatives are
* ambiguous.</p>
* report a precise answer for *exactly which* alternatives are
* ambiguous.
*
* <p>
* This prediction mode does not provide any guarantees for prediction
* behavior for syntactically-incorrect inputs.</p>
* behavior for syntactically-incorrect inputs.
*/

@@ -64,11 +60,9 @@ LL = 1,

*
* <p>
* This prediction mode may be used for diagnosing ambiguities during
* grammar development. Due to the performance overhead of calculating sets
* of ambiguous alternatives, this prediction mode should be avoided when
* the exact results are not necessary.</p>
* the exact results are not necessary.
*
* <p>
* This prediction mode does not provide any guarantees for prediction
* behavior for syntactically-incorrect inputs.</p>
* behavior for syntactically-incorrect inputs.
*/

@@ -79,3 +73,3 @@ LL_EXACT_AMBIG_DETECTION = 2,

/**
* Checks if any configuration in {@code configs} is in a
* Checks if any configuration in `configs` is in a
* {@link RuleStopState}. Configurations meeting this condition have reached

@@ -86,8 +80,8 @@ * the end of the decision rule (local context) or end of start rule (full

* @param configs the configuration set to test
* @return {@code true} if any configuration in {@code configs} is in a
* {@link RuleStopState}, otherwise {@code false}
* @returns `true` if any configuration in `configs` is in a
* {@link RuleStopState}, otherwise `false`
*/
function hasConfigInRuleStopState(configs: ATNConfigSet): boolean;
/**
* Checks if all configurations in {@code configs} are in a
* Checks if all configurations in `configs` are in a
* {@link RuleStopState}. Configurations meeting this condition have reached

@@ -98,6 +92,6 @@ * the end of the decision rule (local context) or end of start rule (full

* @param configs the configuration set to test
* @return {@code true} if all configurations in {@code configs} are in a
* {@link RuleStopState}, otherwise {@code false}
* @returns `true` if all configurations in `configs` are in a
* {@link RuleStopState}, otherwise `false`
*/
function allConfigsInRuleStopStates(configs: ATNConfigSet): boolean;
}

@@ -5,14 +5,14 @@ /*!

*/
import { ATNConfigSet } from './ATNConfigSet';
import { BitSet } from '../misc/BitSet';
import { DecisionInfo } from './DecisionInfo';
import { DFA } from '../dfa/DFA';
import { DFAState } from '../dfa/DFAState';
import { Parser } from '../Parser';
import { ParserATNSimulator } from './ParserATNSimulator';
import { ParserRuleContext } from '../ParserRuleContext';
import { PredictionContextCache } from './PredictionContextCache';
import { SemanticContext } from './SemanticContext';
import { SimulatorState } from './SimulatorState';
import { TokenStream } from '../TokenStream';
import { ATNConfigSet } from "./ATNConfigSet";
import { BitSet } from "../misc/BitSet";
import { DecisionInfo } from "./DecisionInfo";
import { DFA } from "../dfa/DFA";
import { DFAState } from "../dfa/DFAState";
import { Parser } from "../Parser";
import { ParserATNSimulator } from "./ParserATNSimulator";
import { ParserRuleContext } from "../ParserRuleContext";
import { PredictionContextCache } from "./PredictionContextCache";
import { SemanticContext } from "./SemanticContext";
import { SimulatorState } from "./SimulatorState";
import { TokenStream } from "../TokenStream";
/**

@@ -43,3 +43,4 @@ * @since 4.3

constructor(parser: Parser);
adaptivePredict(input: TokenStream, decision: number, outerContext: ParserRuleContext): number;
adaptivePredict(input: TokenStream, decision: number, outerContext: ParserRuleContext | undefined): number;
adaptivePredict(input: TokenStream, decision: number, outerContext: ParserRuleContext | undefined, useContext: boolean): number;
protected getStartState(dfa: DFA, input: TokenStream, outerContext: ParserRuleContext, useContext: boolean): SimulatorState | undefined;

@@ -46,0 +47,0 @@ protected computeStartState(dfa: DFA, globalContext: ParserRuleContext, useContext: boolean): SimulatorState;

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { IntervalSet } from '../misc/IntervalSet';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { IntervalSet } from "../misc/IntervalSet";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
export declare class RangeTransition extends Transition {

@@ -11,0 +11,0 @@ from: number;

@@ -5,5 +5,5 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { RuleStopState } from './RuleStopState';
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
import { RuleStopState } from "./RuleStopState";
export declare class RuleStartState extends ATNState {

@@ -10,0 +10,0 @@ stopState: RuleStopState;

@@ -5,4 +5,4 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
/** The last node in the ATN for a rule, unless that rule is the start symbol.

@@ -9,0 +9,0 @@ * In that case, there is one transition to EOF. Later, we might encode

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { RuleStartState } from './RuleStartState';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { RuleStartState } from "./RuleStartState";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
/** */

@@ -11,0 +11,0 @@ export declare class RuleTransition extends Transition {

@@ -5,12 +5,12 @@ /*!

*/
import { Comparable } from '../misc/Stubs';
import { Equatable } from '../misc/Stubs';
import { Recognizer } from '../Recognizer';
import { RuleContext } from '../RuleContext';
import { Comparable } from "../misc/Stubs";
import { Equatable } from "../misc/Stubs";
import { Recognizer } from "../Recognizer";
import { RuleContext } from "../RuleContext";
/** A tree structure used to record the semantic context in which
* an ATN configuration is valid. It's either a single predicate,
* a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
* a conjunction `p1&&p2`, or a sum of products `p1||p2`.
*
* <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
* {@link SemanticContext} within the scope of this outer class.</p>
* I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
* {@link SemanticContext} within the scope of this outer class.
*/

@@ -21,3 +21,3 @@ export declare abstract class SemanticContext implements Equatable {

* The default {@link SemanticContext}, which is semantically equivalent to
* a predicate of the form {@code {true}?}.
* a predicate of the form `{true}?`.
*/

@@ -27,3 +27,3 @@ static readonly NONE: SemanticContext;

* For context independent predicates, we evaluate them without a local
* context (i.e., null context). That way, we can evaluate them without
 * context (i.e., undefined context). That way, we can evaluate them without
* having to create proper rule-specific context during prediction (as

@@ -33,7 +33,7 @@ * opposed to the parser, which creates them naturally). In a practical

*
* <p>For context dependent predicates, we must pass in a local context so that
* For context dependent predicates, we must pass in a local context so that
* references such as $arg evaluate properly as _localctx.arg. We only
* capture context dependent predicates in the context in which we begin
* prediction, so we passed in the outer context here in case of context
* dependent predicate evaluation.</p>
* dependent predicate evaluation.
*/

@@ -46,14 +46,13 @@ abstract eval<T>(parser: Recognizer<T, any>, parserCallStack: RuleContext): boolean;

* @param parserCallStack
* @return The simplified semantic context after precedence predicates are
* @returns The simplified semantic context after precedence predicates are
* evaluated, which will be one of the following values.
* <ul>
* <li>{@link #NONE}: if the predicate simplifies to {@code true} after
* precedence predicates are evaluated.</li>
* <li>{@code null}: if the predicate simplifies to {@code false} after
* precedence predicates are evaluated.</li>
* <li>{@code this}: if the semantic context is not changed as a result of
* precedence predicate evaluation.</li>
* <li>A non-{@code null} {@link SemanticContext}: the new simplified
* semantic context after precedence predicates are evaluated.</li>
* </ul>
*
* * {@link #NONE}: if the predicate simplifies to `true` after
* precedence predicates are evaluated.
* * `undefined`: if the predicate simplifies to `false` after
* precedence predicates are evaluated.
* * `this`: if the semantic context is not changed as a result of
* precedence predicate evaluation.
* * A non-`undefined` {@link SemanticContext}: the new simplified
* semantic context after precedence predicates are evaluated.
*/

@@ -102,3 +101,3 @@ evalPrecedence(parser: Recognizer<any, any>, parserCallStack: RuleContext): SemanticContext | undefined;

*
* @return a collection of {@link SemanticContext} operands for the
* @returns a collection of {@link SemanticContext} operands for the
* operator.

@@ -123,5 +122,4 @@ *

*
* <p>
* The evaluation of predicates by this context is short-circuiting, but
* unordered.</p>
* unordered.
*/

@@ -145,5 +143,4 @@ eval<T>(parser: Recognizer<T, any>, parserCallStack: RuleContext): boolean;

*
* <p>
* The evaluation of predicates by this context is short-circuiting, but
* unordered.</p>
* unordered.
*/

@@ -150,0 +147,0 @@ eval<T>(parser: Recognizer<T, any>, parserCallStack: RuleContext): boolean;

@@ -5,6 +5,6 @@ /*!

*/
import { ATNState } from './ATNState';
import { IntervalSet } from '../misc/IntervalSet';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { IntervalSet } from "../misc/IntervalSet";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
/** A transition containing a set of values. */

@@ -11,0 +11,0 @@ export declare class SetTransition extends Transition {

@@ -5,4 +5,4 @@ /*!

*/
import { DFAState } from '../dfa/DFAState';
import { ParserRuleContext } from '../ParserRuleContext';
import { DFAState } from "../dfa/DFAState";
import { ParserRuleContext } from "../ParserRuleContext";
/**

@@ -9,0 +9,0 @@ *

@@ -5,4 +5,4 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { BlockStartState } from './BlockStartState';
import { ATNStateType } from "./ATNStateType";
import { BlockStartState } from "./BlockStartState";
/** The block that begins a closure loop. */

@@ -9,0 +9,0 @@ export declare class StarBlockStartState extends BlockStartState {

@@ -5,5 +5,5 @@ /*!

*/
import { ATNState } from './ATNState';
import { ATNStateType } from './ATNStateType';
import { StarLoopEntryState } from './StarLoopEntryState';
import { ATNState } from "./ATNState";
import { ATNStateType } from "./ATNStateType";
import { StarLoopEntryState } from "./StarLoopEntryState";
export declare class StarLoopbackState extends ATNState {

@@ -10,0 +10,0 @@ readonly loopEntryState: StarLoopEntryState;

@@ -5,6 +5,6 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { BitSet } from '../misc/BitSet';
import { DecisionState } from './DecisionState';
import { StarLoopbackState } from './StarLoopbackState';
import { ATNStateType } from "./ATNStateType";
import { BitSet } from "../misc/BitSet";
import { DecisionState } from "./DecisionState";
import { StarLoopbackState } from "./StarLoopbackState";
export declare class StarLoopEntryState extends DecisionState {

@@ -16,5 +16,5 @@ loopBackState: StarLoopbackState;

*
* <p>This is a computed property that is calculated during ATN deserialization
* This is a computed property that is calculated during ATN deserialization
* and stored for use in {@link ParserATNSimulator} and
* {@link ParserInterpreter}.</p>
* {@link ParserInterpreter}.
*

@@ -25,15 +25,13 @@ * @see `DFA.isPrecedenceDfa`

/**
* For precedence decisions, this set marks states <em>S</em> which have all
* For precedence decisions, this set marks states *S* which have all
* of the following characteristics:
*
* <ul>
* <li>One or more invocation sites of the current rule returns to
* <em>S</em>.</li>
* <li>The closure from <em>S</em> includes the current decision without
* passing through any rule invocations or stepping out of the current
* rule.</li>
* </ul>
* * One or more invocation sites of the current rule returns to
* *S*.
* * The closure from *S* includes the current decision without
* passing through any rule invocations or stepping out of the current
* rule.
*
* <p>This field is not used when {@link #isPrecedenceDecision} is
* {@code false}.</p>
* This field is not used when {@link #precedenceRuleDecision} is
* `false`.
*/

@@ -40,0 +38,0 @@ precedenceLoopbackStates: BitSet;

@@ -5,4 +5,4 @@ /*!

*/
import { ATNStateType } from './ATNStateType';
import { DecisionState } from './DecisionState';
import { ATNStateType } from "./ATNStateType";
import { DecisionState } from "./DecisionState";
/** The Tokens rule start state linking to each lexer rule start state */

@@ -9,0 +9,0 @@ export declare class TokensStartState extends DecisionState {

@@ -5,16 +5,16 @@ /*!

*/
import { ATNState } from './ATNState';
import { IntervalSet } from '../misc/IntervalSet';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { IntervalSet } from "../misc/IntervalSet";
import { TransitionType } from "./TransitionType";
/** An ATN transition between any two ATN states. Subclasses define
* atom, set, epsilon, action, predicate, rule transitions.
*
* <p>This is a one way link. It emanates from a state (usually via a list of
* transitions) and has a target state.</p>
* This is a one way link. It emanates from a state (usually via a list of
* transitions) and has a target state.
*
* <p>Since we never have to change the ATN transitions once we construct it,
* Since we never have to change the ATN transitions once we construct it,
* we can fix these transitions as specific classes. The DFA transitions
* on the other hand need to update the labels as it adds transitions to
* the states. We'll use the term Edge for the DFA to distinguish them from
* ATN transitions.</p>
* ATN transitions.
*/

@@ -30,6 +30,6 @@ export declare abstract class Transition {

*
* <p>The default implementation returns {@code false}.</p>
* The default implementation returns `false`.
*
* @return {@code true} if traversing this transition in the ATN does not
* consume an input symbol; otherwise, {@code false} if traversing this
* @returns `true` if traversing this transition in the ATN does not
* consume an input symbol; otherwise, `false` if traversing this
* transition consumes (matches) an input symbol.

@@ -36,0 +36,0 @@ */

@@ -5,5 +5,5 @@ /*!

*/
import { ATNState } from './ATNState';
import { Transition } from './Transition';
import { TransitionType } from './TransitionType';
import { ATNState } from "./ATNState";
import { Transition } from "./Transition";
import { TransitionType } from "./TransitionType";
export declare class WildcardTransition extends Transition {

@@ -10,0 +10,0 @@ constructor(target: ATNState);

@@ -5,2 +5,6 @@ /*!

*/
import { DefaultErrorStrategy } from "./DefaultErrorStrategy";
import { Parser } from "./Parser";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
/**

@@ -13,29 +17,23 @@ * This implementation of {@link ANTLRErrorStrategy} responds to syntax errors

*
* <p>
* This error strategy is useful in the following scenarios.</p>
* This error strategy is useful in the following scenarios.
*
* <ul>
* <li><strong>Two-stage parsing:</strong> This error strategy allows the first
* stage of two-stage parsing to immediately terminate if an error is
* encountered, and immediately fall back to the second stage. In addition to
* avoiding wasted work by attempting to recover from errors here, the empty
* implementation of {@link BailErrorStrategy#sync} improves the performance of
* the first stage.</li>
* <li><strong>Silent validation:</strong> When syntax errors are not being
* reported or logged, and the parse result is simply ignored if errors occur,
* the {@link BailErrorStrategy} avoids wasting work on recovering from errors
* when the result will be ignored either way.</li>
* </ul>
* * **Two-stage parsing:** This error strategy allows the first
* stage of two-stage parsing to immediately terminate if an error is
* encountered, and immediately fall back to the second stage. In addition to
* avoiding wasted work by attempting to recover from errors here, the empty
* implementation of {@link BailErrorStrategy#sync} improves the performance of
* the first stage.
* * **Silent validation:** When syntax errors are not being
* reported or logged, and the parse result is simply ignored if errors occur,
* the {@link BailErrorStrategy} avoids wasting work on recovering from errors
* when the result will be ignored either way.
*
* <p>
* {@code myparser.errorHandler = new BailErrorStrategy();}</p>
* ```
* myparser.errorHandler = new BailErrorStrategy();
* ```
*
* @see Parser.errorHandler
*/
import { DefaultErrorStrategy } from "./DefaultErrorStrategy";
import { Parser } from './Parser';
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
export declare class BailErrorStrategy extends DefaultErrorStrategy {
/** Instead of recovering from exception {@code e}, re-throw it wrapped
/** Instead of recovering from exception `e`, re-throw it wrapped
* in a {@link ParseCancellationException} so it is not caught by the

@@ -42,0 +40,0 @@ * rule function catches. Use {@link Exception#getCause()} to get the

@@ -5,7 +5,7 @@ /*!

*/
import { Interval } from './misc/Interval';
import { RuleContext } from './RuleContext';
import { Token } from './Token';
import { TokenSource } from './TokenSource';
import { TokenStream } from './TokenStream';
import { Interval } from "./misc/Interval";
import { RuleContext } from "./RuleContext";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
import { TokenStream } from "./TokenStream";
/**

@@ -16,3 +16,2 @@ * This implementation of {@link TokenStream} loads tokens from a

*
* <p>
* This token stream ignores the value of {@link Token#getChannel}. If your

@@ -22,3 +21,3 @@ * parser requires the token stream filter tokens to only those on a particular

* {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a
* {@link CommonTokenStream}.</p>
* {@link CommonTokenStream}.
*/

@@ -33,3 +32,3 @@ export declare class BufferedTokenStream implements TokenStream {

* considered a complete view of the input once {@link #fetchedEOF} is set
* to {@code true}.
* to `true`.
*/

@@ -39,10 +38,10 @@ protected tokens: Token[];

* The index into {@link #tokens} of the current token (next token to
* {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
* {@link #consume}). {@link #tokens}`[`{@link #p}`]` should be
* {@link #LT LT(1)}.
*
* <p>This field is set to -1 when the stream is first constructed or when
* This field is set to -1 when the stream is first constructed or when
* {@link #setTokenSource} is called, indicating that the first token has
* not yet been fetched from the token source. For additional information,
* see the documentation of {@link IntStream} for a description of
* Initializing Methods.</p>
* Initializing Methods.
*/

@@ -55,9 +54,7 @@ protected p: number;

*
* <ul>
* <li>{@link #consume}: The lookahead check in {@link #consume} to prevent
* consuming the EOF symbol is optimized by checking the values of
* {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.</li>
* <li>{@link #fetch}: The check to prevent adding multiple EOF symbols into
* {@link #tokens} is trivial with this field.</li>
* <ul>
* * {@link #consume}: The lookahead check in {@link #consume} to prevent
* consuming the EOF symbol is optimized by checking the values of
* {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
* * {@link #fetch}: The check to prevent adding multiple EOF symbols into
* {@link #tokens} is trivial with this field.
*/

@@ -74,12 +71,12 @@ protected fetchedEOF: boolean;

consume(): void;
/** Make sure index {@code i} in tokens has a token.
/** Make sure index `i` in tokens has a token.
*
* @return {@code true} if a token is located at index {@code i}, otherwise
* {@code false}.
* @returns `true` if a token is located at index `i`, otherwise
* `false`.
* @see #get(int i)
*/
protected sync(i: number): boolean;
/** Add {@code n} elements to buffer.
/** Add `n` elements to buffer.
*
* @return The actual number of elements added to the buffer.
* @returns The actual number of elements added to the buffer.
*/

@@ -97,11 +94,11 @@ protected fetch(n: number): number;

* the current stream position by adjusting the target token index of a seek
* operation. The default implementation simply returns {@code i}. If an
* operation. The default implementation simply returns `i`. If an
* exception is thrown in this method, the current stream index should not be
* changed.
*
* <p>For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.</p>
* For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.
*
* @param i The target token index.
* @return The adjusted target token index.
* @returns The adjusted target token index.
*/

@@ -111,11 +108,10 @@ protected adjustSeekIndex(i: number): number;

protected setup(): void;
/** Given a start and stop index, return a {@code List} of all tokens in
* the token type {@code BitSet}. Return {@code null} if no tokens were found. This
* method looks at both on and off channel tokens.
*/
getTokens(start?: number, stop?: number, types?: Set<number> | number): Token[];
getTokens(): Token[];
getTokens(start: number, stop: number): Token[];
getTokens(start: number, stop: number, types: Set<number>): Token[];
getTokens(start: number, stop: number, ttype: number): Token[];
/**
* Given a starting index, return the index of the next token on channel.
* Return {@code i} if {@code tokens[i]} is on channel. Return the index of
* the EOF token if there are no tokens on channel between {@code i} and
* Return `i` if `tokens[i]` is on channel. Return the index of
* the EOF token if there are no tokens on channel between `i` and
* EOF.

@@ -126,9 +122,8 @@ */

* Given a starting index, return the index of the previous token on
* channel. Return {@code i} if {@code tokens[i]} is on channel. Return -1
* if there are no tokens on channel between {@code i} and 0.
* channel. Return `i` if `tokens[i]` is on channel. Return -1
* if there are no tokens on channel between `i` and 0.
*
* <p>
* If {@code i} specifies an index at or after the EOF token, the EOF token
* If `i` specifies an index at or after the EOF token, the EOF token
* index is returned. This is due to the fact that the EOF token is treated
* as though it were on every channel.</p>
* as though it were on every channel.
*/

@@ -138,3 +133,3 @@ protected previousTokenOnChannel(i: number, channel: number): number;

* the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL} or
* EOF. If {@code channel} is {@code -1}, find any non default channel token.
* EOF. If `channel` is `-1`, find any non default channel token.
*/

@@ -144,3 +139,3 @@ getHiddenTokensToRight(tokenIndex: number, channel?: number): Token[];

* the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL}.
* If {@code channel} is {@code -1}, find any non default channel token.
* If `channel` is `-1`, find any non default channel token.
*/

@@ -147,0 +142,0 @@ getHiddenTokensToLeft(tokenIndex: number, channel?: number): Token[];

@@ -5,4 +5,4 @@ /*!

*/
import { Interval } from './misc/Interval';
import { IntStream } from './IntStream';
import { Interval } from "./misc/Interval";
import { IntStream } from "./IntStream";
/** A source of characters for an ANTLR lexer. */

@@ -13,11 +13,11 @@ export interface CharStream extends IntStream {

* stream. This method is guaranteed to not throw an exception if the
* specified {@code interval} lies entirely within a marked range. For more
* specified `interval` lies entirely within a marked range. For more
* information about marked ranges, see {@link IntStream#mark}.
*
* @param interval an interval within the stream
* @return the text of the specified interval
* @returns the text of the specified interval
*
* @throws NullPointerException if {@code interval} is {@code null}
* @throws IllegalArgumentException if {@code interval.a < 0}, or if
* {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or
* @throws NullPointerException if `interval` is `undefined`
* @throws IllegalArgumentException if `interval.a < 0`, or if
* `interval.b < interval.a - 1`, or if `interval.b` lies at or
* past the end of the stream

@@ -24,0 +24,0 @@ * @throws UnsupportedOperationException if the stream does not support

@@ -5,8 +5,8 @@ /*!

*/
import { ATNSimulator } from './atn/ATNSimulator';
import { CharStream } from './CharStream';
import { Recognizer } from './Recognizer';
import { Token } from './Token';
import { TokenSource } from './TokenSource';
import { WritableToken } from './WritableToken';
import { ATNSimulator } from "./atn/ATNSimulator";
import { CharStream } from "./CharStream";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
import { WritableToken } from "./WritableToken";
export declare class CommonToken implements WritableToken {

@@ -43,7 +43,6 @@ /**

*
* <p>
* These properties share a field to reduce the memory footprint of
* {@link CommonToken}. Tokens created by a {@link CommonTokenFactory} from
* the same source and input stream share a reference to the same
* {@link Tuple2} containing these values.</p>
* {@link Tuple2} containing these values.
*/

@@ -80,4 +79,3 @@ protected source: {

*
* <p>
* If {@code oldToken} is also a {@link CommonToken} instance, the newly
* If `oldToken` is also a {@link CommonToken} instance, the newly
* constructed token will share a reference to the {@link #text} field and

@@ -87,3 +85,3 @@ * the {@link Tuple2} stored in {@link #source}. Otherwise, {@link #text} will

* will be constructed from the result of {@link Token#getTokenSource} and
* {@link Token#getInputStream}.</p>
* {@link Token#getInputStream}.
*

@@ -97,6 +95,6 @@ * @param oldToken The token to copy.

* Explicitly set the text for this token. If {code text} is not
* {@code null}, then {@link #getText} will return this value rather than
* `undefined`, then {@link #getText} will return this value rather than
* extracting the text from the input.
*
* @param text The explicit text of the token, or {@code null} if the text
* @param text The explicit text of the token, or `undefined` if the text
* should be obtained from the input along with the start and stop indexes

@@ -114,3 +112,3 @@ * of the token.

toString(): string;
toString<Symbol, ATNInterpreter extends ATNSimulator>(recognizer: Recognizer<Symbol, ATNInterpreter> | undefined): string;
toString<TSymbol, ATNInterpreter extends ATNSimulator>(recognizer: Recognizer<TSymbol, ATNInterpreter> | undefined): string;
}

@@ -5,6 +5,6 @@ /*!

*/
import { CharStream } from './CharStream';
import { CommonToken } from './CommonToken';
import { TokenFactory } from './TokenFactory';
import { TokenSource } from './TokenSource';
import { CharStream } from "./CharStream";
import { CommonToken } from "./CommonToken";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/**

@@ -26,5 +26,4 @@ * This default implementation of {@link TokenFactory} creates

*
* <p>
* The default value is {@code false} to avoid the performance and memory
* overhead of copying text for every token unless explicitly requested.</p>
* The default value is `false` to avoid the performance and memory
* overhead of copying text for every token unless explicitly requested.
*/

@@ -36,5 +35,4 @@ protected copyText: boolean;

*
* <p>
* When {@code copyText} is {@code false}, the {@link #DEFAULT} instance
* should be used instead of constructing a new instance.</p>
* When `copyText` is `false`, the {@link #DEFAULT} instance
* should be used instead of constructing a new instance.
*

@@ -47,3 +45,3 @@ * @param copyText The value for {@link #copyText}.

stream?: CharStream;
}, type: number, text: string, channel: number, start: number, stop: number, line: number, charPositionInLine: number): CommonToken;
}, type: number, text: string | undefined, channel: number, start: number, stop: number, line: number, charPositionInLine: number): CommonToken;
createSimple(type: number, text: string): CommonToken;

@@ -55,7 +53,6 @@ }

*
* <p>
* This token factory does not explicitly copy token text when constructing
* tokens.</p>
* tokens.
*/
const DEFAULT: TokenFactory;
}

@@ -5,5 +5,5 @@ /*!

*/
import { BufferedTokenStream } from './BufferedTokenStream';
import { Token } from './Token';
import { TokenSource } from './TokenSource';
import { BufferedTokenStream } from "./BufferedTokenStream";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/**

@@ -14,20 +14,16 @@ * This class extends {@link BufferedTokenStream} with functionality to filter

*
* <p>
* This token stream provides access to all tokens by index or when calling
* methods like {@link #getText}. The channel filtering is only used for code
* accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
* {@link #LB}.</p>
* {@link #LB}.
*
* <p>
* By default, tokens are placed on the default channel
* ({@link Token#DEFAULT_CHANNEL}), but may be reassigned by using the
* {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
* `->channel(HIDDEN)` lexer command, or by using an embedded action to
* call {@link Lexer#setChannel}.
* </p>
*
* <p>
* Note: lexer rules which use the {@code ->skip} lexer command or call
* Note: lexer rules which use the `->skip` lexer command or call
* {@link Lexer#skip} do not produce tokens at all, so input text matched by
* such a rule will not be available as part of the token stream, regardless of
* channel.</p>
* channel.
*/

@@ -38,5 +34,4 @@ export declare class CommonTokenStream extends BufferedTokenStream {

*
* <p>
* The default value is {@link Token#DEFAULT_CHANNEL}, which matches the
* default channel assigned to tokens created by the lexer.</p>
* default channel assigned to tokens created by the lexer.
*/

@@ -47,3 +42,3 @@ protected channel: number;

* source and filtering tokens to the specified channel. Only tokens whose
* {@link Token#getChannel} matches {@code channel} or have the
* {@link Token#getChannel} matches `channel` or have the
* `Token.type` equal to {@link Token#EOF} will be returned by the

@@ -50,0 +45,0 @@ * token stream lookahead methods.

@@ -5,2 +5,5 @@ /*!

*/
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
/**

@@ -10,5 +13,2 @@ *

*/
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from './Recognizer';
export declare class ConsoleErrorListener implements ANTLRErrorListener<any> {

@@ -22,12 +22,11 @@ /**

*
* <p>
* This implementation prints messages to {@link System#err} containing the
* values of {@code line}, {@code charPositionInLine}, and {@code msg} using
* the following format.</p>
* values of `line`, `charPositionInLine`, and `msg` using
* the following format.
*
* <pre>
* line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
* line *line*:*charPositionInLine* *msg*
* </pre>
*/
syntaxError<T>(recognizer: Recognizer<T, any>, offendingSymbol: T, line: number, charPositionInLine: number, msg: string, e: RecognitionException): void;
syntaxError<T>(recognizer: Recognizer<T, any>, offendingSymbol: T, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
}

@@ -5,6 +5,2 @@ /*!

*/
/**
* This is the default implementation of {@link ANTLRErrorStrategy} used for
* error reporting and recovery in ANTLR parsers.
*/
import { ANTLRErrorStrategy } from "./ANTLRErrorStrategy";

@@ -15,6 +11,11 @@ import { FailedPredicateException } from "./FailedPredicateException";

import { NoViableAltException } from "./NoViableAltException";
import { Parser } from './Parser';
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/**
* This is the default implementation of {@link ANTLRErrorStrategy} used for
* error reporting and recovery in ANTLR parsers.
*/
export declare class DefaultErrorStrategy implements ANTLRErrorStrategy {

@@ -38,6 +39,19 @@ /**

/**
* This field is used to propagate information about the lookahead following
* the previous match. Since prediction prefers completing the current rule
* to error recovery efforts, error reporting may occur later than the
* original point where it was discoverable. The original context is used to
* compute the true expected sets as though the reporting occurred as early
* as possible.
*/
protected nextTokensContext?: ParserRuleContext;
/**
* @see #nextTokensContext
*/
protected nextTokensState: number;
/**
* {@inheritDoc}
*
* <p>The default implementation simply calls {@link #endErrorCondition} to
* ensure that the handler is not in error recovery mode.</p>
* The default implementation simply calls {@link #endErrorCondition} to
* ensure that the handler is not in error recovery mode.
*/

@@ -66,3 +80,3 @@ reset(recognizer: Parser): void;

*
* <p>The default implementation simply calls {@link #endErrorCondition}.</p>
* The default implementation simply calls {@link #endErrorCondition}.
*/

@@ -73,17 +87,15 @@ reportMatch(recognizer: Parser): void;

*
* <p>The default implementation returns immediately if the handler is already
* The default implementation returns immediately if the handler is already
* in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
* and dispatches the reporting task based on the runtime type of {@code e}
* according to the following table.</p>
* and dispatches the reporting task based on the runtime type of `e`
* according to the following table.
*
* <ul>
* <li>{@link NoViableAltException}: Dispatches the call to
* {@link #reportNoViableAlternative}</li>
* <li>{@link InputMismatchException}: Dispatches the call to
* {@link #reportInputMismatch}</li>
* <li>{@link FailedPredicateException}: Dispatches the call to
* {@link #reportFailedPredicate}</li>
* <li>All other types: calls {@link Parser#notifyErrorListeners} to report
* the exception</li>
* </ul>
* * {@link NoViableAltException}: Dispatches the call to
* {@link #reportNoViableAlternative}
* * {@link InputMismatchException}: Dispatches the call to
* {@link #reportInputMismatch}
* * {@link FailedPredicateException}: Dispatches the call to
* {@link #reportFailedPredicate}
* * All other types: calls {@link Parser#notifyErrorListeners} to report
* the exception
*/

@@ -95,5 +107,5 @@ reportError(recognizer: Parser, e: RecognitionException): void;

*
* <p>The default implementation resynchronizes the parser by consuming tokens
* The default implementation resynchronizes the parser by consuming tokens
* until we find one in the resynchronization set--loosely the set of tokens
* that can follow the current rule.</p>
* that can follow the current rule.
*/

@@ -107,9 +119,9 @@ recover(recognizer: Parser, e: RecognitionException): void;

*
* <p>Implements Jim Idle's magic sync mechanism in closures and optional
* subrules. E.g.,</p>
* Implements Jim Idle's magic sync mechanism in closures and optional
* subrules. E.g.,
*
* <pre>
* ```antlr
* a : sync ( stuff sync )* ;
* sync : {consume to what can follow sync} ;
* </pre>
* ```
*

@@ -121,19 +133,19 @@ * At the start of a sub rule upon error, {@link #sync} performs single

*
* <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
* If the sub rule is optional (`(...)?`, `(...)*`, or block
* with an empty alternative), then the expected set includes what follows
* the subrule.</p>
* the subrule.
*
* <p>During loop iteration, it consumes until it sees a token that can start a
* During loop iteration, it consumes until it sees a token that can start a
* sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
* stay in the loop as long as possible.</p>
* stay in the loop as long as possible.
*
* <p><strong>ORIGINS</strong></p>
* **ORIGINS**
*
* <p>Previous versions of ANTLR did a poor job of their recovery within loops.
* Previous versions of ANTLR did a poor job of their recovery within loops.
* A single mismatch token or missing token would force the parser to bail
* out of the entire rules surrounding the loop. So, for rule</p>
* out of the entire rules surrounding the loop. So, for rule
*
* <pre>
* ```antlr
* classDef : 'class' ID '{' member* '}'
* </pre>
* ```
*

@@ -144,6 +156,6 @@ * input with an extra token between members would force the parser to

*
* <p>This functionality cost a little bit of effort because the parser has to
* This functionality cost a little bit of effort because the parser has to
* compare token set at the start of the loop and at each iteration. If for
* some reason speed is suffering for you, you can turn off this
* functionality by simply overriding this method as a blank { }.</p>
* functionality by simply overriding this method as a blank { }.
*/

@@ -184,14 +196,14 @@ sync(recognizer: Parser): void;

* of a token from the input stream. At the time this method is called, the
* erroneous symbol is current {@code LT(1)} symbol and has not yet been
* erroneous symbol is current `LT(1)` symbol and has not yet been
* removed from the input stream. When this method returns,
* {@code recognizer} is in error recovery mode.
* `recognizer` is in error recovery mode.
*
* <p>This method is called when {@link #singleTokenDeletion} identifies
* This method is called when {@link #singleTokenDeletion} identifies
* single-token deletion as a viable recovery strategy for a mismatched
* input error.</p>
* input error.
*
* <p>The default implementation simply returns if the handler is already in
* The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.</p>
* {@link Parser#notifyErrorListeners}.
*

@@ -205,12 +217,12 @@ * @param recognizer the parser instance

* method is called, the missing token has not yet been inserted. When this
* method returns, {@code recognizer} is in error recovery mode.
* method returns, `recognizer` is in error recovery mode.
*
* <p>This method is called when {@link #singleTokenInsertion} identifies
* This method is called when {@link #singleTokenInsertion} identifies
* single-token insertion as a viable recovery strategy for a mismatched
* input error.</p>
* input error.
*
* <p>The default implementation simply returns if the handler is already in
* The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.</p>
* {@link Parser#notifyErrorListeners}.
*

@@ -223,48 +235,48 @@ * @param recognizer the parser instance

*
* <p>The default implementation attempts to recover from the mismatched input
* The default implementation attempts to recover from the mismatched input
* by using single token insertion and deletion as described below. If the
* recovery attempt fails, this method
* {@link InputMismatchException}.</p>
* {@link InputMismatchException}.
*
* <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
* **EXTRA TOKEN** (single token deletion)
*
* <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
* right token, however, then assume {@code LA(1)} is some extra spurious
* `LA(1)` is not what we are looking for. If `LA(2)` has the
* right token, however, then assume `LA(1)` is some extra spurious
* token and delete it. Then consume and return the next token (which was
* the {@code LA(2)} token) as the successful result of the match operation.</p>
* the `LA(2)` token) as the successful result of the match operation.
*
* <p>This recovery strategy is implemented by {@link #singleTokenDeletion}.</p>
* This recovery strategy is implemented by {@link #singleTokenDeletion}.
*
* <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
* **MISSING TOKEN** (single token insertion)
*
* <p>If current token (at {@code LA(1)}) is consistent with what could come
* after the expected {@code LA(1)} token, then assume the token is missing
* If current token (at `LA(1)`) is consistent with what could come
* after the expected `LA(1)` token, then assume the token is missing
* and use the parser's {@link TokenFactory} to create it on the fly. The
* "insertion" is performed by returning the created token as the successful
* result of the match operation.</p>
* result of the match operation.
*
* <p>This recovery strategy is implemented by {@link #singleTokenInsertion}.</p>
* This recovery strategy is implemented by {@link #singleTokenInsertion}.
*
* <p><strong>EXAMPLE</strong></p>
* **EXAMPLE**
*
* <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
* the parser returns from the nested call to {@code expr}, it will have
* call chain:</p>
* For example, Input `i=(3;` is clearly missing the `')'`. When
* the parser returns from the nested call to `expr`, it will have
* call chain:
*
* <pre>
* stat &rarr; expr &rarr; atom
* </pre>
* ```
* stat → expr → atom
* ```
*
* and it will be trying to match the {@code ')'} at this point in the
* and it will be trying to match the `')'` at this point in the
* derivation:
*
* <pre>
* =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
* ```
* => ID '=' '(' INT ')' ('+' atom)* ';'
* ^
* </pre>
* ```
*
* The attempt to match {@code ')'} will fail when it sees {@code ';'} and
* call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
* is in the set of tokens that can follow the {@code ')'} token reference
* in rule {@code atom}. It can assume that you forgot the {@code ')'}.
* The attempt to match `')'` will fail when it sees `';'` and
* call {@link #recoverInline}. To recover, it sees that `LA(1)==';'`
* is in the set of tokens that can follow the `')'` token reference
* in rule `atom`. It can assume that you forgot the `')'`.
*/

@@ -276,14 +288,14 @@ recoverInline(recognizer: Parser): Token;

* deletion strategy fails to recover from the mismatched input. If this
* method returns {@code true}, {@code recognizer} will be in error recovery
* method returns `true`, `recognizer` will be in error recovery
* mode.
*
* <p>This method determines whether or not single-token insertion is viable by
* checking if the {@code LA(1)} input symbol could be successfully matched
* if it were instead the {@code LA(2)} symbol. If this method returns
* {@code true}, the caller is responsible for creating and inserting a
* token with the correct type to produce this behavior.</p>
* This method determines whether or not single-token insertion is viable by
* checking if the `LA(1)` input symbol could be successfully matched
* if it were instead the `LA(2)` symbol. If this method returns
* `true`, the caller is responsible for creating and inserting a
* token with the correct type to produce this behavior.
*
* @param recognizer the parser instance
* @return {@code true} if single-token insertion is a viable recovery
* strategy for the current mismatched input, otherwise {@code false}
* @returns `true` if single-token insertion is a viable recovery
* strategy for the current mismatched input, otherwise `false`
*/

@@ -294,17 +306,17 @@ protected singleTokenInsertion(recognizer: Parser): boolean;

* strategy. It is called by {@link #recoverInline} to attempt to recover
* from mismatched input. If this method returns null, the parser and error
* handler state will not have changed. If this method returns non-null,
* {@code recognizer} will <em>not</em> be in error recovery mode since the
* from mismatched input. If this method returns `undefined`, the parser and error
* handler state will not have changed. If this method returns non-`undefined`,
* `recognizer` will *not* be in error recovery mode since the
* returned token was a successful match.
*
* <p>If the single-token deletion is successful, this method calls
* If the single-token deletion is successful, this method calls
* {@link #reportUnwantedToken} to report the error, followed by
* {@link Parser#consume} to actually "delete" the extraneous token. Then,
* before returning {@link #reportMatch} is called to signal a successful
* match.</p>
* match.
*
* @param recognizer the parser instance
* @return the successfully matched {@link Token} instance if single-token
* @returns the successfully matched {@link Token} instance if single-token
* deletion successfully recovers from the mismatched input, otherwise
* {@code null}
* `undefined`
*/

@@ -311,0 +323,0 @@ protected singleTokenDeletion(recognizer: Parser): Token | undefined;

@@ -5,3 +5,3 @@ /*!

*/
import { LexerActionExecutor } from '../atn/LexerActionExecutor';
import { LexerActionExecutor } from "../atn/LexerActionExecutor";
/**

@@ -24,3 +24,3 @@ * Stores information about a {@link DFAState} which is an accept state under

* assumes the predicates, if any, in the {@link DFAState} evaluate to
* {@code true}. If predicate evaluation is enabled, the final prediction of
* `true`. If predicate evaluation is enabled, the final prediction of
* the accept state will be determined by the result of predicate

@@ -27,0 +27,0 @@ * evaluation.

@@ -5,9 +5,9 @@ /*!

*/
import { Array2DHashSet } from '../misc/Array2DHashSet';
import { ATN } from '../atn/ATN';
import { ATNState } from '../atn/ATNState';
import { DecisionState } from '../atn/DecisionState';
import { DFAState } from './DFAState';
import { TokensStartState } from '../atn/TokensStartState';
import { Vocabulary } from '../Vocabulary';
import { Array2DHashSet } from "../misc/Array2DHashSet";
import { ATN } from "../atn/ATN";
import { ATNState } from "../atn/ATNState";
import { DecisionState } from "../atn/DecisionState";
import { DFAState } from "./DFAState";
import { TokensStartState } from "../atn/TokensStartState";
import { Vocabulary } from "../Vocabulary";
export declare class DFA {

@@ -33,4 +33,4 @@ /**

/**
* {@code true} if this DFA is for a precedence decision; otherwise,
* {@code false}. This is the backing field for {@link #isPrecedenceDfa}.
* `true` if this DFA is for a precedence decision; otherwise,
* `false`. This is the backing field for {@link #isPrecedenceDfa}.
*/

@@ -62,4 +62,4 @@ private precedenceDfa;

*
* @return {@code true} if this is a precedence DFA; otherwise,
* {@code false}.
* @returns `true` if this is a precedence DFA; otherwise,
* `false`.
* @see Parser.precedence

@@ -72,4 +72,4 @@ */

* @param precedence The current precedence.
* @return The start state corresponding to the specified precedence, or
* {@code null} if no start state exists for the specified precedence.
* @returns The start state corresponding to the specified precedence, or
* `undefined` if no start state exists for the specified precedence.
*

@@ -76,0 +76,0 @@ * @ if this is not a precedence DFA.

@@ -5,7 +5,7 @@ /*!

*/
import { ATN } from '../atn/ATN';
import { DFA } from './DFA';
import { DFAState } from './DFAState';
import { Recognizer } from '../Recognizer';
import { Vocabulary } from '../Vocabulary';
import { ATN } from "../atn/ATN";
import { DFA } from "./DFA";
import { DFAState } from "./DFAState";
import { Recognizer } from "../Recognizer";
import { Vocabulary } from "../Vocabulary";
/** A DFA walker that knows how to dump them to serialized strings. */

@@ -12,0 +12,0 @@ export declare class DFASerializer {

@@ -5,7 +5,7 @@ /*!

*/
import { AcceptStateInfo } from './AcceptStateInfo';
import { ATN } from '../atn/ATN';
import { ATNConfigSet } from '../atn/ATNConfigSet';
import { LexerActionExecutor } from '../atn/LexerActionExecutor';
import { SemanticContext } from '../atn/SemanticContext';
import { AcceptStateInfo } from "./AcceptStateInfo";
import { ATN } from "../atn/ATN";
import { ATNConfigSet } from "../atn/ATNConfigSet";
import { LexerActionExecutor } from "../atn/LexerActionExecutor";
import { SemanticContext } from "../atn/SemanticContext";
/** A DFA state represents a set of possible ATN configurations.

@@ -27,9 +27,9 @@ * As Aho, Sethi, Ullman p. 117 says "The DFA uses its state

*
* <p>I use a set of ATNConfig objects not simple states. An ATNConfig
* I use a set of ATNConfig objects not simple states. An ATNConfig
* is both a state (ala normal conversion) and a RuleContext describing
* the chain of rules (if any) followed to arrive at that state.</p>
* the chain of rules (if any) followed to arrive at that state.
*
* <p>A DFA state may have multiple references to a particular state,
* A DFA state may have multiple references to a particular state,
* but with different ATN contexts (with same or different alts)
* meaning that state was reached via a different set of rule invocations.</p>
* meaning that state was reached via a different set of rule invocations.
*/

@@ -39,3 +39,3 @@ export declare class DFAState {

configs: ATNConfigSet;
/** {@code edges.get(symbol)} points to target of symbol.
/** `edges.get(symbol)` points to target of symbol.
*/

@@ -77,10 +77,10 @@ private readonly edges;

*
* <p>Because the number of alternatives and number of ATN configurations are
* Because the number of alternatives and number of ATN configurations are
* finite, there is a finite number of DFA states that can be processed.
* This is necessary to show that the algorithm terminates.</p>
* This is necessary to show that the algorithm terminates.
*
* <p>Cannot test the DFA state numbers here because in
* Cannot test the DFA state numbers here because in
* {@link ParserATNSimulator#addDFAState} we need to know if any other state
* exists that has this exact set of ATN configurations. The
* {@link #stateNumber} is irrelevant.</p>
* {@link #stateNumber} is irrelevant.
*/

@@ -87,0 +87,0 @@ equals(o: any): boolean;

@@ -5,6 +5,6 @@ /*!

*/
export * from './AcceptStateInfo';
export * from './DFA';
export * from './DFASerializer';
export * from './DFAState';
export * from './LexerDFASerializer';
export * from "./AcceptStateInfo";
export * from "./DFA";
export * from "./DFASerializer";
export * from "./DFAState";
export * from "./LexerDFASerializer";

@@ -5,4 +5,4 @@ /*!

*/
import { DFA } from './DFA';
import { DFASerializer } from './DFASerializer';
import { DFA } from "./DFA";
import { DFASerializer } from "./DFASerializer";
export declare class LexerDFASerializer extends DFASerializer {

@@ -9,0 +9,0 @@ constructor(dfa: DFA);

@@ -5,11 +5,30 @@ /*!

*/
import { ATNConfigSet } from './atn/ATNConfigSet';
import { BitSet } from './misc/BitSet';
import { DFA } from './dfa/DFA';
import { Parser } from './Parser';
import { ParserErrorListener } from './ParserErrorListener';
import { RecognitionException } from './RecognitionException';
import { Recognizer } from './Recognizer';
import { SimulatorState } from './atn/SimulatorState';
import { Token } from './Token';
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { ParserErrorListener } from "./ParserErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/**
* This implementation of {@link ANTLRErrorListener} can be used to identify
* certain potential correctness and performance problems in grammars. "Reports"
* are made by calling {@link Parser#notifyErrorListeners} with the appropriate
* message.
*
* * **Ambiguities**: These are cases where more than one path through the
* grammar can match the input.
* * **Weak context sensitivity**: These are cases where full-context
* prediction resolved an SLL conflict to a unique alternative which equaled the
* minimum alternative of the SLL conflict.
* * **Strong (forced) context sensitivity**: These are cases where the
* full-context prediction resolved an SLL conflict to a unique alternative,
* *and* the minimum alternative of the SLL conflict was found to not be
* a truly viable alternative. Two-stage parsing cannot be used for inputs where
* this situation occurs.
*
* @author Sam Harwell
*/
export declare class DiagnosticErrorListener implements ParserErrorListener {

@@ -21,4 +40,4 @@ protected exactOnly: boolean;

*
* @param exactOnly {@code true} to report only exact ambiguities, otherwise
* {@code false} to report all ambiguities. Defaults to true.
* @param exactOnly `true` to report only exact ambiguities, otherwise
* `false` to report all ambiguities. Defaults to true.
*/

@@ -39,6 +58,6 @@ constructor(exactOnly?: boolean);

* @param configs The conflicting or ambiguous configuration set.
* @return Returns {@code reportedAlts} if it is not {@code null}, otherwise
* returns the set of alternatives represented in {@code configs}.
* @returns Returns `reportedAlts` if it is not `undefined`, otherwise
* returns the set of alternatives represented in `configs`.
*/
protected getConflictingAlts(reportedAlts: BitSet | undefined, configs: ATNConfigSet): BitSet;
}

@@ -5,4 +5,9 @@ /*!

*/
import { Parser } from './Parser';
import { Parser } from "./Parser";
import { RecognitionException } from "./RecognitionException";
/** A semantic predicate failed during validation. Validation of predicates
* occurs when normally parsing the alternative just like matching a token.
* Disambiguating predicate evaluation occurs when we test a predicate during
* prediction.
*/
export declare class FailedPredicateException extends RecognitionException {

@@ -9,0 +14,0 @@ private _ruleIndex;

@@ -5,9 +5,11 @@ /*!

*/
import { RecognitionException } from "./RecognitionException";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
/** This signifies any kind of mismatched input exceptions such as
* when the current input does not match the expected token.
*/
import { RecognitionException } from "./RecognitionException";
import { Parser } from "./Parser";
export declare class InputMismatchException extends RecognitionException {
constructor(recognizer: Parser);
constructor(recognizer: Parser, state: number, context: ParserRuleContext);
}

@@ -5,3 +5,3 @@ /*!

*/
import { ParserRuleContext } from './ParserRuleContext';
import { ParserRuleContext } from "./ParserRuleContext";
/**

@@ -11,3 +11,2 @@ * This class extends {@link ParserRuleContext} by allowing the value of

*
* <p>
* {@link ParserRuleContext} does not include field storage for the rule index

@@ -18,3 +17,3 @@ * since the context classes created by the code generator override the

* parser, this class (with slightly more memory overhead per node) is used to
* provide equivalent functionality.</p>
* provide equivalent functionality.
*/

@@ -21,0 +20,0 @@ export declare class InterpreterRuleContext extends ParserRuleContext {

@@ -19,15 +19,13 @@ /*!

* A simple stream of symbols whose values are represented as integers. This
* interface provides <em>marked ranges</em> with support for a minimum level
* interface provides *marked ranges* with support for a minimum level
* of buffering necessary to implement arbitrary lookahead during prediction.
* For more information on marked ranges, see {@link #mark}.
*
* <p><strong>Initializing Methods:</strong> Some methods in this interface have
* **Initializing Methods:** Some methods in this interface have
* unspecified behavior if no call to an initializing method has occurred after
* the stream was constructed. The following is a list of initializing methods:</p>
* the stream was constructed. The following is a list of initializing methods:
*
* <ul>
* <li>{@link #LA}</li>
* <li>{@link #consume}</li>
* <li>{@link #size}</li>
* </ul>
* * {@link #LA}
* * {@link #consume}
* * {@link #size}
*/

@@ -39,10 +37,8 @@ export interface IntStream {

*
* <ul>
* <li><strong>Forward movement:</strong> The value of `index`
* before calling this method is less than the value of `index`
* after calling this method.</li>
* <li><strong>Ordered lookahead:</strong> The value of {@code LA(1)} before
* calling this method becomes the value of {@code LA(-1)} after calling
* this method.</li>
* </ul>
* * **Forward movement:** The value of `index`
* before calling this method is less than the value of `index`
* after calling this method.
* * **Ordered lookahead:** The value of `LA(1)` before
* calling this method becomes the value of `LA(-1)` after calling
* this method.
*

@@ -54,37 +50,35 @@ * Note that calling this method does not guarantee that `index` is

*
* @throws IllegalStateException if an attempt is made to consume the the
* end of the stream (i.e. if {@code LA(1)==}{@link #EOF EOF} before calling
* {@code consume}).
* @throws IllegalStateException if an attempt is made to consume the
* end of the stream (i.e. if `LA(1)==`{@link #EOF EOF} before calling
* `consume`).
*/
consume(): void;
/**
* Gets the value of the symbol at offset {@code i} from the current
* position. When {@code i==1}, this method returns the value of the current
* Gets the value of the symbol at offset `i` from the current
* position. When `i==1`, this method returns the value of the current
* symbol in the stream (which is the next symbol to be consumed). When
* {@code i==-1}, this method returns the value of the previously read
* `i==-1`, this method returns the value of the previously read
* symbol in the stream. It is not valid to call this method with
* {@code i==0}, but the specific behavior is unspecified because this
* `i==0`, but the specific behavior is unspecified because this
* method is frequently called from performance-critical code.
*
* <p>This method is guaranteed to succeed if any of the following are true:</p>
* This method is guaranteed to succeed if any of the following are true:
*
* <ul>
* <li>{@code i>0}</li>
* <li>{@code i==-1} and `index` returns a value greater
* than the value of `index` after the stream was constructed
* and {@code LA(1)} was called in that order. Specifying the current
* `index` relative to the index after the stream was created
* allows for filtering implementations that do not return every symbol
* from the underlying source. Specifying the call to {@code LA(1)}
* allows for lazily initialized streams.</li>
* <li>{@code LA(i)} refers to a symbol consumed within a marked region
* that has not yet been released.</li>
* </ul>
* * `i>0`
* * `i==-1` and `index` returns a value greater
* than the value of `index` after the stream was constructed
* and `LA(1)` was called in that order. Specifying the current
* `index` relative to the index after the stream was created
* allows for filtering implementations that do not return every symbol
* from the underlying source. Specifying the call to `LA(1)`
* allows for lazily initialized streams.
* * `LA(i)` refers to a symbol consumed within a marked region
* that has not yet been released.
*
* <p>If {@code i} represents a position at or beyond the end of the stream,
* this method returns {@link #EOF}.</p>
* If `i` represents a position at or beyond the end of the stream,
* this method returns {@link #EOF}.
*
* <p>The return value is unspecified if {@code i<0} and fewer than {@code -i}
* The return value is unspecified if `i<0` and fewer than `-i`
* calls to {@link #consume consume()} have occurred from the beginning of
* the stream before calling this method.</p>
* the stream before calling this method.
*

@@ -97,3 +91,3 @@ * @throws UnsupportedOperationException if the stream does not support

* A mark provides a guarantee that {@link #seek seek()} operations will be
* valid over a "marked range" extending from the index where {@code mark()}
* valid over a "marked range" extending from the index where `mark()`
* was called to the current `index`. This allows the use of

@@ -103,6 +97,6 @@ * streaming input sources by specifying the minimum buffering requirements

*
* <p>The returned mark is an opaque handle (type {@code int}) which is passed
* The returned mark is an opaque handle (type `int`) which is passed
* to {@link #release release()} when the guarantees provided by the marked
* range are no longer necessary. When calls to
* {@code mark()}/{@code release()} are nested, the marks must be released
* `mark()`/`release()` are nested, the marks must be released
* in reverse order of which they were obtained. Since marked regions are

@@ -112,16 +106,17 @@ * used during performance-critical sections of prediction, the specific

* a mark is released twice, or marks are not released in reverse order from
* which they were created).</p>
* which they were created).
*
* <p>The behavior of this method is unspecified if no call to an
* The behavior of this method is unspecified if no call to an
* {@link IntStream initializing method} has occurred after this stream was
* constructed.</p>
* constructed.
*
* <p>This method does not change the current position in the input stream.</p>
* This method does not change the current position in the input stream.
*
* <p>The following example shows the use of {@link #mark mark()},
* The following example shows the use of {@link #mark mark()},
* {@link #release release(mark)}, `index`, and
* {@link #seek seek(index)} as part of an operation to safely work within a
* marked region, then restore the stream position to its original value and
* release the mark.</p>
* <pre>
* release the mark.
*
* ```
* IntStream stream = ...;

@@ -139,5 +134,5 @@ * int index = -1;

* }
* </pre>
* ```
*
* @return An opaque marker which should be passed to
* @returns An opaque marker which should be passed to
* {@link #release release()} when the marked range is no longer required.

@@ -148,10 +143,10 @@ */

* This method releases a marked range created by a call to
* {@link #mark mark()}. Calls to {@code release()} must appear in the
* reverse order of the corresponding calls to {@code mark()}. If a mark is
* {@link #mark mark()}. Calls to `release()` must appear in the
* reverse order of the corresponding calls to `mark()`. If a mark is
* released twice, or if marks are not released in reverse order of the
* corresponding calls to {@code mark()}, the behavior is unspecified.
* corresponding calls to `mark()`, the behavior is unspecified.
*
* <p>For more information and an example, see {@link #mark}.</p>
* For more information and an example, see {@link #mark}.
*
* @param marker A marker returned by a call to {@code mark()}.
* @param marker A marker returned by a call to `mark()`.
* @see #mark

@@ -162,26 +157,24 @@ */

* Return the index into the stream of the input symbol referred to by
* {@code LA(1)}.
* `LA(1)`.
*
* <p>The behavior of this method is unspecified if no call to an
* The behavior of this method is unspecified if no call to an
* {@link IntStream initializing method} has occurred after this stream was
* constructed.</p>
* constructed.
*/
readonly index: number;
/**
* Set the input cursor to the position indicated by {@code index}. If the
* Set the input cursor to the position indicated by `index`. If the
* specified index lies past the end of the stream, the operation behaves as
* though {@code index} was the index of the EOF symbol. After this method
* though `index` was the index of the EOF symbol. After this method
* returns without throwing an exception, then at least one of the following
* will be true.
*
* <ul>
* <li>`index` will return the index of the first symbol
* appearing at or after the specified {@code index}. Specifically,
* implementations which filter their sources should automatically
* adjust {@code index} forward the minimum amount required for the
* operation to target a non-ignored symbol.</li>
* <li>{@code LA(1)} returns {@link #EOF}</li>
* </ul>
* * `index` will return the index of the first symbol
* appearing at or after the specified `index`. Specifically,
* implementations which filter their sources should automatically
* adjust `index` forward the minimum amount required for the
* operation to target a non-ignored symbol.
* * `LA(1)` returns {@link #EOF}
*
* This operation is guaranteed to not throw an exception if {@code index}
* This operation is guaranteed to not throw an exception if `index`
* lies within a marked region. For more information on marked regions, see

@@ -194,3 +187,3 @@ * {@link #mark}. The behavior of this method is unspecified if no call to

*
* @throws IllegalArgumentException if {@code index} is less than 0
* @throws IllegalArgumentException if `index` is less than 0
* @throws UnsupportedOperationException if the stream does not support

@@ -210,3 +203,3 @@ * seeking to the specified index

* Gets the name of the underlying symbol source. This method returns a
* non-null, non-empty string. If such a name is not known, this method
* non-undefined, non-empty string. If such a name is not known, this method
* returns {@link #UNKNOWN_SOURCE_NAME}.

@@ -213,0 +206,0 @@ */

@@ -5,11 +5,11 @@ /*!

*/
import { CharStream } from './CharStream';
import { IntegerStack } from './misc/IntegerStack';
import { LexerATNSimulator } from './atn/LexerATNSimulator';
import { LexerNoViableAltException } from './LexerNoViableAltException';
import { RecognitionException } from './RecognitionException';
import { Recognizer } from './Recognizer';
import { Token } from './Token';
import { TokenFactory } from './TokenFactory';
import { TokenSource } from './TokenSource';
import { CharStream } from "./CharStream";
import { IntegerStack } from "./misc/IntegerStack";
import { LexerATNSimulator } from "./atn/LexerATNSimulator";
import { LexerNoViableAltException } from "./LexerNoViableAltException";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/** A lexer is a recognizer that draws input symbols from a character stream.

@@ -40,3 +40,3 @@ * lexer grammars result in a subclass of this object. A Lexer object

* emissions, then set this to the last token to be matched or
* something nonnull so that the auto token emit mechanism will not
* something non-undefined so that the auto token emit mechanism will not
* emit another token.

@@ -78,3 +78,3 @@ */

* a lexer rule finishes with token set to SKIP_TOKEN. Recall that
* if token==null at end of any token rule, it creates one for you
* if token==undefined at end of any token rule, it creates one for you
* and emits it.

@@ -120,2 +120,3 @@ */

channel: number;
readonly abstract channelNames: string[];
readonly abstract modeNames: string[];

@@ -122,0 +123,0 @@ /** Return a list of all Token objects in input char stream.

@@ -5,6 +5,6 @@ /*!

*/
import { ATN } from './atn/ATN';
import { CharStream } from './CharStream';
import { Lexer } from './Lexer';
import { Vocabulary } from './Vocabulary';
import { ATN } from "./atn/ATN";
import { CharStream } from "./CharStream";
import { Lexer } from "./Lexer";
import { Vocabulary } from "./Vocabulary";
export declare class LexerInterpreter extends Lexer {

@@ -14,10 +14,12 @@ protected _grammarFileName: string;

protected _ruleNames: string[];
protected _channelNames: string[];
protected _modeNames: string[];
private _vocabulary;
constructor(grammarFileName: string, vocabulary: Vocabulary, modeNames: string[], ruleNames: string[], atn: ATN, input: CharStream);
constructor(grammarFileName: string, vocabulary: Vocabulary, ruleNames: string[], channelNames: string[], modeNames: string[], atn: ATN, input: CharStream);
readonly atn: ATN;
readonly grammarFileName: string;
readonly ruleNames: string[];
readonly channelNames: string[];
readonly modeNames: string[];
readonly vocabulary: Vocabulary;
}

@@ -5,6 +5,6 @@ /*!

*/
import { CharStream } from './CharStream';
import { Token } from './Token';
import { TokenFactory } from './TokenFactory';
import { TokenSource } from './TokenSource';
import { CharStream } from "./CharStream";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/**

@@ -14,5 +14,5 @@ * Provides an implementation of {@link TokenSource} as a wrapper around a list

*
* <p>If the final token in the list is an {@link Token#EOF} token, it will be used
* If the final token in the list is an {@link Token#EOF} token, it will be used
* as the EOF token for every call to {@link #nextToken} after the end of the
* list is reached. Otherwise, an EOF token will be created.</p>
* list is reached. Otherwise, an EOF token will be created.
*/

@@ -25,3 +25,3 @@ export declare class ListTokenSource implements TokenSource {

/**
* The name of the input source. If this value is {@code null}, a call to
* The name of the input source. If this value is `undefined`, a call to
* {@link #getSourceName} should return the source name used to create the

@@ -54,7 +54,7 @@ * the next token in {@link #tokens} (or the previous token if the end of

* @param sourceName The name of the {@link TokenSource}. If this value is
* {@code null}, {@link #getSourceName} will attempt to infer the name from
* `undefined`, {@link #getSourceName} will attempt to infer the name from
* the next {@link Token} (or the previous token if the end of the input has
* been reached).
*
* @exception NullPointerException if {@code tokens} is {@code null}
* @exception NullPointerException if `tokens` is `undefined`
*/

@@ -61,0 +61,0 @@ constructor(tokens: Token[], sourceName?: string);

@@ -5,4 +5,4 @@ /*!

*/
import { EqualityComparator } from './EqualityComparator';
import { JavaCollection, JavaMap, JavaSet } from './Stubs';
import { EqualityComparator } from "./EqualityComparator";
import { JavaMap } from "./Stubs";
export declare class Array2DHashMap<K, V> implements JavaMap<K, V> {

@@ -14,15 +14,9 @@ private backingStore;

containsKey(key: K): boolean;
containsValue(value: V): boolean;
entrySet(): JavaSet<JavaMap.Entry<K, V>>;
get(key: K): V | undefined;
readonly isEmpty: boolean;
keySet(): JavaSet<K>;
put(key: K, value: V): V | undefined;
putIfAbsent(key: K, value: V): V | undefined;
putAll<K2 extends K, V2 extends V>(m: JavaMap<K2, V2>): void;
remove(key: K): V | undefined;
readonly size: number;
values(): JavaCollection<V>;
hashCode(): number;
equals(o: any): boolean;
}

@@ -5,7 +5,7 @@ /*!

*/
import { EqualityComparator } from './EqualityComparator';
import { Collection, JavaIterator, JavaCollection, JavaSet } from './Stubs';
import { EqualityComparator } from "./EqualityComparator";
import { JavaCollection, JavaSet } from "./Stubs";
export declare class Array2DHashSet<T> implements JavaSet<T> {
protected comparator: EqualityComparator<T>;
protected buckets: (T[] | undefined)[];
protected buckets: Array<T[] | undefined>;
/** How many elements in set */

@@ -17,3 +17,3 @@ protected n: number;

/**
* Add {@code o} to set if not there; return existing value if already
* Add `o` to set if not there; return existing value if already
* there. This method performs the same operation as {@link #add} aside from

@@ -34,10 +34,6 @@ * the return value.

containsFast(obj: T): boolean;
iterator(): JavaIterator<T>;
toArray(a?: any[]): T[];
remove(o: any): boolean;
removeFast(obj: T): boolean;
[Symbol.iterator](): IterableIterator<T>;
toArray(): T[];
containsAll(collection: JavaCollection<T>): boolean;
addAll(c: Collection<T>): boolean;
retainAll(c: JavaCollection<T>): boolean;
removeAll(c: Collection<T>): boolean;
addAll(c: Iterable<T>): boolean;
clear(): void;

@@ -47,5 +43,5 @@ toString(): string;

/**
* Return {@code o} as an instance of the element type {@code T}. If
* {@code o} is non-null but known to not be an instance of {@code T}, this
* method returns {@code null}. The base implementation does not perform any
* Return `o` as an instance of the element type `T`. If
* `o` is non-undefined but known to not be an instance of `T`, this
* method returns `undefined`. The base implementation does not perform any
* type checks; override this method to provide strong type checks for the

@@ -57,13 +53,13 @@ * {@link #contains} and {@link #remove} methods to ensure the arguments to

* @param o the object to try and cast to the element type of the set
* @return {@code o} if it could be an instance of {@code T}, otherwise
* {@code null}.
* @returns `o` if it could be an instance of `T`, otherwise
* `undefined`.
*/
protected asElementType(o: any): T;
/**
* Return an array of {@code T[]} with length {@code capacity}.
* Return an array of `T[]` with length `capacity`.
*
* @param capacity the length of the array to return
* @return the newly constructed array
* @returns the newly constructed array
*/
protected createBuckets(capacity: number): (T[] | undefined)[];
protected createBuckets(capacity: number): Array<T[] | undefined>;
}

@@ -5,4 +5,4 @@ /*!

*/
import { EqualityComparator } from './EqualityComparator';
import { Equatable } from './Stubs';
import { EqualityComparator } from "./EqualityComparator";
import { Equatable } from "./Stubs";
/**

@@ -19,4 +19,4 @@ * This default implementation of {@link EqualityComparator} uses object equality

*
* <p>This implementation returns
* {@code obj.}{@link Object#hashCode hashCode()}.</p>
* This implementation returns
* `obj.`{@link Object#hashCode hashCode()}.
*/

@@ -27,9 +27,9 @@ hashCode(obj: Equatable[]): number;

*
* <p>This implementation relies on object equality. If both objects are
* {@code null}, this method returns {@code true}. Otherwise if only
* {@code a} is {@code null}, this method returns {@code false}. Otherwise,
* This implementation relies on object equality. If both objects are
* `undefined`, this method returns `true`. Otherwise if only
* `a` is `undefined`, this method returns `false`. Otherwise,
* this method returns the result of
* {@code a.}{@link Object#equals equals}{@code (b)}.</p>
* `a.`{@link Object#equals equals}`(b)`.
*/
equals(a: Equatable[], b: Equatable[]): boolean;
}

@@ -11,3 +11,3 @@ /*!

*
* @return index of the search key, if it is contained in the array; otherwise, (-(insertion point) - 1). The
* @returns index of the search key, if it is contained in the array; otherwise, (-(insertion point) - 1). The
* insertion point is defined as the point at which the key would be inserted into the array: the index of the first

@@ -18,3 +18,3 @@ * element greater than the key, or array.length if all elements in the array are less than the specified key. Note

function binarySearch(array: ArrayLike<number>, key: number, fromIndex?: number, toIndex?: number): number;
function toString<T>(array: ArrayLike<T>): string;
function toString<T>(array: Iterable<T>): string;
}

@@ -225,7 +225,9 @@ /*!

/**
* Compares this object against the specified object. The result is `true` if and only if the argument is not `null`
* and is a `Bitset` object that has exactly the same set of bits set to `true` as this bit set. That is, for every
* nonnegative index `k`,
 * Compares this object against the specified object. The result is `true` if and only if the argument is not
 * `undefined` and is a `BitSet` object that has exactly the same set of bits set to `true` as this bit set. That
 * is, for every nonnegative index `k`,
*
* ((BitSet)obj).get(k) == this.get(k)
* ```
* ((BitSet)obj).get(k) == this.get(k)
* ```
*

@@ -232,0 +234,0 @@ * must be true. The current sizes of the two bit sets are not compared.

@@ -5,3 +5,3 @@ /*!

*/
import { EqualityComparator } from './EqualityComparator';
import { EqualityComparator } from "./EqualityComparator";
/**

@@ -18,4 +18,4 @@ * This default implementation of {@link EqualityComparator} uses object equality

*
* <p>This implementation returns
* {@code obj.}{@link Object#hashCode hashCode()}.</p>
* This implementation returns
* `obj.`{@link Object#hashCode hashCode()}.
*/

@@ -26,9 +26,9 @@ hashCode(obj: any): number;

*
* <p>This implementation relies on object equality. If both objects are
* {@code null}, this method returns {@code true}. Otherwise if only
* {@code a} is {@code null}, this method returns {@code false}. Otherwise,
* This implementation relies on object equality. If both objects are
* `undefined` or `null`, this method returns `true`. Otherwise if only
* `a` is `undefined` or `null`, this method returns `false`. Otherwise,
* this method returns the result of
* {@code a.}{@link Object#equals equals}{@code (b)}.</p>
* `a.`{@link Object#equals equals}`(b)`.
*/
equals(a: any, b: any): boolean;
}

@@ -7,3 +7,3 @@ /*!

* This interface provides an abstract concept of object equality independent of
* {@link Object#equals} (object equality) and the {@code ==} operator
* {@link Object#equals} (object equality) and the `==` operator
* (reference equality). It can be used to provide algorithm-specific unordered

@@ -19,3 +19,3 @@ * comparisons without requiring changes to the object itself.

* @param obj The object.
* @return The hash code for {@code obj}.
* @returns The hash code for `obj`.
*/

@@ -28,5 +28,5 @@ hashCode(obj: T): number;

* @param b The second object to compare.
* @return {@code true} if {@code a} equals {@code b}, otherwise {@code false}.
* @returns `true` if `a` equals `b`, otherwise `false`.
*/
equals(a: T, b: T): boolean;
}

@@ -5,20 +5,22 @@ /*!

*/
export * from './Array2DHashMap';
export * from './ArrayEqualityComparator';
export * from './Args';
export * from './Array2DHashSet';
export * from './Arrays';
export * from './BitSet';
export * from './DefaultEqualityComparator';
export * from './EqualityComparator';
export * from './IntegerList';
export * from './IntegerStack';
export * from './Interval';
export * from './IntervalSet';
export * from './IntSet';
export * from './MultiMap';
export * from './MurmurHash';
export * from './ObjectEqualityComparator';
export * from './ParseCancellationException';
export * from './Utils';
export * from './UUID';
export * from "./Array2DHashMap";
export * from "./ArrayEqualityComparator";
export * from "./Args";
export * from "./Array2DHashSet";
export * from "./Arrays";
export * from "./BitSet";
export * from "./Character";
export * from "./DefaultEqualityComparator";
export * from "./EqualityComparator";
export * from "./IntegerList";
export * from "./IntegerStack";
export * from "./InterpreterDataReader";
export * from "./Interval";
export * from "./IntervalSet";
export * from "./IntSet";
export * from "./MultiMap";
export * from "./MurmurHash";
export * from "./ObjectEqualityComparator";
export * from "./ParseCancellationException";
export * from "./Utils";
export * from "./UUID";

@@ -5,3 +5,3 @@ /*!

*/
import { JavaCollection } from './Stubs';
import { JavaCollection } from "./Stubs";
/**

@@ -30,17 +30,17 @@ *

* Compares the specified object with this list for equality. Returns
* {@code true} if and only if the specified object is also an {@link IntegerList},
* `true` if and only if the specified object is also an {@link IntegerList},
* both lists have the same size, and all corresponding pairs of elements in
* the two lists are equal. In other words, two lists are defined to be
* equal if they contain the same elements in the same order.
* <p>
*
* This implementation first checks if the specified object is this
* list. If so, it returns {@code true}; if not, it checks if the
* specified object is an {@link IntegerList}. If not, it returns {@code false};
* list. If so, it returns `true`; if not, it checks if the
* specified object is an {@link IntegerList}. If not, it returns `false`;
* if so, it checks the size of both lists. If the lists are not the same size,
* it returns {@code false}; otherwise it iterates over both lists, comparing
* corresponding pairs of elements. If any comparison returns {@code false},
* this method returns {@code false}.
* it returns `false`; otherwise it iterates over both lists, comparing
* corresponding pairs of elements. If any comparison returns `false`,
* this method returns `false`.
*
* @param o the object to be compared for equality with this list
* @return {@code true} if the specified object is equal to this list
* @returns `true` if the specified object is equal to this list
*/

@@ -51,7 +51,7 @@ equals(o: any): boolean;

*
* <p>This implementation uses exactly the code that is used to define the
* This implementation uses exactly the code that is used to define the
* list hash function in the documentation for the {@link List#hashCode}
* method.</p>
* method.
*
* @return the hash code value for this list
* @returns the hash code value for this list
*/

@@ -65,2 +65,9 @@ hashCode(): number;

private ensureCapacity(capacity);
/** Convert the list to a UTF-16 encoded char array. If all values are less
* than the 0xFFFF 16-bit code point limit then this is just a char array
* of 16-bit char as usual. For values in the supplementary range, encode
* them as two UTF-16 code units.
*/
toCharArray(): Uint16Array;
private charArraySize();
}

@@ -5,3 +5,3 @@ /*!

*/
import { IntegerList } from './IntegerList';
import { IntegerList } from "./IntegerList";
/**

@@ -8,0 +8,0 @@ *

@@ -5,3 +5,3 @@ /*!

*/
import { Equatable } from './Stubs';
import { Equatable } from "./Stubs";
/** An immutable inclusive interval a..b */

@@ -51,5 +51,5 @@ export declare class Interval implements Equatable {

intersection(other: Interval): Interval;
/** Return the interval with elements from {@code this} not in {@code other};
* {@code other} must not be totally enclosed (properly contained)
* within {@code this}, which would result in two disjoint intervals
/** Return the interval with elements from `this` not in `other`;
* `other` must not be totally enclosed (properly contained)
* within `this`, which would result in two disjoint intervals
* instead of the single one returned by this method.

@@ -56,0 +56,0 @@ */

@@ -5,6 +5,6 @@ /*!

*/
import { IntegerList } from './IntegerList';
import { Interval } from './Interval';
import { IntSet } from './IntSet';
import { Vocabulary } from '../Vocabulary';
import { IntegerList } from "./IntegerList";
import { Interval } from "./Interval";
import { IntSet } from "./IntSet";
import { Vocabulary } from "../Vocabulary";
/**

@@ -17,6 +17,5 @@ * This class implements the {@link IntSet} backed by a sorted array of

*
* <p>
* This class is able to represent sets containing any combination of values in
* the range {@link Integer#MIN_VALUE} to {@link Integer#MAX_VALUE}
* (inclusive).</p>
* (inclusive).
*/

@@ -56,3 +55,3 @@ export declare class IntervalSet implements IntSet {

* Compute the set difference between two interval sets. The specific
* operation is {@code left - right}.
* operation is `left - right`.
*/

@@ -67,16 +66,14 @@ static subtract(left: IntervalSet, right: IntervalSet): IntervalSet;

readonly isNil: boolean;
/** {@inheritDoc} */
getSingleElement(): number;
/**
* Returns the maximum value contained in the set.
* Returns the maximum value contained in the set if not isNil.
*
* @return the maximum value contained in the set. If the set is empty, this
* method returns {@link Token#INVALID_TYPE}.
* @return the maximum value contained in the set.
* @throws RangeError if set is empty
*/
readonly maxElement: number;
/**
* Returns the minimum value contained in the set.
* Returns the minimum value contained in the set if not isNil.
*
* @return the minimum value contained in the set. If the set is empty, this
* method returns {@link Token#INVALID_TYPE}.
* @return the minimum value contained in the set.
* @throws RangeError if set is empty
*/

@@ -98,3 +95,2 @@ readonly minElement: number;

toIntegerList(): IntegerList;
toList(): number[];
toSet(): Set<number>;

@@ -101,0 +97,0 @@ toArray(): number[];

@@ -21,7 +21,7 @@ /*!

* Modify the current {@link IntSet} object to contain all elements that are
* present in itself, the specified {@code set}, or both.
* present in itself, the specified `set`, or both.
*
* @param set The set to add to the current set. A {@code null} argument is
* @param set The set to add to the current set. An `undefined` argument is
* treated as though it were an empty set.
* @return {@code this} (to support chained calls)
* @returns `this` (to support chained calls)
*

@@ -33,7 +33,7 @@ * @exception IllegalStateException if the current set is read-only

* Return a new {@link IntSet} object containing all elements that are
* present in both the current set and the specified set {@code a}.
* present in both the current set and the specified set `a`.
*
* @param a The set to intersect with the current set.
* @return A new {@link IntSet} instance containing the intersection of the
* current set and {@code a}.
* @returns A new {@link IntSet} instance containing the intersection of the
* current set and `a`.
*/

@@ -43,14 +43,12 @@ and(a: IntSet): IntSet;

* Return a new {@link IntSet} object containing all elements that are
* present in {@code elements} but not present in the current set. The
* following expressions are equivalent for input non-null {@link IntSet}
* instances {@code x} and {@code y}.
* present in `elements` but not present in the current set. The
* following expressions are equivalent for input non-`undefined` {@link IntSet}
* instances `x` and `y`.
*
* <ul>
* <li>{@code x.complement(y)}</li>
* <li>{@code y.subtract(x)}</li>
* </ul>
* * `x.complement(y)`
* * `y.subtract(x)`
*
* @param elements The set to compare with the current set.
* @return A new {@link IntSet} instance containing the elements present in
* {@code elements} but not present in the current set.
* @returns A new {@link IntSet} instance containing the elements present in
* `elements` but not present in the current set.
*/

@@ -60,12 +58,11 @@ complement(elements: IntSet): IntSet;

* Return a new {@link IntSet} object containing all elements that are
* present in the current set, the specified set {@code a}, or both.
* present in the current set, the specified set `a`, or both.
*
* <p>
* This method is similar to {@link #addAll(IntSet)}, but returns a new
* {@link IntSet} instance instead of modifying the current set.</p>
* {@link IntSet} instance instead of modifying the current set.
*
* @param a The set to union with the current set. A {@code null} argument
* @param a The set to union with the current set. An `undefined` argument
* is treated as though it were an empty set.
* @return A new {@link IntSet} instance containing the union of the current
* set and {@code a}. The value {@code null} may be returned in place of an
* @returns A new {@link IntSet} instance containing the union of the current
* set and `a`. The value `undefined` may be returned in place of an
* empty result set.

@@ -76,16 +73,14 @@ */

* Return a new {@link IntSet} object containing all elements that are
* present in the current set but not present in the input set {@code a}.
* The following expressions are equivalent for input non-null
* {@link IntSet} instances {@code x} and {@code y}.
* present in the current set but not present in the input set `a`.
* The following expressions are equivalent for input non-`undefined`
* {@link IntSet} instances `x` and `y`.
*
* <ul>
* <li>{@code y.subtract(x)}</li>
* <li>{@code x.complement(y)}</li>
* </ul>
* * `y.subtract(x)`
* * `x.complement(y)`
*
* @param a The set to compare with the current set. A {@code null}
 * @param a The set to compare with the current set. An `undefined`
* argument is treated as though it were an empty set.
* @return A new {@link IntSet} instance containing the elements present in
* {@code elements} but not present in the current set. The value
* {@code null} may be returned in place of an empty result set.
* @returns A new {@link IntSet} instance containing the elements present in
* `elements` but not present in the current set. The value
* `undefined` may be returned in place of an empty result set.
*/

@@ -96,3 +91,3 @@ subtract(a: IntSet): IntSet;

*
* @return the total number of elements represented by the current set,
* @returns the total number of elements represented by the current set,
* regardless of the manner in which the elements are stored.

@@ -102,6 +97,6 @@ */

/**
* Returns {@code true} if this set contains no elements.
* Returns `true` if this set contains no elements.
*
* @return {@code true} if the current set contains no elements; otherwise,
* {@code false}.
* @returns `true` if the current set contains no elements; otherwise,
* `false`.
*/

@@ -114,14 +109,6 @@ readonly isNil: boolean;

/**
* Returns the single value contained in the set, if {@link #size} is 1;
* otherwise, returns {@link Token#INVALID_TYPE}.
* Returns `true` if the set contains the specified element.
*
* @return the single value contained in the set, if {@link #size} is 1;
* otherwise, returns {@link Token#INVALID_TYPE}.
*/
getSingleElement(): number;
/**
* Returns {@code true} if the set contains the specified element.
*
* @param el The element to check for.
* @return {@code true} if the set contains {@code el}; otherwise {@code false}.
* @returns `true` if the set contains `el`; otherwise `false`.
*/

@@ -139,9 +126,9 @@ contains(el: number): boolean;

/**
* Return a list containing the elements represented by the current set. The
* list is returned in ascending numerical order.
* Return an array containing the elements represented by the current set. The
* array is returned in ascending numerical order.
*
* @return A list containing all element present in the current set, sorted
 * @returns An array containing all elements present in the current set, sorted
* in ascending numerical order.
*/
toList(): number[];
toArray(): number[];
/**

@@ -148,0 +135,0 @@ * {@inheritDoc}

@@ -8,3 +8,3 @@ /*!

map(key: K, value: V): void;
getPairs(): [K, V][];
getPairs(): Array<[K, V]>;
}

@@ -5,3 +5,3 @@ /*!

*/
import { Equatable } from './Stubs';
import { Equatable } from "./Stubs";
/**

@@ -13,18 +13,18 @@ *

/**
* Initialize the hash using the specified {@code seed}.
* Initialize the hash using the specified `seed`.
*
* @param seed the seed (optional)
* @return the intermediate hash value
* @returns the intermediate hash value
*/
function initialize(seed?: number): number;
/**
* Update the intermediate hash value for the next input {@code value}.
* Update the intermediate hash value for the next input `value`.
*
* @param hash the intermediate hash value
* @param value the value to add to the current hash
* @return the updated intermediate hash value
* @returns the updated intermediate hash value
*/
function update(hash: number, value: number | string | Equatable | null | undefined): number;
/**
* Apply the final computation steps to the intermediate value {@code hash}
* Apply the final computation steps to the intermediate value `hash`
* to form the final result of the MurmurHash 3 hash function.

@@ -34,3 +34,3 @@ *

* @param numberOfWords the number of integer values added to the hash
* @return the final hash result
* @returns the final hash result
*/

@@ -45,5 +45,5 @@ function finish(hash: number, numberOfWords: number): number;

* @param seed the seed for the MurmurHash algorithm
* @return the hash code of the data
* @returns the hash code of the data
*/
function hashCode<T extends number | string | Equatable>(data: Iterable<T>, seed?: number): number;
}

@@ -5,4 +5,4 @@ /*!

*/
import { EqualityComparator } from './EqualityComparator';
import { Equatable } from './Stubs';
import { EqualityComparator } from "./EqualityComparator";
import { Equatable } from "./Stubs";
/**

@@ -19,4 +19,4 @@ * This default implementation of {@link EqualityComparator} uses object equality

*
* <p>This implementation returns
* {@code obj.}{@link Object#hashCode hashCode()}.</p>
* This implementation returns
* `obj.`{@link Object#hashCode hashCode()}.
*/

@@ -27,9 +27,9 @@ hashCode(obj: Equatable | null | undefined): number;

*
* <p>This implementation relies on object equality. If both objects are
* {@code null}, this method returns {@code true}. Otherwise if only
* {@code a} is {@code null}, this method returns {@code false}. Otherwise,
* This implementation relies on object equality. If both objects are
* `undefined` or `null`, this method returns `true`. Otherwise if only
* `a` is `undefined` or `null`, this method returns `false`. Otherwise,
* this method returns the result of
* {@code a.}{@link Object#equals equals}{@code (b)}.</p>
* `a.`{@link Object#equals equals}`(b)`.
*/
equals(a: Equatable | null | undefined, b: Equatable | null | undefined): boolean;
}

@@ -12,26 +12,11 @@ /*!

}
export interface JavaIterator<E> {
hasNext(): boolean;
next(): E;
remove(): void;
}
export interface JavaIterable<E> {
iterator(): JavaIterator<E>;
}
export interface JavaCollection<E> extends JavaIterable<E> {
export interface JavaCollection<E> extends Iterable<E>, Equatable {
add(e: E): boolean;
addAll(collection: Collection<E>): boolean;
addAll(collection: Iterable<E>): boolean;
clear(): void;
contains(o: any): boolean;
containsAll(collection: Collection<any>): boolean;
equals(o: any): boolean;
hashCode(): number;
containsAll(collection: Iterable<any>): boolean;
readonly isEmpty: boolean;
iterator(): JavaIterator<E>;
remove(o: any): boolean;
removeAll(collection: Collection<any>): boolean;
retainAll(collection: Collection<any>): boolean;
readonly size: number;
toArray(): any[];
toArray(a: E[]): E[];
toArray(): E[];
}

@@ -43,27 +28,6 @@ export interface JavaSet<E> extends JavaCollection<E> {

containsKey(key: K): boolean;
containsValue(value: V): boolean;
entrySet(): JavaSet<JavaMap.Entry<K, V>>;
get(key: K): V | undefined;
readonly isEmpty: boolean;
keySet(): JavaSet<K>;
put(key: K, value: V): V | undefined;
putAll<K2 extends K, V2 extends V>(m: JavaMap<K2, V2>): void;
remove(key: K): V | undefined;
readonly size: number;
values(): JavaCollection<V>;
}
export declare namespace JavaMap {
interface Entry<K, V> extends Equatable {
getKey(): K;
getValue(): V;
setValue(value: V): V;
}
}
/**
* Collection is a hybrid type can accept either JavaCollection or JavaScript Iterable
*/
export declare type Collection<T> = JavaCollection<T> | Iterable<T>;
/**
* This adapter function allows Collection<T> arguments to be used in JavaScript for...of loops
*/
export declare function asIterable<T>(collection: Collection<T>): Iterable<T>;

@@ -5,5 +5,7 @@ /*!

*/
import { Equatable } from "./Stubs";
import { IntegerList } from "./IntegerList";
export declare function escapeWhitespace(s: string, escapeSpaces: boolean): string;
export declare function join(collection: Iterable<any>, separator: string): string;
export declare function equals(x: any, y: any): boolean;
export declare function equals(x: Equatable | undefined, y: Equatable | undefined): boolean;
/** Convert array of strings to string&rarr;index map. Useful for

@@ -14,1 +16,2 @@ * converting rulenames to name&rarr;ruleindex map.

export declare function toCharArray(str: string): Uint16Array;
export declare function toCharArray(data: IntegerList): Uint16Array;

@@ -5,3 +5,3 @@ /*!

*/
import { Equatable } from './Stubs';
import { Equatable } from "./Stubs";
export declare class UUID implements Equatable {

@@ -8,0 +8,0 @@ private readonly data;

@@ -5,2 +5,9 @@ /*!

*/
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
/** Indicates that the parser could not decide which of two or more paths

@@ -11,9 +18,2 @@ * to take based upon the remaining input. It tracks the starting token

*/
import { ATNConfigSet } from './atn/ATNConfigSet';
import { Parser } from './Parser';
import { ParserRuleContext } from './ParserRuleContext';
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from './Recognizer';
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
export declare class NoViableAltException extends RecognitionException {

@@ -20,0 +20,0 @@ /** Which configurations did we try at input.index that couldn't match input.LT(1)? */

@@ -5,19 +5,21 @@ /*!

*/
import { ANTLRErrorStrategy } from './ANTLRErrorStrategy';
import { ATN } from './atn/ATN';
import { IntegerStack } from './misc/IntegerStack';
import { IntervalSet } from './misc/IntervalSet';
import { Lexer } from './Lexer';
import { ParseInfo } from './atn/ParseInfo';
import { ParserATNSimulator } from './atn/ParserATNSimulator';
import { ParserErrorListener } from './ParserErrorListener';
import { ParserRuleContext } from './ParserRuleContext';
import { ParseTreeListener } from './tree/ParseTreeListener';
import { ParseTreePattern } from './tree/pattern/ParseTreePattern';
import { RecognitionException } from './RecognitionException';
import { Recognizer } from './Recognizer';
import { RuleContext } from './RuleContext';
import { Token } from './Token';
import { TokenFactory } from './TokenFactory';
import { TokenStream } from './TokenStream';
import { ANTLRErrorStrategy } from "./ANTLRErrorStrategy";
import { ATN } from "./atn/ATN";
import { ErrorNode } from "./tree/ErrorNode";
import { IntegerStack } from "./misc/IntegerStack";
import { IntervalSet } from "./misc/IntervalSet";
import { Lexer } from "./Lexer";
import { ParseInfo } from "./atn/ParseInfo";
import { ParserATNSimulator } from "./atn/ParserATNSimulator";
import { ParserErrorListener } from "./ParserErrorListener";
import { ParserRuleContext } from "./ParserRuleContext";
import { ParseTreeListener } from "./tree/ParseTreeListener";
import { ParseTreePattern } from "./tree/pattern/ParseTreePattern";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { RuleContext } from "./RuleContext";
import { TerminalNode } from "./tree/TerminalNode";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenStream } from "./TokenStream";
/** This is all the parsing support code essentially; most of it is error recovery stuff. */

@@ -51,3 +53,3 @@ export declare abstract class Parser extends Recognizer<Token, ParserATNSimulator> {

*
* This is always non-null during the parsing process.
* This is always non-undefined during the parsing process.
*/

@@ -63,5 +65,5 @@ protected _ctx: ParserRuleContext;

/**
* When {@link #setTrace}{@code (true)} is called, a reference to the
* When {@link #setTrace}`(true)` is called, a reference to the
* {@link TraceListener} is stored here so it can be easily removed in a
* later call to {@link #setTrace}{@code (false)}. The listener itself is
* later call to {@link #setTrace}`(false)`. The listener itself is
* implemented as a parser listener so this field is not directly used by

@@ -90,17 +92,18 @@ * other parser methods.

/**
* Match current input symbol against {@code ttype}. If the symbol type
* Match current input symbol against `ttype`. If the symbol type
* matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are
* called to complete the match process.
*
* <p>If the symbol type does not match,
* If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* {@code true} and the token index of the symbol returned by
* `true` and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
* the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)}.
*
* @param ttype the token type to match
* @return the matched symbol
* @returns the matched symbol
* @ if the current input symbol did not match
* {@code ttype} and the error strategy could not recover from the
* `ttype` and the error strategy could not recover from the
* mismatched symbol

@@ -114,10 +117,11 @@ */

*
* <p>If the symbol type does not match,
* If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* {@code true} and the token index of the symbol returned by
* `true` and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
* the parse tree by calling {@link Parser#createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)}.
*
* @return the matched symbol
* @returns the matched symbol
* @ if the current input symbol did not match

@@ -130,6 +134,6 @@ * a wildcard and the error strategy could not recover from the mismatched

* Gets whether or not a complete parse tree will be constructed while
* parsing. This property is {@code true} for a newly constructed parser.
* parsing. This property is `true` for a newly constructed parser.
*
* @return {@code true} if a complete parse tree will be constructed while
* parsing, otherwise {@code false}
* @returns `true` if a complete parse tree will be constructed while
* parsing, otherwise `false`
*/

@@ -142,10 +146,10 @@ /**

*
* <p>Note that if we are not building parse trees, rule contexts only point
* Note that if we are not building parse trees, rule contexts only point
* upwards. When a rule exits, it returns the context but that gets garbage
* collected if nobody holds a reference. It points upwards but nobody
* points at it.</p>
* points at it.
*
* <p>When we build parse trees, we are adding all of these contexts to
* When we build parse trees, we are adding all of these contexts to
* {@link ParserRuleContext#children} list. Contexts are then not candidates
* for garbage collection.</p>
* for garbage collection.
*/

@@ -155,5 +159,5 @@ buildParseTree: boolean;

/**
* Registers {@code listener} to receive events during the parsing process.
* Registers `listener` to receive events during the parsing process.
*
* <p>To support output-preserving grammar transformations (including but not
* To support output-preserving grammar transformations (including but not
* limited to left-recursion removal, automated left-factoring, and

@@ -165,27 +169,25 @@ * optimized code generation), calls to listener methods during the parse

* during the parse than after the parser. In addition, calls to certain
* rule entry methods may be omitted.</p>
* rule entry methods may be omitted.
*
* <p>With the following specific exceptions, calls to listener events are
* <em>deterministic</em>, i.e. for identical input the calls to listener
* methods will be the same.</p>
* With the following specific exceptions, calls to listener events are
* *deterministic*, i.e. for identical input the calls to listener
* methods will be the same.
*
* <ul>
* <li>Alterations to the grammar used to generate code may change the
* behavior of the listener calls.</li>
* <li>Alterations to the command line options passed to ANTLR 4 when
* generating the parser may change the behavior of the listener calls.</li>
* <li>Changing the version of the ANTLR Tool used to generate the parser
* may change the behavior of the listener calls.</li>
* </ul>
* * Alterations to the grammar used to generate code may change the
* behavior of the listener calls.
* * Alterations to the command line options passed to ANTLR 4 when
* generating the parser may change the behavior of the listener calls.
* * Changing the version of the ANTLR Tool used to generate the parser
* may change the behavior of the listener calls.
*
* @param listener the listener to add
*
* @ if {@code} listener is {@code null}
* @throws {@link TypeError} if `listener` is `undefined`
*/
addParseListener(listener: ParseTreeListener): void;
/**
* Remove {@code listener} from the list of parse listeners.
* Remove `listener` from the list of parse listeners.
*
* <p>If {@code listener} is {@code null} or has not been added as a parse
* listener, this method does nothing.</p>
* If `listener` is `undefined` or has not been added as a parse
* listener, this method does nothing.
*

@@ -235,10 +237,10 @@ * @see #addParseListener

*
* <pre>
* ParseTree t = parser.expr();
* ParseTreePattern p = parser.compileParseTreePattern("&lt;ID&gt;+0", MyParser.RULE_expr);
* ParseTreeMatch m = p.match(t);
* String id = m.get("ID");
* </pre>
* ```
* let t: ParseTree = parser.expr();
* let p: ParseTreePattern = await parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
* let m: ParseTreeMatch = p.match(t);
* let id: string = m.get("ID");
* ```
*/
compileParseTreePattern(pattern: string, patternRuleIndex: number): ParseTreePattern;
compileParseTreePattern(pattern: string, patternRuleIndex: number): Promise<ParseTreePattern>;
/**

@@ -248,3 +250,3 @@ * The same as {@link #compileParseTreePattern(String, int)} but specify a

*/
compileParseTreePattern(pattern: string, patternRuleIndex: number, lexer?: Lexer): ParseTreePattern;
compileParseTreePattern(pattern: string, patternRuleIndex: number, lexer?: Lexer): Promise<ParseTreePattern>;
errorHandler: ANTLRErrorStrategy;

@@ -262,17 +264,17 @@ /** Set the token stream and reset the parser. */

*
* <p>E.g., given the following input with {@code A} being the current
* lookahead symbol, this function moves the cursor to {@code B} and returns
* {@code A}.</p>
* E.g., given the following input with `A` being the current
* lookahead symbol, this function moves the cursor to `B` and returns
* `A`.
*
* <pre>
* A B
* ^
* </pre>
* ```
* A B
* ^
* ```
*
* If the parser is not in error recovery mode, the consumed symbol is added
* to the parse tree using {@link ParserRuleContext#addChild(Token)}, and
* to the parse tree using {@link ParserRuleContext#addChild(TerminalNode)}, and
* {@link ParseTreeListener#visitTerminal} is called on any parse listeners.
* If the parser <em>is</em> in error recovery mode, the consumed symbol is
* added to the parse tree using
* {@link ParserRuleContext#addErrorNode(Token)}, and
* If the parser *is* in error recovery mode, the consumed symbol is
* added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)} and
* {@link ParseTreeListener#visitErrorNode} is called on any parse

@@ -282,2 +284,16 @@ * listeners.

consume(): Token;
/**
* How to create a token leaf node associated with a parent.
* Typically, the terminal node to create is not a function of the parent.
*
* @since 4.7
*/
createTerminalNode(parent: ParserRuleContext, t: Token): TerminalNode;
/**
* How to create an error node, given a token, associated with a parent.
* Typically, the error node to create is not a function of the parent.
*
* @since 4.7
*/
createErrorNode(parent: ParserRuleContext, t: Token): ErrorNode;
protected addContextToParseTree(): void;

@@ -295,3 +311,3 @@ /**

*
* @return The precedence level for the top-most precedence rule, or -1 if
* @returns The precedence level for the top-most precedence rule, or -1 if
* the parser context is not nested within a precedence rule.

@@ -312,3 +328,3 @@ */

/**
* Checks whether or not {@code symbol} can follow the current state in the
* Checks whether or not `symbol` can follow the current state in the
* ATN. The behavior of this method is equivalent to the following, but is

@@ -318,9 +334,9 @@ * implemented such that the complete context-sensitive follow set does not

*
* <pre>
* ```
* return getExpectedTokens().contains(symbol);
* </pre>
* ```
*
* @param symbol the symbol type to check
* @return {@code true} if {@code symbol} can follow the current state in
* the ATN, otherwise {@code false}.
* @returns `true` if `symbol` can follow the current state in
* the ATN, otherwise `false`.
*/

@@ -338,3 +354,3 @@ isExpectedToken(symbol: number): boolean;

getExpectedTokensWithinCurrentRule(): IntervalSet;
/** Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found. */
/** Get a rule's index (i.e., `RULE_ruleName` field) or -1 if not found. */
getRuleIndex(ruleName: string): number;

@@ -355,7 +371,7 @@ readonly ruleContext: ParserRuleContext;

readonly sourceName: string;
readonly parseInfo: ParseInfo | undefined;
readonly parseInfo: Promise<ParseInfo | undefined>;
/**
* @since 4.3
*/
setProfile(profile: boolean): void;
setProfile(profile: boolean): Promise<void>;
/**

@@ -362,0 +378,0 @@ * Gets whether a {@link TraceListener} is registered as a parse listener

@@ -5,9 +5,9 @@ /*!

*/
import { ANTLRErrorListener } from './ANTLRErrorListener';
import { ATNConfigSet } from './atn/ATNConfigSet';
import { BitSet } from './misc/BitSet';
import { DFA } from './dfa/DFA';
import { Parser } from './Parser';
import { SimulatorState } from './atn/SimulatorState';
import { Token } from './Token';
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/** How to emit recognition errors for parsers.

@@ -20,24 +20,23 @@ */

*
* <p>Each full-context prediction which does not result in a syntax error
* Each full-context prediction which does not result in a syntax error
* will call either {@link #reportContextSensitivity} or
* {@link #reportAmbiguity}.</p>
* {@link #reportAmbiguity}.
*
* <p>
* When {@code ambigAlts} is not null, it contains the set of potentially
* When `ambigAlts` is not `undefined`, it contains the set of potentially
* viable alternatives identified by the prediction algorithm. When
* {@code ambigAlts} is null, use
* `ambigAlts` is `undefined`, use
* {@link ATNConfigSet#getRepresentedAlternatives} to obtain the represented
* alternatives from the {@code configs} argument.</p>
* alternatives from the `configs` argument.
*
* <p>When {@code exact} is {@code true}, <em>all</em> of the potentially
* When `exact` is `true`, *all* of the potentially
* viable alternatives are truly viable, i.e. this is reporting an exact
* ambiguity. When {@code exact} is {@code false}, <em>at least two</em> of
* ambiguity. When `exact` is `false`, *at least two* of
* the potentially viable alternatives are viable for the current input, but
* the prediction algorithm terminated as soon as it determined that at
* least the <em>minimum</em> potentially viable alternative is truly
* viable.</p>
* least the *minimum* potentially viable alternative is truly
* viable.
*
* <p>When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
* When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
* mode is used, the parser is required to identify exact ambiguities so
* {@code exact} will always be {@code true}.</p>
* `exact` will always be `true`.
*

@@ -48,8 +47,8 @@ * @param recognizer the parser instance

 * @param stopIndex the input index where the ambiguity was identified
* @param exact {@code true} if the ambiguity is exactly known, otherwise
* {@code false}. This is always {@code true} when
* @param exact `true` if the ambiguity is exactly known, otherwise
* `false`. This is always `true` when
* {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
* @param ambigAlts the potentially ambiguous alternatives, or {@code null}
* @param ambigAlts the potentially ambiguous alternatives, or `undefined`
* to indicate that the potentially ambiguous alternatives are the complete
* set of represented alternatives in {@code configs}
* set of represented alternatives in `configs`
* @param configs the ATN configuration set where the ambiguity was

@@ -63,6 +62,6 @@ * identified

*
* <p>If one or more configurations in {@code configs} contains a semantic
* If one or more configurations in `configs` contains a semantic
* predicate, the predicates are evaluated before this method is called. The
* subset of alternatives which are still viable after predicates are
* evaluated is reported in {@code conflictingAlts}.</p>
* evaluated is reported in `conflictingAlts`.
*

@@ -74,4 +73,4 @@ * @param recognizer the parser instance

* @param conflictingAlts The specific conflicting alternatives. If this is
* {@code null}, the conflicting alternatives are all alternatives
* represented in {@code configs}.
* `undefined`, the conflicting alternatives are all alternatives
* represented in `configs`.
* @param conflictState the simulator state when the SLL conflict was

@@ -85,7 +84,7 @@ * detected

*
* <p>Each full-context prediction which does not result in a syntax error
* Each full-context prediction which does not result in a syntax error
* will call either {@link #reportContextSensitivity} or
* {@link #reportAmbiguity}.</p>
* {@link #reportAmbiguity}.
*
* <p>For prediction implementations that only evaluate full-context
* For prediction implementations that only evaluate full-context
* predictions when an SLL conflict is found (including the default

@@ -96,14 +95,14 @@ * {@link ParserATNSimulator} implementation), this method reports cases

* indicate a problem, and it may appear even in completely unambiguous
* grammars.</p>
* grammars.
*
* <p>{@code configs} may have more than one represented alternative if the
* `configs` may have more than one represented alternative if the
* full-context prediction algorithm does not evaluate predicates before
* beginning the full-context prediction. In all cases, the final prediction
* is passed as the {@code prediction} argument.</p>
* is passed as the `prediction` argument.
*
* <p>Note that the definition of "context sensitivity" in this method
* Note that the definition of "context sensitivity" in this method
* differs from the concept in {@link DecisionInfo#contextSensitivities}.
* This method reports all instances where an SLL conflict occurred but LL
* parsing produced a unique result, whether or not that unique result
* matches the minimum alternative in the SLL conflicting set.</p>
* matches the minimum alternative in the SLL conflicting set.
*

@@ -110,0 +109,0 @@ * @param recognizer the parser instance

@@ -5,13 +5,13 @@ /*!

*/
import { ATN } from './atn/ATN';
import { ATNState } from './atn/ATNState';
import { BitSet } from './misc/BitSet';
import { DecisionState } from './atn/DecisionState';
import { InterpreterRuleContext } from './InterpreterRuleContext';
import { Parser } from './Parser';
import { ParserRuleContext } from './ParserRuleContext';
import { RecognitionException } from './RecognitionException';
import { Token } from './Token';
import { TokenStream } from './TokenStream';
import { Vocabulary } from './Vocabulary';
import { ATN } from "./atn/ATN";
import { ATNState } from "./atn/ATNState";
import { BitSet } from "./misc/BitSet";
import { DecisionState } from "./atn/DecisionState";
import { InterpreterRuleContext } from "./InterpreterRuleContext";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
import { Vocabulary } from "./Vocabulary";
/** A parser simulator that mimics what ANTLR's generated

@@ -52,3 +52,3 @@ * parser code does. A ParserATNSimulator is used to make

*/
protected readonly _parentContextStack: [ParserRuleContext, number][];
protected readonly _parentContextStack: Array<[ParserRuleContext, number]>;
/** We need a map from (decision,inputIndex)->forced alt for computing ambiguous

@@ -55,0 +55,0 @@ * parse trees. For now, we allow exactly one override.

@@ -67,3 +67,3 @@ /*!

* The exception that forced this rule to return. If the rule successfully
* completed, this is {@code null}.
* completed, this is `undefined`.
*/

@@ -76,3 +76,4 @@ exception?: RecognitionException;

* COPY a ctx (I'm deliberately not using copy constructor) to avoid
* confusion with creating node with parent. Does not copy children.
* confusion with creating node with parent. Does not copy children
* (except error leaves).
*

@@ -90,5 +91,40 @@ * This is used in the generated parser code to flip a generic XContext

exitRule(listener: ParseTreeListener): void;
/** Add a parse tree node to this as a child. Works for
* internal and leaf nodes. Does not set parent link;
* other add methods must do that. Other addChild methods
* call this.
*
* We cannot set the parent pointer of the incoming node
* because the existing interfaces do not have a setParent()
* method and I don't want to break backward compatibility for this.
*
* @since 4.7
*/
addAnyChild<T extends ParseTree>(t: T): T;
/** Add a token leaf node child and force its parent to be this node. */
addChild(t: TerminalNode): void;
addChild(ruleInvocation: RuleContext): void;
/**
* Add a child to this node based upon matchedToken. It
* creates a TerminalNodeImpl rather than using
* {@link Parser#createTerminalNode(ParserRuleContext, Token)}. I'm leaving this
* in for compatibility but the parser doesn't use this anymore.
*
* @deprecated Use another overload instead.
*/
addChild(matchedToken: Token): TerminalNode;
/** Add an error node child and force its parent to be this node.
*
* @since 4.7
*/
addErrorNode(errorNode: ErrorNode): ErrorNode;
/**
* Add a child to this node based upon badToken. It
* creates a ErrorNode rather than using
* {@link Parser#createErrorNode(ParserRuleContext, Token)}. I'm leaving this
* in for compatibility but the parser doesn't use this anymore.
*
* @deprecated Use another overload instead.
*/
addErrorNode(badToken: Token): ErrorNode;
/** Used by enterOuterAlt to toss out a RuleContext previously added as

@@ -99,3 +135,2 @@ * we entered a rule. If we have # label, we will need to remove

removeLastChild(): void;
addErrorNode(badToken: Token): ErrorNode;
readonly parent: ParserRuleContext | undefined;

@@ -102,0 +137,0 @@ getChild(i: number): ParseTree;

@@ -5,3 +5,3 @@ /*!

*/
import { ANTLRErrorListener } from './ANTLRErrorListener';
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { RecognitionException } from "./RecognitionException";

@@ -16,7 +16,7 @@ import { Recognizer } from "./Recognizer";

*/
export declare class ProxyErrorListener<Symbol, TListener extends ANTLRErrorListener<Symbol>> implements ANTLRErrorListener<Symbol> {
export declare class ProxyErrorListener<TSymbol, TListener extends ANTLRErrorListener<TSymbol>> implements ANTLRErrorListener<TSymbol> {
private delegates;
constructor(delegates: TListener[]);
protected getDelegates(): ReadonlyArray<TListener>;
syntaxError<T extends Symbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
syntaxError<T extends TSymbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
}

@@ -5,10 +5,10 @@ /*!

*/
import { ATNConfigSet } from './atn/ATNConfigSet';
import { BitSet } from './misc/BitSet';
import { DFA } from './dfa/DFA';
import { Parser } from './Parser';
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { ProxyErrorListener } from "./ProxyErrorListener";
import { ParserErrorListener } from "./ParserErrorListener";
import { SimulatorState } from './atn/SimulatorState';
import { Token } from './Token';
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/**

@@ -19,5 +19,5 @@ * @author Sam Harwell

constructor(delegates: ParserErrorListener[]);
reportAmbiguity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, exact: boolean, ambigAlts: BitSet, configs: ATNConfigSet): void;
reportAttemptingFullContext(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, conflictingAlts: BitSet, conflictState: SimulatorState): void;
reportAmbiguity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, exact: boolean, ambigAlts: BitSet | undefined, configs: ATNConfigSet): void;
reportAttemptingFullContext(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, conflictingAlts: BitSet | undefined, conflictState: SimulatorState): void;
reportContextSensitivity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, prediction: number, acceptState: SimulatorState): void;
}

@@ -7,6 +7,6 @@ /*!

import { IntervalSet } from "./misc/IntervalSet";
import { IntStream } from './IntStream';
import { IntStream } from "./IntStream";
import { Lexer } from "./Lexer";
import { ParserRuleContext } from "./ParserRuleContext";
import { Recognizer } from './Recognizer';
import { Recognizer } from "./Recognizer";
import { RuleContext } from "./RuleContext";

@@ -23,4 +23,4 @@ import { Token } from "./Token";

private _recognizer?;
private _ctx?;
protected _input?: IntStream;
private ctx?;
private input?;
/**

@@ -31,3 +31,3 @@ * The current {@link Token} when an error occurred. Since not all streams

*/
private _offendingToken?;
private offendingToken?;
private _offendingState;

@@ -44,3 +44,3 @@ constructor(lexer: Lexer | undefined, input: CharStream);

*
* <p>If the state number is not known, this method returns -1.</p>
* If the state number is not known, this method returns -1.
*/

@@ -53,7 +53,7 @@ readonly offendingState: number;

*
* <p>If the set of expected tokens is not known and could not be computed,
* this method returns {@code null}.</p>
* If the set of expected tokens is not known and could not be computed,
* this method returns `undefined`.
*
* @return The set of token types that could potentially follow the current
* state in the ATN, or {@code null} if the information is not available.
* @returns The set of token types that could potentially follow the current
* state in the ATN, or `undefined` if the information is not available.
*/

@@ -64,6 +64,6 @@ readonly expectedTokens: IntervalSet | undefined;

*
* <p>If the context is not available, this method returns {@code null}.</p>
* If the context is not available, this method returns `undefined`.
*
* @return The {@link RuleContext} at the time this exception was thrown.
* If the context is not available, this method returns {@code null}.
* @returns The {@link RuleContext} at the time this exception was thrown.
* If the context is not available, this method returns `undefined`.
*/

@@ -75,6 +75,6 @@ readonly context: RuleContext | undefined;

*
* <p>If the input stream is not available, this method returns {@code null}.</p>
* If the input stream is not available, this method returns `undefined`.
*
* @return The input stream which is the symbol source for the recognizer
* where this exception was thrown, or {@code null} if the stream is not
* @returns The input stream which is the symbol source for the recognizer
* where this exception was thrown, or `undefined` if the stream is not
* available.

@@ -84,9 +84,9 @@ */

getOffendingToken(recognizer?: Recognizer<Token, any>): Token | undefined;
protected setOffendingToken<Symbol extends Token>(recognizer: Recognizer<Symbol, any>, offendingToken?: Symbol): void;
protected setOffendingToken<TSymbol extends Token>(recognizer: Recognizer<TSymbol, any>, offendingToken?: TSymbol): void;
/**
* Gets the {@link Recognizer} where this exception occurred.
*
* <p>If the recognizer is not available, this method returns {@code null}.</p>
* If the recognizer is not available, this method returns `undefined`.
*
* @return The recognizer where this exception occurred, or {@code null} if
* @returns The recognizer where this exception occurred, or `undefined` if
* the recognizer is not available.

@@ -93,0 +93,0 @@ */

@@ -13,3 +13,3 @@ /*!

import { Vocabulary } from "./Vocabulary";
export declare abstract class Recognizer<Symbol, ATNInterpreter extends ATNSimulator> {
export declare abstract class Recognizer<TSymbol, ATNInterpreter extends ATNSimulator> {
static readonly EOF: number;

@@ -25,3 +25,3 @@ private static tokenTypeMapCache;

*
* @return A {@link Vocabulary} instance providing information about the
* @returns A {@link Vocabulary} instance providing information about the
* vocabulary used by the grammar.

@@ -33,3 +33,3 @@ */

*
* <p>Used for XPath and tree pattern compilation.</p>
* Used for XPath and tree pattern compilation.
*/

@@ -40,3 +40,3 @@ getTokenTypeMap(): ReadonlyMap<string, number>;

*
* <p>Used for XPath and tree pattern compilation.</p>
* Used for XPath and tree pattern compilation.
*/

@@ -49,4 +49,4 @@ getRuleIndexMap(): ReadonlyMap<string, number>;

*
* <p>For interpreters, we don't know their serialized ATN despite having
* created the interpreter from it.</p>
* For interpreters, we don't know their serialized ATN despite having
* created the interpreter from it.
*/

@@ -61,3 +61,3 @@ readonly serializedATN: string;

*
* @return The {@link ATN} used by the recognizer for prediction.
* @returns The {@link ATN} used by the recognizer for prediction.
*/

@@ -68,3 +68,3 @@ readonly atn: ATN;

*
* @return The ATN interpreter used by the recognizer for prediction.
* @returns The ATN interpreter used by the recognizer for prediction.
*/

@@ -83,13 +83,13 @@ /**

*/
readonly parseInfo: ParseInfo | undefined;
readonly parseInfo: Promise<ParseInfo | undefined>;
/** What is the error header, normally line/character position information? */
getErrorHeader(e: RecognitionException): string;
/**
* @exception NullPointerException if {@code listener} is {@code null}.
* @exception NullPointerException if `listener` is `undefined`.
*/
addErrorListener(listener: ANTLRErrorListener<Symbol>): void;
removeErrorListener(listener: ANTLRErrorListener<Symbol>): void;
addErrorListener(listener: ANTLRErrorListener<TSymbol>): void;
removeErrorListener(listener: ANTLRErrorListener<TSymbol>): void;
removeErrorListeners(): void;
getErrorListeners(): ANTLRErrorListener<Symbol>[];
getErrorListenerDispatch(): ANTLRErrorListener<Symbol>;
getErrorListeners(): Array<ANTLRErrorListener<TSymbol>>;
getErrorListenerDispatch(): ANTLRErrorListener<TSymbol>;
sempred(_localctx: RuleContext | undefined, ruleIndex: number, actionIndex: number): boolean;

@@ -96,0 +96,0 @@ precpred(localctx: RuleContext | undefined, precedence: number): boolean;

@@ -5,4 +5,4 @@ /*!

*/
import { Parser } from './Parser';
import { Recognizer } from './Recognizer';
import { Parser } from "./Parser";
import { Recognizer } from "./Recognizer";
import { RuleNode } from "./tree/RuleNode";

@@ -12,2 +12,52 @@ import { ParseTree } from "./tree/ParseTree";

import { ParseTreeVisitor } from "./tree/ParseTreeVisitor";
/** A rule context is a record of a single rule invocation.
*
* We form a stack of these context objects using the parent
* pointer. A parent pointer of `undefined` indicates that the current
* context is the bottom of the stack. The ParserRuleContext subclass
* has a children list so that we can turn this data structure into a
* tree.
*
* The root node always has an `undefined` pointer and invokingState of -1.
*
* Upon entry to parsing, the first invoked rule function creates a
* context object (a subclass specialized for that rule such as
* SContext) and makes it the root of a parse tree, recorded by field
* Parser._ctx.
*
* public final SContext s() throws RecognitionException {
* SContext _localctx = new SContext(_ctx, state); <-- create new node
* enterRule(_localctx, 0, RULE_s); <-- push it
* ...
* exitRule(); <-- pop back to _localctx
* return _localctx;
* }
*
* A subsequent rule invocation of r from the start rule s pushes a
* new context object for r whose parent points at s and whose invoking
* state is the state with r emanating as edge label.
*
* The invokingState fields from a context object to the root
* together form a stack of rule invocation states where the root
* (bottom of the stack) has a -1 sentinel value. If we invoke start
* symbol s then call r1, which calls r2, the stack would look like
* this:
*
* SContext[-1] <- root node (bottom of the stack)
* R1Context[p] <- p in rule s called r1
* R2Context[q] <- q in rule r1 called r2
*
* So the top of the stack, _ctx, represents a call to the current
* rule and it holds the return address from another rule that invoke
* to this rule. To invoke a rule, we must always have a current context.
*
* The parent contexts are useful for computing lookahead sets and
* getting error information.
*
* These objects are used during parsing and prediction.
* For the special case of parsers, we use the subclass
* ParserRuleContext.
*
* @see ParserRuleContext
*/
export declare class RuleContext extends RuleNode {

@@ -27,6 +77,8 @@ _parent: RuleContext | undefined;

readonly parent: RuleContext | undefined;
/** @since 4.7. {@see ParseTree#setParent} comment */
setParent(parent: RuleContext): void;
readonly payload: RuleContext;
/** Return the combined text of all child nodes. This method only considers
* tokens which have been added to the parse tree.
* <p>
*
* Since tokens on hidden channels (e.g. whitespace or comments) are not

@@ -33,0 +85,0 @@ * added to the parse trees, they will not appear in the output of this

@@ -5,3 +5,3 @@ /*!

*/
import { ParserRuleContext } from './ParserRuleContext';
import { ParserRuleContext } from "./ParserRuleContext";
/** A handy class for use with

@@ -8,0 +8,0 @@ *

@@ -5,4 +5,4 @@ /*!

*/
import { Dependents } from './Dependents';
import { Parser } from './Parser';
import { Dependents } from "./Dependents";
import { Parser } from "./Parser";
/**

@@ -16,3 +16,3 @@ * Declares a dependency upon a grammar rule, along with a set of zero or more dependent rules.

*/
export declare function RuleDependency(dependency: DependencySpecification): (target: Object, propertyKey: PropertyKey, propertyDescriptor: PropertyDescriptor) => void;
export declare function RuleDependency(dependency: DependencySpecification): (target: object, propertyKey: PropertyKey, propertyDescriptor: PropertyDescriptor) => void;
export interface DependencySpecification {

@@ -19,0 +19,0 @@ readonly recognizer: {

@@ -5,4 +5,4 @@ /*!

*/
import { Parser } from './Parser';
import { ParserRuleContext } from './ParserRuleContext';
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
/**

@@ -12,2 +12,2 @@ *

*/
export declare function RuleVersion(version: number): (target: Parser, propertyKey: PropertyKey, propertyDescriptor: TypedPropertyDescriptor<(...args: any[]) => ParserRuleContext>) => void;
export declare function RuleVersion(version: number): <T extends ParserRuleContext>(target: Parser, propertyKey: PropertyKey, propertyDescriptor: TypedPropertyDescriptor<(...args: any[]) => T>) => void;

@@ -5,4 +5,4 @@ /*!

*/
import { CharStream } from './CharStream';
import { TokenSource } from './TokenSource';
import { CharStream } from "./CharStream";
import { TokenSource } from "./TokenSource";
/** A token has properties: text, type, line, character position in the line

@@ -77,6 +77,5 @@ * (so we can ignore tabs), token channel, index, and source from which

*
* <p>
* The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are
* assigned to the predefined channels {@link #DEFAULT_CHANNEL} and
* {@link #HIDDEN_CHANNEL}.</p>
* {@link #HIDDEN_CHANNEL}.
*

@@ -83,0 +82,0 @@ * @see `Token.channel`

@@ -5,12 +5,12 @@ /*!

*/
import { CharStream } from './CharStream';
import { Token } from './Token';
import { TokenSource } from './TokenSource';
import { CharStream } from "./CharStream";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/** The default mechanism for creating tokens. It's used by default in Lexer and
* the error handling strategy (to create missing tokens). Notifying the parser
* of a new factory means that it notifies it's token source and error strategy.
* of a new factory means that it notifies its token source and error strategy.
*/
export interface TokenFactory {
/** This is the method used to create tokens in the lexer and in the
* error handling strategy. If text!=null, than the start and stop positions
* error handling strategy. If text!=undefined, then the start and stop positions
* are wiped to -1 if the text override is set in the CommonToken.

@@ -17,0 +17,0 @@ */

@@ -5,5 +5,5 @@ /*!

*/
import { CharStream } from './CharStream';
import { Token } from './Token';
import { TokenFactory } from './TokenFactory';
import { CharStream } from "./CharStream";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
/**

@@ -15,3 +15,3 @@ * A source of tokens must provide a sequence of tokens via {@link #nextToken()}

*
* <p>Errors from the lexer are never passed to the parser. Either you want to keep
* Errors from the lexer are never passed to the parser. Either you want to keep
* going or you do not upon token recognition error. If you do not want to

@@ -23,3 +23,3 @@ * continue lexing then you do not want to continue parsing. Just throw an

* token. Keep lexing until you get a valid one. Just report errors and keep
* going, looking for a valid token.</p>
* going, looking for a valid token.
*/

@@ -38,3 +38,3 @@ export interface TokenSource {

*
* @return The line number for the current position in the input stream, or
* @returns The line number for the current position in the input stream, or
* 0 if the current token source does not track line numbers.

@@ -47,3 +47,3 @@ */

*
* @return The line number for the current position in the input stream, or
* @returns The line number for the current position in the input stream, or
* -1 if the current token source does not track character positions.

@@ -56,4 +56,4 @@ */

*
* @return The {@link CharStream} associated with the current position in
* the input, or {@code null} if no input stream is available for the token
* @returns The {@link CharStream} associated with the current position in
* the input, or `undefined` if no input stream is available for the token
* source.

@@ -64,3 +64,3 @@ */

* Gets the name of the underlying input source. This method returns a
* non-null, non-empty string. If such a name is not known, this method
* non-undefined, non-empty string. If such a name is not known, this method
* returns {@link IntStream#UNKNOWN_SOURCE_NAME}.

@@ -67,0 +67,0 @@ */

@@ -5,7 +5,7 @@ /*!

*/
import { Interval } from './misc/Interval';
import { IntStream } from './IntStream';
import { RuleContext } from './RuleContext';
import { Token } from './Token';
import { TokenSource } from './TokenSource';
import { Interval } from "./misc/Interval";
import { IntStream } from "./IntStream";
import { RuleContext } from "./RuleContext";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/**

@@ -18,3 +18,3 @@ * An {@link IntStream} whose symbols are {@link Token} instances.

* post-conditions as `IntStream.LA`. In addition, when the preconditions of this method are met, the return value
* is non-null and the value of `LT(k).type === LA(k)`.
* is non-undefined and the value of `LT(k).type === LA(k)`.
*

@@ -30,3 +30,3 @@ * A `RangeError` is thrown if `k<0` and fewer than `-k` calls to `consume()` have occurred from the beginning of

* post-conditions as `IntStream.LA`. In addition, when the preconditions of this method are met, the return value
* is non-null and the value of `tryLT(k).type === LA(k)`.
* is non-undefined and the value of `tryLT(k).type === LA(k)`.
*

@@ -40,14 +40,14 @@ * The return value is `undefined` if `k<0` and fewer than `-k` calls to `consume()` have occurred from the

/**
* Gets the {@link Token} at the specified {@code index} in the stream. When
* the preconditions of this method are met, the return value is non-null.
* Gets the {@link Token} at the specified `index` in the stream. When
* the preconditions of this method are met, the return value is non-undefined.
*
* <p>The preconditions for this method are the same as the preconditions of
* {@link IntStream#seek}. If the behavior of {@code seek(index)} is
* unspecified for the current state and given {@code index}, then the
* behavior of this method is also unspecified.</p>
* The preconditions for this method are the same as the preconditions of
* {@link IntStream#seek}. If the behavior of `seek(index)` is
* unspecified for the current state and given `index`, then the
* behavior of this method is also unspecified.
*
* <p>The symbol referred to by {@code index} differs from {@code seek()} only
* in the case of filtering streams where {@code index} lies before the end
* of the stream. Unlike {@code seek()}, this method does not adjust
* {@code index} to point to a non-ignored symbol.</p>
* The symbol referred to by `index` differs from `seek()` only
* in the case of filtering streams where `index` lies before the end
* of the stream. Unlike `seek()`, this method does not adjust
* `index` to point to a non-ignored symbol.
*

@@ -65,3 +65,3 @@ * @throws IllegalArgumentException if {code index} is less than 0

/**
* Return the text of all tokens within the specified {@code interval}. This
* Return the text of all tokens within the specified `interval`. This
* method behaves like the following code (including potential exceptions

@@ -71,16 +71,16 @@ * for violating preconditions of {@link #get}, but may be optimized by the

*
* <pre>
* ```
* TokenStream stream = ...;
* String text = "";
* for (int i = interval.a; i &lt;= interval.b; i++) {
* for (int i = interval.a; i <= interval.b; i++) {
* text += stream.get(i).text;
* }
* </pre>
* ```
*
* @param interval The interval of tokens within this stream to get text
* for.
* @return The text of all tokens within the specified interval in this
* @returns The text of all tokens within the specified interval in this
* stream.
*
* @throws NullPointerException if {@code interval} is {@code null}
* @throws NullPointerException if `interval` is `undefined`
*/

@@ -94,8 +94,8 @@ getText(interval: Interval): string;

*
* <pre>
* ```
* TokenStream stream = ...;
* String text = stream.getText(new Interval(0, stream.size));
* </pre>
* ```
*
* @return The text of all tokens in the stream.
* @returns The text of all tokens in the stream.
*/

@@ -109,40 +109,40 @@ getText(): string;

*
* <p>If {@code ctx.sourceInterval} does not return a valid interval of
* tokens provided by this stream, the behavior is unspecified.</p>
* If `ctx.sourceInterval` does not return a valid interval of
* tokens provided by this stream, the behavior is unspecified.
*
* <pre>
* ```
* TokenStream stream = ...;
* String text = stream.getText(ctx.sourceInterval);
* </pre>
* ```
*
* @param ctx The context providing the source interval of tokens to get
* text for.
* @return The text of all tokens within the source interval of {@code ctx}.
* @returns The text of all tokens within the source interval of `ctx`.
*/
getText(ctx: RuleContext): string;
/**
* Return the text of all tokens in this stream between {@code start} and
* {@code stop} (inclusive).
* Return the text of all tokens in this stream between `start` and
* `stop` (inclusive).
*
* <p>If the specified {@code start} or {@code stop} token was not provided by
* this stream, or if the {@code stop} occurred before the {@code start}
* token, the behavior is unspecified.</p>
* If the specified `start` or `stop` token was not provided by
* this stream, or if the `stop` occurred before the `start`
* token, the behavior is unspecified.
*
* <p>For streams which ensure that the `Token.tokenIndex` method is
* For streams which ensure that the `Token.tokenIndex` method is
* accurate for all of its provided tokens, this method behaves like the
* following code. Other streams may implement this method in other ways
* provided the behavior is consistent with this at a high level.</p>
* provided the behavior is consistent with this at a high level.
*
* <pre>
* ```
* TokenStream stream = ...;
* String text = "";
* for (int i = start.tokenIndex; i &lt;= stop.tokenIndex; i++) {
* for (int i = start.tokenIndex; i <= stop.tokenIndex; i++) {
* text += stream.get(i).text;
* }
* </pre>
* ```
*
* @param start The first token in the interval to get text for.
* @param stop The last token in the interval to get text for (inclusive).
* @return The text of all tokens lying between the specified {@code start}
* and {@code stop} tokens.
* @returns The text of all tokens lying between the specified `start`
* and `stop` tokens.
*

@@ -149,0 +149,0 @@ * @throws UnsupportedOperationException if this stream does not support

@@ -5,5 +5,5 @@ /*!

*/
import { Interval } from './misc/Interval';
import { Token } from './Token';
import { TokenStream } from './TokenStream';
import { Interval } from "./misc/Interval";
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
/**

@@ -13,3 +13,2 @@ * Useful for rewriting out a buffered input token stream after doing some

*
* <p>
* You can insert stuff, replace, and delete chunks. Note that the operations

@@ -23,29 +22,25 @@ * are done lazily--only if you convert the buffer to a {@link String} with

* buffer. This is like having multiple Turing machine instruction streams
* (programs) operating on a single input tape. :)</p>
* (programs) operating on a single input tape. :)
*
* <p>
* This rewriter makes no modifications to the token stream. It does not ask the
* stream to fill itself up nor does it advance the input cursor. The token
* stream `TokenStream.index` will return the same value before and
* after any {@link #getText()} call.</p>
* after any {@link #getText()} call.
*
* <p>
* The rewriter only works on tokens that you have in the buffer and ignores the
* current input cursor. If you are buffering tokens on-demand, calling
* {@link #getText()} halfway through the input will only do rewrites for those
* tokens in the first half of the file.</p>
* tokens in the first half of the file.
*
* <p>
* Since the operations are done lazily at {@link #getText}-time, operations do
* not screw up the token index values. That is, an insert operation at token
* index {@code i} does not change the index values for tokens
* {@code i}+1..n-1.</p>
* index `i` does not change the index values for tokens
* `i`+1..n-1.
*
* <p>
* Because operations never actually alter the buffer, you may always get the
* original token stream back without undoing anything. Since the instructions
* are queued up, you can easily simulate transactions and roll back any changes
* if there is an error just by removing instructions. For example,</p>
* if there is an error just by removing instructions. For example,
*
* <pre>
* ```
* CharStream input = new ANTLRFileStream("input");

@@ -57,8 +52,7 @@ * TLexer lex = new TLexer(input);

* parser.startRule();
* </pre>
* ```
*
* <p>
* Then in the rules, you can execute (assuming rewriter is visible):</p>
* Then in the rules, you can execute (assuming rewriter is visible):
*
* <pre>
* ```
* Token t,u;

@@ -69,11 +63,10 @@ * ...

* System.out.println(rewriter.getText());
* </pre>
* ```
*
* <p>
* You can also have multiple "instruction streams" and get multiple rewrites
* from a single pass over the input. Just name the instruction streams and use
* that name again when printing the buffer. This could be useful for generating
* a C file and also its header file--all from the same buffer:</p>
* a C file and also its header file--all from the same buffer:
*
* <pre>
* ```
* rewriter.insertAfter("pass1", t, "text to put after t");

@@ -83,7 +76,6 @@ * rewriter.insertAfter("pass2", u, "text after u");}

* System.out.println(rewriter.getText("pass2"));
* </pre>
* ```
*
* <p>
* If you don't use named rewrite streams, a "default" stream is used as the
* first example shows.</p>
* first example shows.
*/

@@ -114,16 +106,16 @@ export declare class TokenStreamRewriter {

deleteProgram(programName: string): void;
insertAfter(t: Token, text: any): void;
insertAfter(index: number, text: any): void;
insertAfter(t: Token, text: any, programName: string): void;
insertAfter(index: number, text: any, programName: string): void;
insertBefore(t: Token, text: any): void;
insertBefore(index: number, text: any): void;
insertBefore(t: Token, text: any, programName: string): void;
insertBefore(index: number, text: any, programName: string): void;
replaceSingle(index: number, text: any): void;
replaceSingle(indexT: Token, text: any): void;
replace(from: number, to: number, text: any): void;
replace(from: Token, to: Token, text: any): void;
replace(from: number, to: number, text: any, programName: string): void;
replace(from: Token, to: Token, text: any, programName: string): void;
insertAfter(t: Token, text: {}): void;
insertAfter(index: number, text: {}): void;
insertAfter(t: Token, text: {}, programName: string): void;
insertAfter(index: number, text: {}, programName: string): void;
insertBefore(t: Token, text: {}): void;
insertBefore(index: number, text: {}): void;
insertBefore(t: Token, text: {}, programName: string): void;
insertBefore(index: number, text: {}, programName: string): void;
replaceSingle(index: number, text: {}): void;
replaceSingle(indexT: Token, text: {}): void;
replace(from: number, to: number, text: {}): void;
replace(from: Token, to: Token, text: {}): void;
replace(from: number, to: number, text: {}, programName: string): void;
replace(from: Token, to: Token, text: {}, programName: string): void;
delete(index: number): void;

@@ -173,7 +165,7 @@ delete(from: number, to: number): void;

*
* Delete special case of replace (text==null):
* Delete special case of replace (text==undefined):
* D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
*
* I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
* we're not deleting i)
* we're not deleting i)
* I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping

@@ -190,5 +182,5 @@ * R.x-y.v I.i.u | i in x-y ERROR

* 1. wipe out any insertions before op within that range.
* 2. Drop any replace op before that is contained completely within
* that range.
* 3. Throw exception upon boundary overlap with any previous replace.
* 2. Drop any replace op before that is contained completely within
* that range.
* 3. Throw exception upon boundary overlap with any previous replace.
*

@@ -199,6 +191,6 @@ * Then we can deal with inserts:

* 2. for any prior replace with same left boundary, combine this
* insert with replace and delete this replace.
* insert with replace and delete this replace.
* 3. throw exception if index in same range as previous replace
*
* Don't actually delete; make op null in list. Easier to walk list.
* Don't actually delete; make op undefined in list. Easier to walk list.
* Later we can throw as we add to index &rarr; op map.

@@ -213,6 +205,6 @@ *

*/
protected reduceToSingleOperationPerIndex(rewrites: (RewriteOperation | undefined)[]): Map<number, RewriteOperation>;
protected catOpText(a: any, b: any): string;
protected reduceToSingleOperationPerIndex(rewrites: Array<RewriteOperation | undefined>): Map<number, RewriteOperation>;
protected catOpText(a: {}, b: {}): string;
/** Get all operations before an index of a particular kind */
protected getKindOfOps<T extends RewriteOperation>(rewrites: (RewriteOperation | undefined)[], kind: {
protected getKindOfOps<T extends RewriteOperation>(rewrites: Array<RewriteOperation | undefined>, kind: {
new (...args: any[]): T;

@@ -227,5 +219,5 @@ }, before: number): T[];

index: number;
text: any;
text: {};
constructor(tokens: TokenStream, index: number);
constructor(tokens: TokenStream, index: number, text: any);
constructor(tokens: TokenStream, index: number, text: {});
/** Execute the rewrite operation by possibly adding to the buffer.

@@ -232,0 +224,0 @@ * Return the index of the next token to operate on.

@@ -5,7 +5,7 @@ /*!

*/
import { ErrorNode } from './ErrorNode';
import { ParseTree } from './ParseTree';
import { ParseTreeVisitor } from './ParseTreeVisitor';
import { RuleNode } from './RuleNode';
import { TerminalNode } from './TerminalNode';
import { ErrorNode } from "./ErrorNode";
import { ParseTree } from "./ParseTree";
import { ParseTreeVisitor } from "./ParseTreeVisitor";
import { RuleNode } from "./RuleNode";
import { TerminalNode } from "./TerminalNode";
export declare abstract class AbstractParseTreeVisitor<Result> implements ParseTreeVisitor<Result> {

@@ -15,4 +15,4 @@ /**

*
* <p>The default implementation calls {@link ParseTree#accept} on the
* specified tree.</p>
* The default implementation calls {@link ParseTree#accept} on the
* specified tree.
*/

@@ -23,13 +23,13 @@ visit(tree: ParseTree): Result;

*
* <p>The default implementation initializes the aggregate result to
* The default implementation initializes the aggregate result to
* {@link #defaultResult defaultResult()}. Before visiting each child, it
* calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result
* is {@code false} no more children are visited and the current aggregate
* is `false` no more children are visited and the current aggregate
* result is returned. After visiting a child, the aggregate result is
* updated by calling {@link #aggregateResult aggregateResult} with the
* previous aggregate result and the result of visiting the child.</p>
* previous aggregate result and the result of visiting the child.
*
* <p>The default implementation is not safe for use in visitors that modify
* The default implementation is not safe for use in visitors that modify
* the tree structure. Visitors that modify the tree should override this
* method to behave properly in respect to the specific algorithm in use.</p>
* method to behave properly in respect to the specific algorithm in use.
*/

@@ -40,4 +40,4 @@ visitChildren(node: RuleNode): Result;

*
* <p>The default implementation returns the result of
* {@link #defaultResult defaultResult}.</p>
* The default implementation returns the result of
* {@link #defaultResult defaultResult}.
*/

@@ -48,4 +48,4 @@ visitTerminal(node: TerminalNode): Result;

*
* <p>The default implementation returns the result of
* {@link #defaultResult defaultResult}.</p>
* The default implementation returns the result of
* {@link #defaultResult defaultResult}.
*/

@@ -60,3 +60,3 @@ visitErrorNode(node: ErrorNode): Result;

*
* @return The default value returned by visitor methods.
* @returns The default value returned by visitor methods.
*/

@@ -67,12 +67,12 @@ protected abstract defaultResult(): Result;

* either all children are visited or {@link #shouldVisitNextChild} returns
* {@code false}, the aggregate value is returned as the result of
* `false`, the aggregate value is returned as the result of
* {@link #visitChildren}.
*
* <p>The default implementation returns {@code nextResult}, meaning
* The default implementation returns `nextResult`, meaning
* {@link #visitChildren} will return the result of the last child visited
* (or return the initial value if the node has no children).</p>
* (or return the initial value if the node has no children).
*
* @param aggregate The previous aggregate value. In the default
* implementation, the aggregate value is initialized to
* {@link #defaultResult}, which is passed as the {@code aggregate} argument
* {@link #defaultResult}, which is passed as the `aggregate` argument
* to this method after the first child node is visited.

@@ -82,3 +82,3 @@ * @param nextResult The result of the immediately preceding call to visit

*
* @return The updated aggregate result.
* @returns The updated aggregate result.
*/

@@ -89,3 +89,3 @@ protected aggregateResult(aggregate: Result, nextResult: Result): Result;

* {@link #visitChildren}. This method is first called before the first
* child is visited; at that point {@code currentResult} will be the initial
* child is visited; at that point `currentResult` will be the initial
* value (in the default implementation, the initial value is returned by a

@@ -95,8 +95,8 @@ * call to {@link #defaultResult}. This method is not called after the last

*
* <p>The default implementation always returns {@code true}, indicating that
* {@code visitChildren} should only return after all children are visited.
* The default implementation always returns `true`, indicating that
* `visitChildren` should only return after all children are visited.
* One reason to override this method is to provide a "short circuit"
* evaluation option for situations where the result of visiting a single
* child has the potential to determine the result of the visit operation as
* a whole.</p>
* a whole.
*

@@ -108,4 +108,4 @@ * @param node The {@link RuleNode} whose children are currently being

*
* @return {@code true} to continue visiting children. Otherwise return
* {@code false} to stop visiting children and immediately return the
* @returns `true` to continue visiting children. Otherwise return
* `false` to stop visiting children and immediately return the
* current aggregate result from {@link #visitChildren}.

@@ -112,0 +112,0 @@ */

@@ -5,5 +5,5 @@ /*!

*/
import { ParseTreeVisitor } from './ParseTreeVisitor';
import { TerminalNode } from './TerminalNode';
import { Token } from '../Token';
import { ParseTreeVisitor } from "./ParseTreeVisitor";
import { TerminalNode } from "./TerminalNode";
import { Token } from "../Token";
/** Represents a token that was consumed during resynchronization

@@ -10,0 +10,0 @@ * rather than during a valid match operation. For example,

@@ -5,13 +5,15 @@ /*!

*/
export * from './AbstractParseTreeVisitor';
export * from './ErrorNode';
export * from './ParseTree';
export * from './ParseTreeListener';
export * from './ParseTreeProperty';
export * from './ParseTreeVisitor';
export * from './ParseTreeWalker';
export * from './RuleNode';
export * from './SyntaxTree';
export * from './TerminalNode';
export * from './Tree';
export * from './Trees';
export * from "./AbstractParseTreeVisitor";
export * from "./ErrorNode";
export * from "./ParseTree";
export * from "./ParseTreeListener";
export * from "./ParseTreeProperty";
export * from "./ParseTreeVisitor";
export * from "./ParseTreeWalker";
export * from "./RuleNode";
export * from "./SyntaxTree";
export * from "./TerminalNode";
export * from "./Tree";
export * from "./Trees";
export * from "./xpath";
export * from "./pattern";

@@ -5,5 +5,6 @@ /*!

*/
import { Parser } from '../Parser';
import { ParseTreeVisitor } from './ParseTreeVisitor';
import { SyntaxTree } from './SyntaxTree';
import { Parser } from "../Parser";
import { ParseTreeVisitor } from "./ParseTreeVisitor";
import { RuleContext } from "../RuleContext";
import { SyntaxTree } from "./SyntaxTree";
/** An interface to access the tree of {@link RuleContext} objects created

@@ -14,6 +15,12 @@ * during a parse that makes the data structure look like a simple parse tree.

*
* <p>The payload is either a {@link Token} or a {@link RuleContext} object.</p>
* The payload is either a {@link Token} or a {@link RuleContext} object.
*/
export interface ParseTree extends SyntaxTree {
readonly parent: ParseTree | undefined;
/**
* Set the parent for this node.
*
* @since 4.7
*/
setParent(parent: RuleContext): void;
getChild(i: number): ParseTree;

@@ -20,0 +27,0 @@ /** The {@link ParseTreeVisitor} needs a double dispatch method. */

@@ -5,10 +5,12 @@ /*!

*/
import { ErrorNode } from './ErrorNode';
import { ParserRuleContext } from '../ParserRuleContext';
import { TerminalNode } from './TerminalNode';
import { ErrorNode } from "./ErrorNode";
import { ParserRuleContext } from "../ParserRuleContext";
import { TerminalNode } from "./TerminalNode";
/** This interface describes the minimal core of methods triggered
* by {@link ParseTreeWalker}. E.g.,
*
* ParseTreeWalker walker = new ParseTreeWalker();
* walker.walk(myParseTreeListener, myParseTree); <-- triggers events in your listener
* ```
* ParseTreeWalker walker = new ParseTreeWalker();
* walker.walk(myParseTreeListener, myParseTree); <-- triggers events in your listener
* ```
*

@@ -15,0 +17,0 @@ * If you want to trigger events in multiple listeners during a single

@@ -5,2 +5,3 @@ /*!

*/
import { ParseTree } from "./ParseTree";
/**

@@ -12,8 +13,8 @@ * Associate a property with a parse tree node. Useful with parse tree listeners

*
* <pre>
* ParseTreeProperty&lt;Integer&gt; values = new ParseTreeProperty&lt;Integer&gt;();
* ```
* ParseTreeProperty<Integer> values = new ParseTreeProperty<Integer>();
* values.put(tree, 36);
* int x = values.get(tree);
* values.removeFrom(tree);
* </pre>
* ```
*

@@ -23,3 +24,2 @@ * You would make one decl (values here) in the listener and use lots of times

*/
import { ParseTree } from "./ParseTree";
export declare class ParseTreeProperty<V> {

@@ -26,0 +26,0 @@ private _symbol;

@@ -5,10 +5,10 @@ /*!

*/
import { ErrorNode } from './ErrorNode';
import { ParseTree } from './ParseTree';
import { RuleNode } from './RuleNode';
import { TerminalNode } from './TerminalNode';
import { ErrorNode } from "./ErrorNode";
import { ParseTree } from "./ParseTree";
import { RuleNode } from "./RuleNode";
import { TerminalNode } from "./TerminalNode";
/**
* This interface defines the basic notion of a parse tree visitor. Generated
* visitors implement this interface and the {@code XVisitor} interface for
* grammar {@code X}.
* visitors implement this interface and the `XVisitor` interface for
* grammar `X`.
*

@@ -24,3 +24,3 @@ * @author Sam Harwell

* @param tree The {@link ParseTree} to visit.
* @return The result of visiting the parse tree.
* @returns The result of visiting the parse tree.
*/

@@ -33,3 +33,3 @@ visit(tree: ParseTree): Result;

* @param node The {@link RuleNode} whose children should be visited.
* @return The result of visiting the children of the node.
* @returns The result of visiting the children of the node.
*/

@@ -41,3 +41,3 @@ visitChildren(node: RuleNode): Result;

* @param node The {@link TerminalNode} to visit.
* @return The result of visiting the node.
* @returns The result of visiting the node.
*/

@@ -49,5 +49,5 @@ visitTerminal(node: TerminalNode): Result;

* @param node The {@link ErrorNode} to visit.
* @return The result of visiting the node.
* @returns The result of visiting the node.
*/
visitErrorNode(node: ErrorNode): Result;
}

@@ -9,3 +9,3 @@ /*!

*
* <p>The method {@link ParseTreePatternMatcher#split(String)} returns a list of
* The method {@link ParseTreePatternMatcher#split(String)} returns a list of
* chunks in preparation for creating a token stream by

@@ -15,5 +15,5 @@ * {@link ParseTreePatternMatcher#tokenize(String)}. From there, we get a parse

* chunks are converted to {@link RuleTagToken}, {@link TokenTagToken}, or the
* regular tokens of the text surrounding the tags.</p>
* regular tokens of the text surrounding the tags.
*/
export declare abstract class Chunk {
}

@@ -5,6 +5,6 @@ /*!

*/
export * from './ParseTreeMatch';
export * from './ParseTreePattern';
export * from './ParseTreePatternMatcher';
export * from './RuleTagToken';
export * from './TokenTagToken';
export * from "./ParseTreeMatch";
export * from "./ParseTreePattern";
export * from "./ParseTreePatternMatcher";
export * from "./RuleTagToken";
export * from "./TokenTagToken";

@@ -39,22 +39,22 @@ /*!

*
* @exception IllegalArgumentException if {@code tree} is {@code null}
* @exception IllegalArgumentException if {@code pattern} is {@code null}
* @exception IllegalArgumentException if {@code labels} is {@code null}
* @throws {@link Error} if `tree` is not defined
* @throws {@link Error} if `pattern` is not defined
* @throws {@link Error} if `labels` is not defined
*/
constructor(tree: ParseTree, pattern: ParseTreePattern, labels: MultiMap<string, ParseTree>, mismatchedNode: ParseTree | undefined);
/**
* Get the last node associated with a specific {@code label}.
* Get the last node associated with a specific `label`.
*
* <p>For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
* node matched for that {@code ID}. If more than one node
* For example, for pattern `<id:ID>`, `get("id")` returns the
* node matched for that `ID`. If more than one node
* matched the specified label, only the last is returned. If there is
* no node associated with the label, this returns {@code null}.</p>
* no node associated with the label, this returns `undefined`.
*
* <p>Pattern tags like {@code <ID>} and {@code <expr>} without labels are
* considered to be labeled with {@code ID} and {@code expr}, respectively.</p>
* Pattern tags like `<ID>` and `<expr>` without labels are
* considered to be labeled with `ID` and `expr`, respectively.
*
* @param label The label to check.
*
* @return The last {@link ParseTree} to match a tag with the specified
* label, or {@code null} if no parse tree matched a tag with the label.
* @returns The last {@link ParseTree} to match a tag with the specified
* label, or `undefined` if no parse tree matched a tag with the label.
*/

@@ -65,20 +65,18 @@ get(label: string): ParseTree | undefined;

*
* <p>If the {@code label} is the name of a parser rule or token in the
* If the `label` is the name of a parser rule or token in the
* grammar, the resulting list will contain both the parse trees matching
* rule or tags explicitly labeled with the label and the complete set of
* parse trees matching the labeled and unlabeled tags in the pattern for
* the parser rule or token. For example, if {@code label} is {@code "foo"},
* the result will contain <em>all</em> of the following.</p>
* the parser rule or token. For example, if `label` is `"foo"`,
* the result will contain *all* of the following.
*
* <ul>
* <li>Parse tree nodes matching tags of the form {@code <foo:anyRuleName>} and
* {@code <foo:AnyTokenName>}.</li>
* <li>Parse tree nodes matching tags of the form {@code <anyLabel:foo>}.</li>
* <li>Parse tree nodes matching tags of the form {@code <foo>}.</li>
* </ul>
* * Parse tree nodes matching tags of the form `<foo:anyRuleName>` and
* `<foo:AnyTokenName>`.
* * Parse tree nodes matching tags of the form `<anyLabel:foo>`.
* * Parse tree nodes matching tags of the form `<foo>`.
*
* @param label The label.
*
* @return A collection of all {@link ParseTree} nodes matching tags with
* the specified {@code label}. If no nodes matched the label, an empty list
* @returns A collection of all {@link ParseTree} nodes matching tags with
* the specified `label`. If no nodes matched the label, an empty list
* is returned.

@@ -90,7 +88,7 @@ */

*
* <p>The map includes special entries corresponding to the names of rules and
* The map includes special entries corresponding to the names of rules and
* tokens referenced in tags in the original pattern. For additional
* information, see the description of {@link #getAll(String)}.</p>
* information, see the description of {@link #getAll(String)}.
*
* @return A mapping from labels to parse tree nodes. If the parse tree
* @returns A mapping from labels to parse tree nodes. If the parse tree
* pattern did not contain any rule or token tags, this map will be empty.

@@ -102,3 +100,3 @@ */

*
* @return the node at which we first detected a mismatch, or {@code null}
* @returns the node at which we first detected a mismatch, or `undefined`
* if the match was successful.

@@ -110,4 +108,4 @@ */

*
* @return {@code true} if the match operation succeeded; otherwise,
* {@code false}.
* @returns `true` if the match operation succeeded; otherwise,
* `false`.
*/

@@ -118,3 +116,3 @@ readonly succeeded: boolean;

*
* @return The tree pattern we are matching against.
* @returns The tree pattern we are matching against.
*/

@@ -125,3 +123,3 @@ readonly pattern: ParseTreePattern;

*
* @return The {@link ParseTree} we are trying to match to a pattern.
* @returns The {@link ParseTree} we are trying to match to a pattern.
*/

@@ -128,0 +126,0 @@ readonly tree: ParseTree;

@@ -9,3 +9,3 @@ /*!

/**
* A pattern like {@code <ID> = <expr>;} converted to a {@link ParseTree} by
* A pattern like `<ID> = <expr>;` converted to a {@link ParseTree} by
* {@link ParseTreePatternMatcher#compile(String, int)}.

@@ -45,3 +45,3 @@ */

* @param tree The parse tree to match against this tree pattern.
* @return A {@link ParseTreeMatch} object describing the result of the
* @returns A {@link ParseTreeMatch} object describing the result of the
* match operation. The `ParseTreeMatch.succeeded` method can be

@@ -55,4 +55,4 @@ * used to determine whether or not the match was successful.

* @param tree The parse tree to match against this tree pattern.
* @return {@code true} if {@code tree} is a match for the current tree
* pattern; otherwise, {@code false}.
* @returns `true` if `tree` is a match for the current tree
* pattern; otherwise, `false`.
*/

@@ -67,3 +67,3 @@ matches(tree: ParseTree): boolean;

*
* @return A collection of {@link ParseTreeMatch} objects describing the
* @returns A collection of {@link ParseTreeMatch} objects describing the
* successful matches. Unsuccessful matches are omitted from the result,

@@ -76,3 +76,3 @@ * regardless of the reason for the failure.

*
* @return The {@link ParseTreePatternMatcher} which created this tree
* @returns The {@link ParseTreePatternMatcher} which created this tree
* pattern.

@@ -84,3 +84,3 @@ */

*
* @return The tree pattern in concrete syntax form.
* @returns The tree pattern in concrete syntax form.
*/

@@ -92,3 +92,3 @@ readonly pattern: string;

*
* @return The parser rule which serves as the outermost rule for the tree
* @returns The parser rule which serves as the outermost rule for the tree
* pattern.

@@ -102,5 +102,5 @@ */

*
* @return The tree pattern as a {@link ParseTree}.
* @returns The tree pattern as a {@link ParseTree}.
*/
readonly patternTree: ParseTree;
}

@@ -6,5 +6,5 @@ /*!

import { Chunk } from "./Chunk";
import { Lexer } from '../../Lexer';
import { Lexer } from "../../Lexer";
import { MultiMap } from "../../misc/MultiMap";
import { Parser } from '../../Parser';
import { Parser } from "../../Parser";
import { ParseTree } from "../ParseTree";

@@ -18,19 +18,21 @@ import { ParseTreeMatch } from "./ParseTreeMatch";

*
* <p>Patterns are strings of source input text with special tags representing
* token or rule references such as:</p>
* Patterns are strings of source input text with special tags representing
* token or rule references such as:
*
* <p>{@code <ID> = <expr>;}</p>
* ```
* <ID> = <expr>;
* ```
*
* <p>Given a pattern start rule such as {@code statement}, this object constructs
* a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
* Given a pattern start rule such as `statement`, this object constructs
* a {@link ParseTree} with placeholders for the `ID` and `expr`
* subtree. Then the {@link #match} routines can compare an actual
* {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
* any {@code ID} token and tag {@code <expr>} references the result of the
* {@code expr} rule (generally an instance of {@code ExprContext}.</p>
* {@link ParseTree} from a parse with this pattern. Tag `<ID>` matches
* any `ID` token and tag `<expr>` references the result of the
* `expr` rule (generally an instance of `ExprContext`).
*
* <p>Pattern {@code x = 0;} is a similar pattern that matches the same pattern
* except that it requires the identifier to be {@code x} and the expression to
* be {@code 0}.</p>
* Pattern `x = 0;` is a similar pattern that matches the same pattern
* except that it requires the identifier to be `x` and the expression to
* be `0`.
*
* <p>The {@link #matches} routines return {@code true} or {@code false} based
* The {@link #matches} routines return `true` or `false` based
* upon a match for the tree rooted at the parameter sent in. The

@@ -41,8 +43,8 @@ * {@link #match} routines return a {@link ParseTreeMatch} object that

* {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not
* match.</p>
* match.
*
* <p>For efficiency, you can compile a tree pattern in string form to a
* {@link ParseTreePattern} object.</p>
* For efficiency, you can compile a tree pattern in string form to a
* {@link ParseTreePattern} object.
*
* <p>See {@code TestParseTreeMatcher} for lots of examples.
* See `TestParseTreeMatcher` for lots of examples.
* {@link ParseTreePattern} has two static helper methods:

@@ -52,7 +54,7 @@ * {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that

* {@link ParseTreePatternMatcher} objects each time and have to compile the
* pattern in string form before using it.</p>
* pattern in string form before using it.
*
* <p>The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
* The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
* constructor are used to parse the pattern in string form. The lexer converts
* the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
* the `<ID> = <expr>;` into a sequence of four tokens (assuming lexer
* throws out whitespace or puts it on a hidden channel). Be aware that the

@@ -62,14 +64,14 @@ * input stream is reset for the lexer (but not the parser; a

* fields you have put into the lexer might get changed when this mechanism asks
* it to scan the pattern string.</p>
* it to scan the pattern string.
*
* <p>Normally a parser does not accept token {@code <expr>} as a valid
* {@code expr} but, from the parser passed in, we create a special version of
* Normally a parser does not accept token `<expr>` as a valid
* `expr` but, from the parser passed in, we create a special version of
* the underlying grammar representation (an {@link ATN}) that allows imaginary
* tokens representing rules ({@code <expr>}) to match entire rules. We call
* these <em>bypass alternatives</em>.</p>
* tokens representing rules (`<expr>`) to match entire rules. We call
* these *bypass alternatives*.
*
* <p>Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
* Delimiters are `<` and `>`, with `\` as the escape string
* by default, but you can set them to whatever you want using
* {@link #setDelimiters}. You must escape both start and stop strings
* {@code \<} and {@code \>}.</p>
* `\<` and `\>`.
*/

@@ -107,9 +109,9 @@ export declare class ParseTreePatternMatcher {

*
* @exception IllegalArgumentException if {@code start} is {@code null} or empty.
* @exception IllegalArgumentException if {@code stop} is {@code null} or empty.
* @throws {@link Error} if `start` is not defined or empty.
* @throws {@link Error} if `stop` is not defined or empty.
*/
setDelimiters(start: string, stop: string, escapeLeft: string): void;
/** Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}? */
/** Does `pattern` matched as rule `patternRuleIndex` match `tree`? */
matches(tree: ParseTree, pattern: string, patternRuleIndex: number): boolean;
/** Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a
/** Does `pattern` matched as rule patternRuleIndex match tree? Pass in a
* compiled pattern instead of a string representation of a tree pattern.

@@ -119,4 +121,4 @@ */

/**
* Compare {@code pattern} matched as rule {@code patternRuleIndex} against
* {@code tree} and return a {@link ParseTreeMatch} object that contains the
* Compare `pattern` matched as rule `patternRuleIndex` against
* `tree` and return a {@link ParseTreeMatch} object that contains the
* matched elements, or the node at which the match failed.

@@ -126,3 +128,3 @@ */

/**
* Compare {@code pattern} matched against {@code tree} and return a
* Compare `pattern` matched against `tree` and return a
* {@link ParseTreeMatch} object that contains the matched elements, or the

@@ -149,7 +151,7 @@ * node at which the match failed. Pass in a compiled pattern instead of a

/**
* Recursively walk {@code tree} against {@code patternTree}, filling
* {@code match.}{@link ParseTreeMatch#labels labels}.
* Recursively walk `tree` against `patternTree`, filling
* `match.`{@link ParseTreeMatch#labels labels}.
*
* @return the first node encountered in {@code tree} which does not match
* a corresponding node in {@code patternTree}, or {@code null} if the match
* @returns the first node encountered in `tree` which does not match
* a corresponding node in `patternTree`, or `undefined` if the match
* was successful. The specific node returned depends on the matching

@@ -159,6 +161,6 @@ * algorithm used by the implementation, and may be overridden.

protected matchImpl(tree: ParseTree, patternTree: ParseTree, labels: MultiMap<string, ParseTree>): ParseTree | undefined;
/** Is {@code t} {@code (expr <expr>)} subtree? */
/** Is `t` `(expr <expr>)` subtree? */
protected getRuleTagToken(t: ParseTree): RuleTagToken | undefined;
tokenize(pattern: string): Token[];
/** Split {@code <ID> = <e:expr> ;} into 4 chunks for tokenizing by {@link #tokenize}. */
/** Split `<ID> = <e:expr> ;` into 4 chunks for tokenizing by {@link #tokenize}. */
split(pattern: string): Chunk[];

@@ -165,0 +167,0 @@ }

@@ -5,8 +5,8 @@ /*!

*/
import { CharStream } from '../../CharStream';
import { Token } from '../../Token';
import { TokenSource } from '../../TokenSource';
import { CharStream } from "../../CharStream";
import { Token } from "../../Token";
import { TokenSource } from "../../TokenSource";
/**
* A {@link Token} object representing an entire subtree matched by a parser
* rule; e.g., {@code <expr>}. These tokens are created for {@link TagChunk}
* rule; e.g., `<expr>`. These tokens are created for {@link TagChunk}
* chunks where the tag corresponds to a parser rule.

@@ -34,6 +34,6 @@ */

* @param bypassTokenType The bypass token type assigned to the parser rule.
* @param label The label associated with the rule tag, or {@code null} if
* @param label The label associated with the rule tag, or `undefined` if
* the rule tag is unlabeled.
*
* @exception IllegalArgumentException if {@code ruleName} is {@code null}
* @exception IllegalArgumentException if `ruleName` is not defined
* or empty.

@@ -45,3 +45,3 @@ */

*
* @return The name of the parser rule associated with this rule tag.
* @returns The name of the parser rule associated with this rule tag.
*/

@@ -52,4 +52,4 @@ readonly ruleName: string;

*
* @return The name of the label associated with the rule tag, or
* {@code null} if this is an unlabeled rule tag.
* @returns The name of the label associated with the rule tag, or
* `undefined` if this is an unlabeled rule tag.
*/

@@ -60,3 +60,3 @@ readonly label: string | undefined;

*
* <p>Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.</p>
* Rule tag tokens are always placed on the {@link #DEFAULT_CHANNEL}.
*/

@@ -67,4 +67,4 @@ readonly channel: number;

*
* <p>This method returns the rule tag formatted with {@code <} and {@code >}
* delimiters.</p>
* This method returns the rule tag formatted with `<` and `>`
* delimiters.
*/

@@ -75,4 +75,4 @@ readonly text: string;

*
* <p>Rule tag tokens have types assigned according to the rule bypass
* transitions created during ATN deserialization.</p>
* Rule tag tokens have types assigned according to the rule bypass
* transitions created during ATN deserialization.
*/

@@ -83,3 +83,3 @@ readonly type: number;

*
* <p>The implementation for {@link RuleTagToken} always returns 0.</p>
* The implementation for {@link RuleTagToken} always returns 0.
*/

@@ -90,3 +90,3 @@ readonly line: number;

*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
* The implementation for {@link RuleTagToken} always returns -1.
*/

@@ -97,3 +97,3 @@ readonly charPositionInLine: number;

*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
* The implementation for {@link RuleTagToken} always returns -1.
*/

@@ -104,3 +104,3 @@ readonly tokenIndex: number;

*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
* The implementation for {@link RuleTagToken} always returns -1.
*/

@@ -111,3 +111,3 @@ readonly startIndex: number;

*
* <p>The implementation for {@link RuleTagToken} always returns -1.</p>
* The implementation for {@link RuleTagToken} always returns -1.
*/

@@ -118,3 +118,3 @@ readonly stopIndex: number;

*
* <p>The implementation for {@link RuleTagToken} always returns {@code null}.</p>
* The implementation for {@link RuleTagToken} always returns `undefined`.
*/

@@ -125,3 +125,3 @@ readonly tokenSource: TokenSource | undefined;

*
* <p>The implementation for {@link RuleTagToken} always returns {@code null}.</p>
* The implementation for {@link RuleTagToken} always returns `undefined`.
*/

@@ -132,6 +132,6 @@ readonly inputStream: CharStream | undefined;

*
* <p>The implementation for {@link RuleTagToken} returns a string of the form
* {@code ruleName:bypassTokenType}.</p>
* The implementation for {@link RuleTagToken} returns a string of the form
* `ruleName:bypassTokenType`.
*/
toString(): string;
}

@@ -5,3 +5,3 @@ /*!

*/
import { Chunk } from './Chunk';
import { Chunk } from "./Chunk";
/**

@@ -11,11 +11,9 @@ * Represents a placeholder tag in a tree pattern. A tag can have any of the

*
* <ul>
* <li>{@code expr}: An unlabeled placeholder for a parser rule {@code expr}.</li>
* <li>{@code ID}: An unlabeled placeholder for a token of type {@code ID}.</li>
* <li>{@code e:expr}: A labeled placeholder for a parser rule {@code expr}.</li>
* <li>{@code id:ID}: A labeled placeholder for a token of type {@code ID}.</li>
* </ul>
* * `expr`: An unlabeled placeholder for a parser rule `expr`.
* * `ID`: An unlabeled placeholder for a token of type `ID`.
* * `e:expr`: A labeled placeholder for a parser rule `expr`.
* * `id:ID`: A labeled placeholder for a token of type `ID`.
*
* This class does not perform any validation on the tag or label names aside
* from ensuring that the tag is a non-null, non-empty string.
* from ensuring that the tag is a defined, non-empty string.
*/

@@ -35,3 +33,3 @@ export declare class TagChunk extends Chunk {

*
* @param label The label for the tag. If this is {@code null}, the
* @param label The label for the tag. If this is `undefined`, the
* {@link TagChunk} represents an unlabeled tag.

@@ -41,3 +39,3 @@ * @param tag The tag, which should be the name of a parser rule or token

*
* @exception IllegalArgumentException if {@code tag} is {@code null} or
* @exception IllegalArgumentException if `tag` is not defined or
* empty.

@@ -49,3 +47,3 @@ */

*
* @return The tag for the chunk.
* @returns The tag for the chunk.
*/

@@ -56,3 +54,3 @@ readonly tag: string;

*
* @return The label assigned to this chunk, or {@code null} if no label is
* @returns The label assigned to this chunk, or `undefined` if no label is
* assigned to the chunk.

@@ -63,3 +61,3 @@ */

* This method returns a text representation of the tag chunk. Labeled tags
* are returned in the form {@code label:tag}, and unlabeled tags are
* are returned in the form `label:tag`, and unlabeled tags are
* returned as just the tag name.

@@ -66,0 +64,0 @@ */

@@ -5,3 +5,3 @@ /*!

*/
import { Chunk } from './Chunk';
import { Chunk } from "./Chunk";
/**

@@ -20,3 +20,3 @@ * Represents a span of raw text (concrete syntax) between tags in a tree

* @param text The text of this chunk.
* @exception IllegalArgumentException if {@code text} is {@code null}.
* @exception IllegalArgumentException if `text` is not defined.
*/

@@ -27,3 +27,3 @@ constructor(text: string);

*
* @return The text of the chunk.
* @returns The text of the chunk.
*/

@@ -34,6 +34,6 @@ readonly text: string;

*
* <p>The implementation for {@link TextChunk} returns the result of
* `text` in single quotes.</p>
* The implementation for {@link TextChunk} returns the result of
* `text` in single quotes.
*/
toString(): string;
}

@@ -5,6 +5,6 @@ /*!

*/
import { CommonToken } from '../../CommonToken';
import { CommonToken } from "../../CommonToken";
/**
* A {@link Token} object representing a token of a particular type; e.g.,
* {@code <ID>}. These tokens are created for {@link TagChunk} chunks where the
* `<ID>`. These tokens are created for {@link TagChunk} chunks where the
* tag corresponds to a lexer rule or token type.

@@ -27,3 +27,3 @@ */

* @param type The token type.
* @param label The label associated with the token tag, or {@code null} if
* @param label The label associated with the token tag, or `undefined` if
* the token tag is unlabeled.

@@ -34,3 +34,3 @@ */

* Gets the token name.
* @return The token name.
* @returns The token name.
*/

@@ -41,4 +41,4 @@ readonly tokenName: string;

*
* @return The name of the label associated with the rule tag, or
* {@code null} if this is an unlabeled rule tag.
* @returns The name of the label associated with the rule tag, or
* `undefined` if this is an unlabeled rule tag.
*/

@@ -49,4 +49,4 @@ readonly label: string | undefined;

*
* <p>The implementation for {@link TokenTagToken} returns the token tag
* formatted with {@code <} and {@code >} delimiters.</p>
* The implementation for {@link TokenTagToken} returns the token tag
* formatted with `<` and `>` delimiters.
*/

@@ -57,6 +57,6 @@ readonly text: string;

*
* <p>The implementation for {@link TokenTagToken} returns a string of the form
* {@code tokenName:type}.</p>
* The implementation for {@link TokenTagToken} returns a string of the form
* `tokenName:type`.
*/
toString(): string;
}

@@ -5,3 +5,3 @@ /*!

*/
import { RuleContext } from '../RuleContext';
import { RuleContext } from "../RuleContext";
import { ParseTree } from "./ParseTree";

@@ -14,2 +14,3 @@ import { ParseTreeVisitor } from "./ParseTreeVisitor";

readonly abstract parent: RuleNode | undefined;
abstract setParent(parent: RuleContext): void;
abstract getChild(i: number): ParseTree;

@@ -16,0 +17,0 @@ abstract accept<T>(visitor: ParseTreeVisitor<T>): T;

@@ -5,4 +5,4 @@ /*!

*/
import { Tree } from './Tree';
import { Interval } from '../misc/Interval';
import { Tree } from "./Tree";
import { Interval } from "../misc/Interval";
/** A tree that knows about an interval in a token stream

@@ -19,13 +19,13 @@ * is some kind of syntax tree. Subinterfaces distinguish

*
* <p>An interval of i..i-1 indicates an empty interval at position
* An interval of i..i-1 indicates an empty interval at position
* i in the input stream, where 0 &lt;= i &lt;= the size of the input
* token stream. Currently, the code base can only have i=0..n-1 but
* in concept one could have an empty interval after EOF. </p>
* in concept one could have an empty interval after EOF.
*
* <p>If source interval is unknown, this returns {@link Interval#INVALID}.</p>
* If source interval is unknown, this returns {@link Interval#INVALID}.
*
* <p>As a weird special case, the source interval for rules matched after
* EOF is unspecified.</p>
* As a weird special case, the source interval for rules matched after
* EOF is unspecified.
*/
readonly sourceInterval: Interval;
}

@@ -5,8 +5,9 @@ /*!

*/
import { Interval } from '../misc/Interval';
import { Parser } from '../Parser';
import { ParseTree } from './ParseTree';
import { ParseTreeVisitor } from './ParseTreeVisitor';
import { RuleNode } from './RuleNode';
import { Token } from '../Token';
import { Interval } from "../misc/Interval";
import { Parser } from "../Parser";
import { ParseTree } from "./ParseTree";
import { ParseTreeVisitor } from "./ParseTreeVisitor";
import { RuleContext } from "../RuleContext";
import { RuleNode } from "./RuleNode";
import { Token } from "../Token";
export declare class TerminalNode implements ParseTree {

@@ -19,2 +20,3 @@ _symbol: Token;

readonly parent: RuleNode | undefined;
setParent(parent: RuleContext): void;
readonly payload: Token;

@@ -21,0 +23,0 @@ readonly sourceInterval: Interval;

@@ -20,3 +20,5 @@ /*!

*/
readonly payload: any;
readonly payload: {
text?: string;
};
/**

@@ -32,5 +34,5 @@ * If there are children, get the `i`th value indexed from 0. Throws a `RangeError` if `i` is less than zero, or

/** Print out a whole tree, not just a node, in LISP format
* {@code (root child1 .. childN)}. Print just a node if this is a leaf.
* `(root child1 .. childN)`. Print just a node if this is a leaf.
*/
toStringTree(): string;
}

@@ -5,5 +5,6 @@ /*!

*/
import { Parser } from '../Parser';
import { Parser } from "../Parser";
import { ParserRuleContext } from "../ParserRuleContext";
import { ParseTree } from "./ParseTree";
import { Tree } from "./Tree";
/** A set of utility routines useful for all kinds of ANTLR trees. */

@@ -15,2 +16,3 @@ export declare class Trees {

*/
static toStringTree(t: Tree): string;
/** Print out a whole tree in LISP form. {@link #getNodeText} is used on the

@@ -20,9 +22,13 @@ * node payloads to get the text for the nodes. Detect

*/
static toStringTree(t: Tree, recog: Parser | undefined): string;
/** Print out a whole tree in LISP form. {@link #getNodeText} is used on the
* node payloads to get the text for the nodes.
*/
static toStringTree(t: ParseTree, arg2?: Parser | string[]): string;
static getNodeText(t: ParseTree, arg2: Parser | string[]): string;
static toStringTree(t: Tree, ruleNames: string[] | undefined): string;
static toStringTree(t: Tree, arg2?: Parser | string[]): string;
static getNodeText(t: Tree, recog: Parser | undefined): string;
static getNodeText(t: Tree, ruleNames: string[] | undefined): string;
/** Return ordered list of all children of this node */
static getChildren(t: ParseTree): ParseTree[];
static getChildren(t: Tree): Tree[];
/** Return a list of all ancestors of this node. The first node of

@@ -34,12 +40,13 @@ * list is the root and the last is the parent of this node.

static getAncestors(t: ParseTree): ParseTree[];
static getAncestors(t: Tree): Tree[];
/** Return true if t is u's parent or a node on path to root from u.
* Use == not equals().
* Use === not equals().
*
* @since 4.5.1
*/
static isAncestorOf(t: ParseTree, u: ParseTree): boolean;
static findAllTokenNodes(t: ParseTree, ttype: number): Array<ParseTree>;
static findAllRuleNodes(t: ParseTree, ruleIndex: number): Array<ParseTree>;
static findAllNodes(t: ParseTree, index: number, findTokens: boolean): Array<ParseTree>;
static _findAllNodes(t: ParseTree, index: number, findTokens: boolean, nodes: Array<ParseTree>): void;
static isAncestorOf(t: Tree, u: Tree): boolean;
static findAllTokenNodes(t: ParseTree, ttype: number): ParseTree[];
static findAllRuleNodes(t: ParseTree, ruleIndex: number): ParseTree[];
static findAllNodes(t: ParseTree, index: number, findTokens: boolean): ParseTree[];
static _findAllNodes(t: ParseTree, index: number, findTokens: boolean, nodes: ParseTree[]): void;
/** Get all descendents; includes t itself.

@@ -51,16 +58,22 @@ *

/** Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex
* inclusively using postorder traversal. Recursive depth-first-search.
*
* @since 4.5
*/
* inclusively using postorder traversal. Recursive depth-first-search.
*
* @since 4.5
*/
static getRootOfSubtreeEnclosingRegion(t: ParseTree, startTokenIndex: number, stopTokenIndex: number): ParserRuleContext | undefined;
/** Replace any subtree siblings of root that are completely to left
* or right of lookahead range with a CommonToken(Token.INVALID_TYPE,"...")
* node. The source interval for t is not altered to suit smaller range!
*
* WARNING: destructive to t.
*
* @since 4.5.1
*/
* or right of lookahead range with a CommonToken(Token.INVALID_TYPE,"...")
* node. The source interval for t is not altered to suit smaller range!
*
* WARNING: destructive to t.
*
* @since 4.5.1
*/
static stripChildrenOutOfRange(t: ParserRuleContext, root: ParserRuleContext, startIndex: number, stopIndex: number): void;
/** Return first node satisfying the pred
*
* @since 4.5.1
*/
static findNodeSuchThat(t: ParseTree, pred: (tree: ParseTree) => boolean): ParseTree | undefined;
static findNodeSuchThat(t: Tree, pred: (tree: Tree) => boolean): Tree | undefined;
}

@@ -5,11 +5,11 @@ /*!

*/
export * from './XPath';
export * from './XPathElement';
export * from './XPathLexer';
export * from './XPathLexerErrorListener';
export * from './XPathRuleAnywhereElement';
export * from './XPathRuleElement';
export * from './XPathTokenAnywhereElement';
export * from './XPathTokenElement';
export * from './XPathWildcardAnywhereElement';
export * from './XPathWildcardElement';
export * from "./XPath";
export * from "./XPathElement";
export * from "./XPathLexer";
export * from "./XPathLexerErrorListener";
export * from "./XPathRuleAnywhereElement";
export * from "./XPathRuleElement";
export * from "./XPathTokenAnywhereElement";
export * from "./XPathTokenElement";
export * from "./XPathWildcardAnywhereElement";
export * from "./XPathWildcardElement";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { Parser } from "../../Parser";

@@ -13,40 +13,34 @@ import { ParseTree } from "../ParseTree";

*
* <p>
* Split path into words and separators {@code /} and {@code //} via ANTLR
* Split path into words and separators `/` and `//` via ANTLR
* itself then walk path elements from left to right. At each separator-word
* pair, find set of nodes. Next stage uses those as work list.</p>
* pair, find set of nodes. Next stage uses those as work list.
*
* <p>
* The basic interface is
* {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
* But that is just shorthand for:</p>
* {@link XPath#findAll ParseTree.findAll}`(tree, pathString, parser)`.
* But that is just shorthand for:
*
* <pre>
* {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
* return p.{@link #evaluate evaluate}(tree);
* </pre>
* ```
* let p = new XPath(parser, pathString);
* return p.evaluate(tree);
* ```
*
* <p>
* See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
* allows operators:</p>
* See `TestXPath` for descriptions. In short, this
* allows operators:
*
* <dl>
* <dt>/</dt> <dd>root</dd>
* <dt>//</dt> <dd>anywhere</dd>
* <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
* operator</dd>
* </dl>
* | | |
* | --- | --- |
* | `/` | root |
* | `//` | anywhere |
 * | `!` | invert; this must appear directly after root or anywhere operator |
*
* <p>
* and path elements:</p>
* and path elements:
*
* <dl>
* <dt>ID</dt> <dd>token name</dd>
* <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
* <dt>expr</dt> <dd>rule name</dd>
* <dt>*</dt> <dd>wildcard matching any node</dd>
* </dl>
* | | |
* | --- | --- |
* | `ID` | token name |
* | `'string'` | any string literal token from the grammar |
* | `expr` | rule name |
* | `*` | wildcard matching any node |
*
* <p>
* Whitespace is not allowed.</p>
* Whitespace is not allowed.
*/

@@ -60,17 +54,15 @@ export declare class XPath {

constructor(parser: Parser, path: string);
static throwRecover(e: Error): void;
split(path: string): XPathElement[];
/**
* Convert word like {@code *} or {@code ID} or {@code expr} to a path
* element. {@code anywhere} is {@code true} if {@code //} precedes the
* Convert word like `*` or `ID` or `expr` to a path
* element. `anywhere` is `true` if `//` precedes the
* word.
*/
protected getXPathElement(wordToken: Token, anywhere: boolean): XPathElement;
static findAll(tree: ParseTree, xpath: string, parser: Parser): ParseTree[];
static findAll(tree: ParseTree, xpath: string, parser: Parser): Set<ParseTree>;
/**
* Return a list of all nodes starting at {@code t} as root that satisfy the
* path. The root {@code /} is relative to the node passed to
* {@link #evaluate}.
* Return a list of all nodes starting at `t` as root that satisfy the
* path. The root `/` is relative to the node passed to {@link evaluate}.
*/
evaluate(t: ParseTree): ParseTree[];
evaluate(t: ParseTree): Set<ParseTree>;
}

@@ -9,3 +9,3 @@ /*!

invert: boolean;
/** Construct element like {@code /ID} or {@code ID} or {@code /*} etc...
/** Construct element like `/ID` or `ID` or `/*` etc...
* op is null if just node

@@ -15,3 +15,3 @@ */

/**
* Given tree rooted at {@code t} return all nodes matched by this path
* Given tree rooted at `t` return all nodes matched by this path
* element.

@@ -18,0 +18,0 @@ */

@@ -1,2 +0,6 @@

import { ATN, CharStream, Lexer, RuleContext, Vocabulary } from 'antlr4ts';
import { ATN } from "../..";
import { CharStream } from "../..";
import { Lexer } from "../..";
import { RuleContext } from "../..";
import { Vocabulary } from "../..";
export declare class XPathLexer extends Lexer {

@@ -11,2 +15,3 @@ static readonly TOKEN_REF: number;

static readonly STRING: number;
static readonly channelNames: string[];
static readonly modeNames: string[];

@@ -22,5 +27,9 @@ static readonly ruleNames: string[];

readonly serializedATN: string;
readonly channelNames: string[];
readonly modeNames: string[];
action(_localctx: RuleContext, ruleIndex: number, actionIndex: number): void;
private ID_action(_localctx, actionIndex);
private static readonly _serializedATNSegments;
private static readonly _serializedATNSegment0;
private static readonly _serializedATNSegment1;
static readonly _serializedATN: string;

@@ -27,0 +36,0 @@ static __ATN: ATN;

@@ -9,3 +9,3 @@ /*!

export declare class XPathLexerErrorListener implements ANTLRErrorListener<number> {
syntaxError<T extends number>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException): void;
syntaxError<T extends number>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
}

@@ -8,3 +8,3 @@ /*!

/**
* Either {@code ID} at start of path or {@code ...//ID} in middle of path.
* Either `ID` at start of path or `...//ID` in middle of path.
*/

@@ -11,0 +11,0 @@ export declare class XPathRuleAnywhereElement extends XPathElement {

@@ -16,3 +16,3 @@ /*!

* zero to that number, inclusively, thus querying all stored entries.
* @return the highest token type value
* @returns the highest token type value
*/

@@ -22,7 +22,7 @@ readonly maxTokenType: number;

* Gets the string literal associated with a token type. The string returned
* by this method, when not {@code null}, can be used unaltered in a parser
* by this method, when not `undefined`, can be used unaltered in a parser
* grammar to represent this token type.
*
* <p>The following table shows examples of lexer rules and the literal
* names assigned to the corresponding token types.</p>
* The following table shows examples of lexer rules and the literal
* names assigned to the corresponding token types.
*

@@ -36,15 +36,15 @@ * <table>

* <tr>
* <td>{@code THIS : 'this';}</td>
* <td>{@code 'this'}</td>
* <td>{@code "'this'"}</td>
* <td>`THIS : 'this';`</td>
* <td>`'this'`</td>
* <td>`"'this'"`</td>
* </tr>
* <tr>
* <td>{@code SQUOTE : '\'';}</td>
* <td>{@code '\''}</td>
* <td>{@code "'\\''"}</td>
* <td>`SQUOTE : '\'';`</td>
* <td>`'\''`</td>
* <td>`"'\\''"`</td>
* </tr>
* <tr>
* <td>{@code ID : [A-Z]+;}</td>
* <td>`ID : [A-Z]+;`</td>
* <td>n/a</td>
* <td>{@code null}</td>
* <td>`undefined`</td>
* </tr>

@@ -55,4 +55,4 @@ * </table>

*
* @return The string literal associated with the specified token type, or
* {@code null} if no string literal is associated with the type.
* @returns The string literal associated with the specified token type, or
* `undefined` if no string literal is associated with the type.
*/

@@ -62,18 +62,16 @@ getLiteralName(tokenType: number): string | undefined;

* Gets the symbolic name associated with a token type. The string returned
* by this method, when not {@code null}, can be used unaltered in a parser
* by this method, when not `undefined`, can be used unaltered in a parser
* grammar to represent this token type.
*
* <p>This method supports token types defined by any of the following
* methods:</p>
* This method supports token types defined by any of the following
* methods:
*
* <ul>
* <li>Tokens created by lexer rules.</li>
* <li>Tokens defined in a <code>tokens{}</code> block in a lexer or parser
* grammar.</li>
* <li>The implicitly defined {@code EOF} token, which has the token type
* {@link Token#EOF}.</li>
* </ul>
* * Tokens created by lexer rules.
* * Tokens defined in a `tokens{}` block in a lexer or parser
* grammar.
* * The implicitly defined `EOF` token, which has the token type
* {@link Token#EOF}.
*
* <p>The following table shows examples of lexer rules and the literal
* names assigned to the corresponding token types.</p>
* The following table shows examples of lexer rules and the literal
* names assigned to the corresponding token types.
*

@@ -86,12 +84,12 @@ * <table>

* <tr>
* <td>{@code THIS : 'this';}</td>
* <td>{@code THIS}</td>
* <td>`THIS : 'this';`</td>
* <td>`THIS`</td>
* </tr>
* <tr>
* <td>{@code SQUOTE : '\'';}</td>
* <td>{@code SQUOTE}</td>
* <td>`SQUOTE : '\'';`</td>
* <td>`SQUOTE`</td>
* </tr>
* <tr>
* <td>{@code ID : [A-Z]+;}</td>
* <td>{@code ID}</td>
* <td>`ID : [A-Z]+;`</td>
* <td>`ID`</td>
* </tr>

@@ -102,4 +100,4 @@ * </table>

*
* @return The symbolic name associated with the specified token type, or
* {@code null} if no symbolic name is associated with the type.
* @returns The symbolic name associated with the specified token type, or
* `undefined` if no symbolic name is associated with the type.
*/

@@ -110,17 +108,15 @@ getSymbolicName(tokenType: number): string | undefined;

*
* <p>ANTLR provides a default implementation of this method, but
* ANTLR provides a default implementation of this method, but
* applications are free to override the behavior in any manner which makes
* sense for the application. The default implementation returns the first
* result from the following list which produces a non-{@code null}
* result.</p>
* result from the following list which produces a non-`undefined`
* result.
*
* <ol>
* <li>The result of {@link #getLiteralName}</li>
* <li>The result of {@link #getSymbolicName}</li>
* <li>The result of {@link Integer#toString}</li>
* </ol>
* 1. The result of {@link #getLiteralName}
* 1. The result of {@link #getSymbolicName}
* 1. The result of {@link Integer#toString}
*
* @param tokenType The token type.
*
* @return The display name of the token type, for use in error reporting or
* @returns The display name of the token type, for use in error reporting or
* other user-visible messages which reference specific token types.

@@ -127,0 +123,0 @@ */

@@ -5,3 +5,3 @@ /*!

*/
import { Vocabulary } from './Vocabulary';
import { Vocabulary } from "./Vocabulary";
/**

@@ -17,6 +17,5 @@ * This class provides a default implementation of the {@link Vocabulary}

*
* <p>
* No literal or symbol names are assigned to token types, so
* {@link #getDisplayName(int)} returns the numeric value for all tokens
* except {@link Token#EOF}.</p>
* except {@link Token#EOF}.
*/

@@ -37,3 +36,3 @@ static readonly EMPTY_VOCABULARY: VocabularyImpl;

* @param displayNames The display names assigned to tokens, or an empty array
* to use the values in {@code literalNames} and {@code symbolicNames} as
* to use the values in `literalNames` and `symbolicNames` as
* the source of display names, as described in

@@ -46,3 +45,3 @@ * {@link #getDisplayName(int)}.

*/
constructor(literalNames: (string | undefined)[], symbolicNames: (string | undefined)[], displayNames: (string | undefined)[]);
constructor(literalNames: Array<string | undefined>, symbolicNames: Array<string | undefined>, displayNames: Array<string | undefined>);
readonly maxTokenType: number;

@@ -49,0 +48,0 @@ getLiteralName(tokenType: number): string | undefined;

@@ -5,3 +5,3 @@ /*!

*/
import { Token } from './Token';
import { Token } from "./Token";
export interface WritableToken extends Token {

@@ -8,0 +8,0 @@ text: string | undefined;

{
"name": "antlr4ts",
"version": "0.5.0-alpha.8565fd2b",
"description": "ANTLR 4 runtime for Typescript",
"main": "dist/antlr4ts.js",
"types": "dist/antlr4ts.d.ts",
"files": [
"dist",
"src"
],
"version": "0.5.0-dev",
"description": "ANTLR 4 runtime for JavaScript written in Typescript",
"main": "dist/index.js",
"types": "dist/index.d.ts",
"scripts": {
"prepare": "npm run antlr && tsc -p .",
"antlr": "cd ../../src/tree/xpath && antlr4ts XPathLexer.g4 -DbaseImportPath=../.."
},
"repository": {

@@ -15,6 +15,2 @@ "type": "git",

},
"scripts": {
"build": "npm run generate && webpack",
"generate": "cd tree/xpath && antlr4ts XPathLexer.g4"
},
"keywords": [

@@ -31,4 +27,8 @@ "ANTLR4",

"devDependencies": {
"antlr4ts-cli": "^0.4.0-alpha.4"
"antlr4ts-cli": "^0.5.0-dev",
"typescript": "^2.8.3"
},
"dependencies": {
"source-map-support": "^0.5.16"
}
}
SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc