@microsoft/tsdoc - npm Package Compare versions

Comparing version 0.3.0 to 0.4.0

lib/__tests__/ParagraphSplitter.test.d.ts

CHANGELOG.md
# Change Log - @microsoft/tsdoc
## 0.4.0
Mon, 27 Aug 2018
- Rename `DocCodeSpan.text` to `DocCodeSpan.code` and model the delimiters using particles
- Add support for code fences (`DocCodeFence`)
- `DocSection` content is now grouped into `DocParagraph` nodes; blank lines are used to indicate paragraph boundaries
- Rename `DocComment.deprecated` to `deprecatedBlock`
## 0.3.0

@@ -4,0 +12,0 @@ Fri, 24 Aug 2018
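
The renames in 0.4.0 are breaking for consumers that read `DocCodeSpan.text` or `DocComment.deprecated`. Below is a minimal migration sketch; it assumes `TSDocParser.parseString()` is available (that API is not part of this comparison) and is illustrative rather than authoritative.

```ts
// Minimal migration sketch for the 0.4.0 renames.
// Assumption: TSDocParser.parseString() exists and returns a parser context (not shown in this diff).
import { TSDocParser, DocCodeSpan, DocNodeKind } from '@microsoft/tsdoc';

const parser: TSDocParser = new TSDocParser();
const context = parser.parseString(
  '/**\n * Calls `render()` internally.\n * @deprecated Use `paint()` instead.\n */'
);

// 0.3.0: context.docComment.deprecated  ->  0.4.0: context.docComment.deprecatedBlock
if (context.docComment.deprecatedBlock !== undefined) {
  console.log('has a @deprecated block');
}

// Summary content is now grouped into DocParagraph nodes, and code spans expose .code instead of .text
for (const paragraph of context.docComment.summarySection.getChildNodes()) {
  for (const child of paragraph.getChildNodes()) {
    if (child.kind === DocNodeKind.CodeSpan) {
      console.log((child as DocCodeSpan).code);  // "render()"
    }
  }
}
```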

lib/nodes/DocCodeSpan.d.ts
import { DocNodeKind, IDocNodeParameters, DocNode } from './DocNode';
import { Excerpt } from '../parser/Excerpt';
/**

@@ -6,3 +7,6 @@ * Constructor parameters for {@link DocCodeSpan}.

export interface IDocCodeSpanParameters extends IDocNodeParameters {
text: string;
openingDelimiterExcerpt?: Excerpt;
codeExcerpt?: Excerpt;
code: string;
closingDelimiterExcerpt?: Excerpt;
}

@@ -16,7 +20,6 @@ /**

readonly kind: DocNodeKind;
private readonly _openingDelimiterParticle;
private readonly _codeParticle;
private readonly _closingDelimiterParticle;
/**
* The text that should be rendered as code.
*/
readonly text: string;
/**
* Don't call this directly. Instead use {@link TSDocParser}

@@ -26,2 +29,11 @@ * @internal

constructor(parameters: IDocCodeSpanParameters);
/**
* The text that should be rendered as code, excluding the backtick delimiters.
*/
readonly code: string;
/**
* {@inheritdoc}
* @override
*/
getChildNodes(): ReadonlyArray<DocNode>;
}

@@ -17,2 +17,3 @@ "use strict";

var DocNode_1 = require("./DocNode");
var DocParticle_1 = require("./DocParticle");
/**

@@ -32,5 +33,37 @@ * Represents CommonMark-style code span, i.e. code surrounded by

_this.kind = "CodeSpan" /* CodeSpan */;
_this.text = parameters.text;
_this._openingDelimiterParticle = new DocParticle_1.DocParticle({
excerpt: parameters.openingDelimiterExcerpt,
content: '`'
});
_this._codeParticle = new DocParticle_1.DocParticle({
excerpt: parameters.codeExcerpt,
content: parameters.code
});
_this._closingDelimiterParticle = new DocParticle_1.DocParticle({
excerpt: parameters.closingDelimiterExcerpt,
content: '`'
});
return _this;
}
Object.defineProperty(DocCodeSpan.prototype, "code", {
/**
* The text that should be rendered as code, excluding the backtick delimiters.
*/
get: function () {
return this._codeParticle.content;
},
enumerable: true,
configurable: true
});
/**
* {@inheritdoc}
* @override
*/
DocCodeSpan.prototype.getChildNodes = function () {
return [
this._openingDelimiterParticle,
this._codeParticle,
this._closingDelimiterParticle
];
};
return DocCodeSpan;

@@ -37,0 +70,0 @@ }(DocNode_1.DocNode));
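
With the delimiters modeled as particles, a `DocCodeSpan` can be constructed from the code text alone; the excerpt parameters shown above are optional. A short sketch (constructing nodes outside the parser is purely illustrative):

```ts
import { DocCodeSpan } from '@microsoft/tsdoc';

// Synthetic node using the new parameter shape; excerpts are omitted (they are declared optional above).
const span: DocCodeSpan = new DocCodeSpan({ code: 'let total = 0;' });

console.log(span.code);                    // "let total = 0;"
console.log(span.getChildNodes().length);  // 3: opening backtick particle, code particle, closing backtick particle
```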

@@ -59,3 +59,3 @@ import { DocNode, DocNodeKind, IDocNodeParameters } from './DocNode';

*/
deprecated: DocBlock | undefined;
deprecatedBlock: DocBlock | undefined;
/**

@@ -62,0 +62,0 @@ * The collection of parsed `@param` blocks for this doc comment.

@@ -36,2 +36,3 @@ "use strict";

_this.privateRemarks = undefined;
_this.deprecatedBlock = undefined;
_this.paramBlocks = [];

@@ -64,7 +65,13 @@ _this.returnsBlock = undefined;

DocComment.prototype.getChildNodes = function () {
var result = [this.summarySection];
var result = [];
result.push(this.summarySection);
if (this.remarksBlock) {
result.push(this.remarksBlock);
}
result.push.apply(result, this._customBlocks);
if (this.privateRemarks) {
result.push(this.privateRemarks);
}
if (this.deprecatedBlock) {
result.push(this.deprecatedBlock);
}
result.push.apply(result, this.paramBlocks);

@@ -74,2 +81,3 @@ if (this.returnsBlock) {

}
result.push.apply(result, this._customBlocks);
result.push.apply(result, this.modifierTagSet.nodes);

@@ -76,0 +84,0 @@ return result;

@@ -8,2 +8,3 @@ import { Excerpt } from '../parser/Excerpt';

BlockTag = "BlockTag",
CodeFence = "CodeFence",
CodeSpan = "CodeSpan",

@@ -10,0 +11,0 @@ Comment = "Comment",

@@ -1,7 +0,7 @@

import { DocNodeKind } from './DocNode';
import { DocSection, IDocSectionParameters } from './DocSection';
import { DocNodeKind, DocNode } from './DocNode';
import { DocNodeContainer, IDocNodeContainerParameters } from './DocNodeContainer';
/**
* Constructor parameters for {@link DocParagraph}.
*/
export interface IDocParagraphParameters extends IDocSectionParameters {
export interface IDocParagraphParameters extends IDocNodeContainerParameters {
}

@@ -13,3 +13,3 @@ /**

*/
export declare class DocParagraph extends DocSection {
export declare class DocParagraph extends DocNodeContainer {
/** {@inheritdoc} */

@@ -22,2 +22,7 @@ readonly kind: DocNodeKind;

constructor(parameters: IDocParagraphParameters);
/**
* {@inheritdoc}
* @override
*/
isAllowedChildNode(docNode: DocNode): boolean;
}

@@ -16,3 +16,3 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
var DocSection_1 = require("./DocSection");
var DocNodeContainer_1 = require("./DocNodeContainer");
/**

@@ -35,5 +35,25 @@ * Represents a paragraph of text, similar to a `<p>` element in HTML.

}
/**
* {@inheritdoc}
* @override
*/
DocParagraph.prototype.isAllowedChildNode = function (docNode) {
// NOTE: DocNodeKind.Paragraph cannot be nested
switch (docNode.kind) {
case "BlockTag" /* BlockTag */:
case "CodeSpan" /* CodeSpan */:
case "ErrorText" /* ErrorText */:
case "EscapedText" /* EscapedText */:
case "HtmlStartTag" /* HtmlStartTag */:
case "HtmlEndTag" /* HtmlEndTag */:
case "InlineTag" /* InlineTag */:
case "PlainText" /* PlainText */:
case "SoftBreak" /* SoftBreak */:
return true;
}
return false;
};
return DocParagraph;
}(DocSection_1.DocSection));
}(DocNodeContainer_1.DocNodeContainer));
exports.DocParagraph = DocParagraph;
//# sourceMappingURL=DocParagraph.js.map
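
`DocParagraph` now validates its children: inline kinds such as plain text and code spans are accepted, while paragraphs cannot be nested and code fences belong to the enclosing section. A hedged sketch, assuming `appendNode()` is inherited from `DocNodeContainer` (as `DocSection.appendNodeInParagraph` uses below):

```ts
import { DocParagraph, DocPlainText } from '@microsoft/tsdoc';

const paragraph: DocParagraph = new DocParagraph({});
const text: DocPlainText = new DocPlainText({ text: 'Plain text is allowed here.' });

console.log(paragraph.isAllowedChildNode(text));                  // true
console.log(paragraph.isAllowedChildNode(new DocParagraph({})));  // false: paragraphs cannot be nested

paragraph.appendNode(text);  // assumption: appendNode() comes from DocNodeContainer
```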

@@ -11,4 +11,9 @@ import { DocNode, DocNodeKind, IDocNodeParameters } from './DocNode';

* to contain no special symbols or meaning.
*
* @remarks
* The text content must not contain newline characters.
* Use DocSoftBreak to represent manual line splitting.
*/
export declare class DocPlainText extends DocNode {
private static readonly _newlineCharacterRegExp;
/** {@inheritdoc} */

@@ -15,0 +20,0 @@ readonly kind: DocNodeKind;

@@ -20,2 +20,6 @@ "use strict";

* to contain no special symbols or meaning.
*
* @remarks
* The text content must not contain newline characters.
* Use DocSoftBreak to represent manual line splitting.
*/

@@ -32,5 +36,12 @@ var DocPlainText = /** @class */ (function (_super) {

_this.kind = "PlainText" /* PlainText */;
if (DocPlainText._newlineCharacterRegExp.test(parameters.text)) {
// Use DocSoftBreak to represent manual line splitting
throw new Error('The DocPlainText content must not contain newline characters');
}
_this.text = parameters.text;
return _this;
}
// TODO: We should also prohibit "\r", but this requires updating LineExtractor
// to interpret a lone "\r" as a newline
DocPlainText._newlineCharacterRegExp = /[\n]/;
return DocPlainText;

@@ -37,0 +48,0 @@ }(DocNode_1.DocNode));
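
The `DocPlainText` constructor now rejects embedded newlines; line breaks are modeled explicitly with `DocSoftBreak`. A small sketch of the new behavior (assuming the excerpt parameter remains optional):

```ts
import { DocPlainText } from '@microsoft/tsdoc';

new DocPlainText({ text: 'a single line of text' });  // OK

try {
  new DocPlainText({ text: 'line one\nline two' });   // rejected
} catch (error) {
  console.log((error as Error).message);  // "The DocPlainText content must not contain newline characters"
}
// Represent the split as two DocPlainText nodes separated by a DocSoftBreak instead.
```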

@@ -1,6 +0,7 @@

import { DocNode, DocNodeKind, IDocNodeParameters } from './DocNode';
import { DocNode, DocNodeKind } from './DocNode';
import { DocNodeContainer, IDocNodeContainerParameters } from './DocNodeContainer';
/**
* Constructor parameters for {@link DocSection}.
*/
export interface IDocSectionParameters extends IDocNodeParameters {
export interface IDocSectionParameters extends IDocNodeContainerParameters {
}

@@ -11,6 +12,5 @@ /**

*/
export declare class DocSection extends DocNode {
export declare class DocSection extends DocNodeContainer {
/** {@inheritdoc} */
readonly kind: DocNodeKind;
private readonly _nodes;
/**

@@ -22,24 +22,11 @@ * Don't call this directly. Instead use {@link TSDocParser}

/**
* The child nodes. Note that for subclasses {@link getChildNodes()} may enumerate
* additional nodes that are not part of this collection.
*/
readonly nodes: ReadonlyArray<DocNode>;
/**
* {@inheritdoc}
* @override
*/
getChildNodes(): ReadonlyArray<DocNode>;
/**
* Returns true if the specified `docNode` is allowed to be added as a child node.
* The {@link appendNode()} and {@link appendNodes()} functions use this to validate their
* inputs.
*
* @virtual
*/
isAllowedChildNode(docNode: DocNode): boolean;
/**
* Append a node to the collection.
* If the last item in DocSection.nodes is not a DocParagraph, a new paragraph
* is started. Either way, the provided docNode will be appended to the paragraph.
*/
appendNode(docNode: DocNode): void;
appendNodes(docNodes: ReadonlyArray<DocNode>): void;
appendNodeInParagraph(docNode: DocNode): void;
}

@@ -16,3 +16,4 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
var DocNode_1 = require("./DocNode");
var DocParagraph_1 = require("./DocParagraph");
var DocNodeContainer_1 = require("./DocNodeContainer");
/**

@@ -32,16 +33,4 @@ * Represents a general block of rich text. DocSection is the base class for DocNode classes that

_this.kind = "Section" /* Section */;
_this._nodes = [];
return _this;
}
Object.defineProperty(DocSection.prototype, "nodes", {
/**
* The child nodes. Note that for subclasses {@link getChildNodes()} may enumerate
* additional nodes that are not part of this collection.
*/
get: function () {
return this._nodes;
},
enumerable: true,
configurable: true
});
/**

@@ -51,24 +40,7 @@ * {@inheritdoc}

*/
DocSection.prototype.getChildNodes = function () {
return this._nodes;
};
/**
* Returns true if the specified `docNode` is allowed to be added as a child node.
* The {@link appendNode()} and {@link appendNodes()} functions use this to validate their
* inputs.
*
* @virtual
*/
DocSection.prototype.isAllowedChildNode = function (docNode) {
switch (docNode.kind) {
case "BlockTag" /* BlockTag */:
case "CodeSpan" /* CodeSpan */:
case "CodeFence" /* CodeFence */:
case "ErrorText" /* ErrorText */:
case "EscapedText" /* EscapedText */:
case "HtmlStartTag" /* HtmlStartTag */:
case "HtmlEndTag" /* HtmlEndTag */:
case "InlineTag" /* InlineTag */:
case "Paragraph" /* Paragraph */:
case "PlainText" /* PlainText */:
case "SoftBreak" /* SoftBreak */:
return true;

@@ -79,19 +51,22 @@ }

/**
* Append a node to the collection.
* If the last item in DocSection.nodes is not a DocParagraph, a new paragraph
* is started. Either way, the provided docNode will be appended to the paragraph.
*/
DocSection.prototype.appendNode = function (docNode) {
if (!this.isAllowedChildNode(docNode)) {
throw new Error("A DocSection cannot contain nodes of type " + docNode.kind);
DocSection.prototype.appendNodeInParagraph = function (docNode) {
var paragraphNode = undefined;
if (this.nodes.length > 0) {
var lastNode = this.nodes[this.nodes.length - 1];
if (lastNode.kind === "Paragraph" /* Paragraph */) {
paragraphNode = lastNode;
}
}
this._nodes.push(docNode);
};
DocSection.prototype.appendNodes = function (docNodes) {
for (var _i = 0, docNodes_1 = docNodes; _i < docNodes_1.length; _i++) {
var docNode = docNodes_1[_i];
this.appendNode(docNode);
if (!paragraphNode) {
paragraphNode = new DocParagraph_1.DocParagraph({});
this.appendNode(paragraphNode);
}
paragraphNode.appendNode(docNode);
};
return DocSection;
}(DocNode_1.DocNode));
}(DocNodeContainer_1.DocNodeContainer));
exports.DocSection = DocSection;
//# sourceMappingURL=DocSection.js.map
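
`appendNodeInParagraph()` implements the new paragraph grouping: inline nodes are funneled into the trailing `DocParagraph`, creating one on demand. A hedged usage sketch, assuming `nodes` and `appendNode()` are inherited from `DocNodeContainer` as the code above implies:

```ts
import { DocSection, DocPlainText } from '@microsoft/tsdoc';

const section: DocSection = new DocSection({});
section.appendNodeInParagraph(new DocPlainText({ text: 'First sentence. ' }));
section.appendNodeInParagraph(new DocPlainText({ text: 'Second sentence.' }));

// Both plain-text nodes land in a single implicitly created DocParagraph:
console.log(section.nodes.length);                     // 1
console.log(section.nodes[0].getChildNodes().length);  // 2
```
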
export * from './DocBlock';
export * from './DocBlockTag';
export * from './DocCodeFence';
export * from './DocCodeSpan';

@@ -12,2 +13,3 @@ export * from './DocComment';

export * from './DocNode';
export * from './DocNodeContainer';
export * from './DocParagraph';

@@ -14,0 +16,0 @@ export * from './DocParamBlock';

@@ -8,2 +8,3 @@ "use strict";

__export(require("./DocBlockTag"));
__export(require("./DocCodeFence"));
__export(require("./DocCodeSpan"));

@@ -18,2 +19,3 @@ __export(require("./DocComment"));

__export(require("./DocNode"));
__export(require("./DocNodeContainer"));
__export(require("./DocParagraph"));

@@ -20,0 +22,0 @@ __export(require("./DocParamBlock"));

@@ -6,3 +6,3 @@ import { ParserContext } from './ParserContext';

export declare class LineExtractor {
private static readonly _whitespaceRegExp;
private static readonly _whitespaceCharacterRegExp;
/**

@@ -9,0 +9,0 @@ * This step parses an entire code comment from slash-star-star until star-slash,

@@ -65,3 +65,3 @@ "use strict";

}
else if (!LineExtractor._whitespaceRegExp.test(current)) {
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
parserContext.log.addMessageForTextRange('Expecting a leading "/**"', range.getNewRange(currentIndex, currentIndex + 1));

@@ -107,3 +107,3 @@ return false;

}
else if (!LineExtractor._whitespaceRegExp.test(current)) {
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
collectingLineEnd = nextIndex;

@@ -136,3 +136,3 @@ }

}
else if (!LineExtractor._whitespaceRegExp.test(current)) {
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
// If the star is missing, then start the line here

@@ -154,3 +154,3 @@ // Example: "/**\nL1*/"

};
LineExtractor._whitespaceRegExp = /^\s$/;
LineExtractor._whitespaceCharacterRegExp = /^\s$/;
return LineExtractor;

@@ -157,0 +157,0 @@ }());

@@ -17,3 +17,4 @@ import { ParserContext } from './ParserContext';

private _parseParamBlock;
private _pushDocNode;
private _pushParagraphNode;
private _pushSectionNode;
private _parseBackslashEscape;

@@ -30,2 +31,3 @@ private _parseBlockTag;

private _parseHtmlName;
private _parseCodeFence;
private _parseCodeSpan;

@@ -32,0 +34,0 @@ private _readSpacingAndNewlines;

@@ -36,3 +36,3 @@ "use strict";

this._tokenReader.readToken();
this._pushDocNode(new nodes_1.DocSoftBreak({
this._pushParagraphNode(new nodes_1.DocSoftBreak({
excerpt: new Excerpt_1.Excerpt({ content: this._tokenReader.extractAccumulatedSequence() })

@@ -43,3 +43,3 @@ }));

this._pushAccumulatedPlainText();
this._pushDocNode(this._parseBackslashEscape());
this._pushParagraphNode(this._parseBackslashEscape());
break;

@@ -52,7 +52,7 @@ case Token_1.TokenKind.AtSign:

this._pushAccumulatedPlainText();
this._pushDocNode(this._parseInlineTag());
this._pushParagraphNode(this._parseInlineTag());
break;
case Token_1.TokenKind.RightCurlyBracket:
this._pushAccumulatedPlainText();
this._pushDocNode(this._createError('The "}" character should be escaped using a backslash to avoid confusion with a TSDoc inline tag'));
this._pushParagraphNode(this._createError('The "}" character should be escaped using a backslash to avoid confusion with a TSDoc inline tag'));
break;

@@ -63,6 +63,6 @@ case Token_1.TokenKind.LessThan:

if (this._tokenReader.peekTokenAfterKind() === Token_1.TokenKind.Slash) {
this._pushDocNode(this._parseHtmlEndTag());
this._pushParagraphNode(this._parseHtmlEndTag());
}
else {
this._pushDocNode(this._parseHtmlStartTag());
this._pushParagraphNode(this._parseHtmlStartTag());
}

@@ -72,7 +72,13 @@ break;

this._pushAccumulatedPlainText();
this._pushDocNode(this._createError('The ">" character should be escaped using a backslash to avoid confusion with an HTML tag'));
this._pushParagraphNode(this._createError('The ">" character should be escaped using a backslash to avoid confusion with an HTML tag'));
break;
case Token_1.TokenKind.Backtick:
this._pushAccumulatedPlainText();
this._pushDocNode(this._parseCodeSpan());
if (this._tokenReader.peekTokenAfterKind() === Token_1.TokenKind.Backtick
&& this._tokenReader.peekTokenAfterAfterKind() === Token_1.TokenKind.Backtick) {
this._pushSectionNode(this._parseCodeFence());
}
else {
this._pushParagraphNode(this._parseCodeSpan());
}
break;

@@ -90,3 +96,3 @@ default:

var plainTextSequence = this._tokenReader.extractAccumulatedSequence();
this._pushDocNode(new nodes_1.DocPlainText({
this._pushParagraphNode(new nodes_1.DocPlainText({
text: plainTextSequence.toString(),

@@ -103,3 +109,3 @@ excerpt: new Excerpt_1.Excerpt({ content: plainTextSequence })

if (parsedBlockTag.kind !== "BlockTag" /* BlockTag */) {
this._pushDocNode(parsedBlockTag);
this._pushParagraphNode(parsedBlockTag);
return;

@@ -138,3 +144,3 @@ }

}
this._pushDocNode(docBlockTag);
this._pushParagraphNode(docBlockTag);
};

@@ -151,3 +157,3 @@ NodeParser.prototype._addBlockToDocComment = function (block) {

case StandardTags_1.StandardTags.deprecated.tagNameWithUpperCase:
docComment.deprecated = block;
docComment.deprecatedBlock = block;
break;

@@ -222,3 +228,7 @@ case StandardTags_1.StandardTags.returns.tagNameWithUpperCase:

};
NodeParser.prototype._pushDocNode = function (docNode) {
NodeParser.prototype._pushParagraphNode = function (docNode) {
this._currentSection.appendNodeInParagraph(docNode);
this._verbatimNodes.push(docNode);
};
NodeParser.prototype._pushSectionNode = function (docNode) {
this._currentSection.appendNode(docNode);

@@ -595,2 +605,150 @@ this._verbatimNodes.push(docNode);

};
NodeParser.prototype._parseCodeFence = function () {
this._tokenReader.assertAccumulatedSequenceIsEmpty();
var startMarker = this._tokenReader.createMarker();
var endOfOpeningDelimiterMarker = startMarker + 2;
switch (this._tokenReader.peekPreviousTokenKind()) {
case Token_1.TokenKind.Newline:
case Token_1.TokenKind.None:
break;
default:
return this._backtrackAndCreateErrorRange(startMarker,
// include the three backticks so they don't get reinterpreted as a code span
endOfOpeningDelimiterMarker, 'The opening backtick for a code fence must appear at the start of the line');
}
// Read the opening ``` delimiter
var openingDelimiter = '';
openingDelimiter += this._tokenReader.readToken();
openingDelimiter += this._tokenReader.readToken();
openingDelimiter += this._tokenReader.readToken();
if (openingDelimiter !== '```') {
// This would be a parser bug -- the caller of _parseCodeFence() should have verified this while
// looking ahead to distinguish code spans/fences
throw new Error('Expecting three backticks');
}
var openingDelimiterSequence = this._tokenReader.extractAccumulatedSequence();
// Read any spaces after the delimiter,
// but NOT the Newline since that goes with the language particle
while (this._tokenReader.peekTokenKind() === Token_1.TokenKind.Spacing) {
this._tokenReader.readToken();
}
var openingDelimiterExcerpt = new Excerpt_1.Excerpt({
content: openingDelimiterSequence,
spacingAfterContent: this._tokenReader.tryExtractAccumulatedSequence()
});
// Read the language specifier (if present) and newline
var done = false;
var startOfPaddingMarker = undefined;
while (!done) {
switch (this._tokenReader.peekTokenKind()) {
case Token_1.TokenKind.Spacing:
case Token_1.TokenKind.Newline:
if (startOfPaddingMarker === undefined) {
// Starting a new run of spacing characters
startOfPaddingMarker = this._tokenReader.createMarker();
}
if (this._tokenReader.peekTokenKind() === Token_1.TokenKind.Newline) {
done = true;
}
this._tokenReader.readToken();
break;
case Token_1.TokenKind.Backtick:
var failure = this._createFailureForToken('The language specifier cannot contain backtick characters');
return this._backtrackAndCreateErrorRangeForFailure(startMarker, endOfOpeningDelimiterMarker, 'Error parsing code fence: ', failure);
case Token_1.TokenKind.EndOfInput:
var failure2 = this._createFailureForToken('Missing closing delimiter');
return this._backtrackAndCreateErrorRangeForFailure(startMarker, endOfOpeningDelimiterMarker, 'Error parsing code fence: ', failure2);
default:
// more non-spacing content
startOfPaddingMarker = undefined;
this._tokenReader.readToken();
break;
}
}
// At this point, we must have accumulated at least a newline token.
// Example: "pov-ray sdl \n"
var languageSequence = this._tokenReader.extractAccumulatedSequence();
var languageExcerpt = new Excerpt_1.Excerpt({
// Example: "pov-ray sdl"
content: languageSequence.getNewSequence(languageSequence.startIndex, startOfPaddingMarker),
// Example: " \n"
spacingAfterContent: languageSequence.getNewSequence(startOfPaddingMarker, languageSequence.endIndex)
});
// Read the code content until we see the closing ``` delimiter
var codeEndMarker = -1;
done = false;
var tokenBeforeDelimiter;
while (!done) {
switch (this._tokenReader.peekTokenKind()) {
case Token_1.TokenKind.EndOfInput:
var failure2 = this._createFailureForToken('Missing closing delimiter');
return this._backtrackAndCreateErrorRangeForFailure(startMarker, endOfOpeningDelimiterMarker, 'Error parsing code fence: ', failure2);
case Token_1.TokenKind.Newline:
tokenBeforeDelimiter = this._tokenReader.readToken();
codeEndMarker = this._tokenReader.createMarker();
while (this._tokenReader.peekTokenKind() === Token_1.TokenKind.Spacing) {
tokenBeforeDelimiter = this._tokenReader.readToken();
}
if (this._tokenReader.peekTokenKind() !== Token_1.TokenKind.Backtick) {
break;
}
this._tokenReader.readToken(); // first backtick
if (this._tokenReader.peekTokenKind() !== Token_1.TokenKind.Backtick) {
break;
}
this._tokenReader.readToken(); // second backtick
if (this._tokenReader.peekTokenKind() !== Token_1.TokenKind.Backtick) {
break;
}
this._tokenReader.readToken(); // third backtick
done = true;
break;
default:
this._tokenReader.readToken();
break;
}
}
if (tokenBeforeDelimiter.kind !== Token_1.TokenKind.Newline) {
this._parserContext.log.addMessageForTextRange('The closing delimiter for a code fence must not be indented', tokenBeforeDelimiter.range);
}
// Example: "code 1\ncode 2\n ```"
var codeAndDelimiterSequence = this._tokenReader.extractAccumulatedSequence();
var codeExcerpt = new Excerpt_1.Excerpt({
content: codeAndDelimiterSequence.getNewSequence(codeAndDelimiterSequence.startIndex, codeEndMarker)
});
// Read the spacing and newline after the closing delimiter
done = false;
while (!done) {
switch (this._tokenReader.peekTokenKind()) {
case Token_1.TokenKind.Spacing:
this._tokenReader.readToken();
break;
case Token_1.TokenKind.Newline:
done = true;
this._tokenReader.readToken();
break;
case Token_1.TokenKind.EndOfInput:
done = true;
break;
default:
this._parserContext.log.addMessageForTextRange('Unexpected characters after closing delimiter for code fence', this._tokenReader.peekToken().range);
done = true;
break;
}
}
var closingDelimiterExcerpt = new Excerpt_1.Excerpt({
// Example: "```"
content: codeAndDelimiterSequence.getNewSequence(codeEndMarker, codeAndDelimiterSequence.endIndex),
// Example: " \n"
spacingAfterContent: this._tokenReader.tryExtractAccumulatedSequence()
});
return new nodes_1.DocCodeFence({
openingDelimiterExcerpt: openingDelimiterExcerpt,
languageExcerpt: languageExcerpt,
language: languageExcerpt.content.toString(),
codeExcerpt: codeExcerpt,
code: codeExcerpt.content.toString(),
closingDelimiterExcerpt: closingDelimiterExcerpt
});
};
NodeParser.prototype._parseCodeSpan = function () {

@@ -612,4 +770,8 @@ this._tokenReader.assertAccumulatedSequenceIsEmpty();

this._tokenReader.readToken(); // read the backtick
var text = '';
var openingDelimiterExcerpt = new Excerpt_1.Excerpt({
content: this._tokenReader.extractAccumulatedSequence()
});
var closingBacktickMarker;
var codeExcerpt;
var closingDelimiterExcerpt;
// Parse the content backtick

@@ -620,4 +782,10 @@ while (true) {

if (peekedTokenKind === Token_1.TokenKind.Backtick) {
codeExcerpt = new Excerpt_1.Excerpt({
content: this._tokenReader.extractAccumulatedSequence()
});
closingBacktickMarker = this._tokenReader.createMarker();
this._tokenReader.readToken();
closingDelimiterExcerpt = new Excerpt_1.Excerpt({
content: this._tokenReader.extractAccumulatedSequence()
});
break;

@@ -628,3 +796,3 @@ }

}
text += this._tokenReader.readToken().toString();
this._tokenReader.readToken();
}

@@ -642,4 +810,6 @@ // Make sure there's whitespace after

return new nodes_1.DocCodeSpan({
excerpt: new Excerpt_1.Excerpt({ content: this._tokenReader.extractAccumulatedSequence() }),
text: text
openingDelimiterExcerpt: openingDelimiterExcerpt,
codeExcerpt: codeExcerpt,
code: codeExcerpt.content.toString(),
closingDelimiterExcerpt: closingDelimiterExcerpt
});

@@ -646,0 +816,0 @@ };
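
On the input side, the new `Backtick` branch means three consecutive backticks at the start of a line open a code fence instead of a code span. An end-to-end sketch; it assumes `TSDocParser.parseString()` and that `DocCodeFence` exposes `language` and `code` properties matching its constructor parameters (neither is shown in this comparison):

```ts
import { TSDocParser, DocCodeFence, DocNodeKind } from '@microsoft/tsdoc';

const context = new TSDocParser().parseString([
  '/**',
  ' * Example usage:',
  ' *',
  ' * ```ts',
  ' * const answer: number = 42;',
  ' * ```',
  ' */'
].join('\n'));

// The fence is appended to the section itself (via _pushSectionNode), not wrapped in a paragraph:
for (const node of context.docComment.summarySection.getChildNodes()) {
  if (node.kind === DocNodeKind.CodeFence) {
    const fence = node as DocCodeFence;
    console.log(fence.language);  // e.g. "ts"  (assumed property, mirrors the constructor parameter)
    console.log(fence.code);      // e.g. "const answer: number = 42;\n"
  }
}
```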

@@ -43,3 +43,3 @@ import { Token, TokenKind } from './Token';

/**
* Show the next token that will be returned by _readToken(), without
* Returns the next token that would be returned by _readToken(), without
* consuming anything.

@@ -49,3 +49,3 @@ */

/**
* Show the TokenKind for the next token that will be returned by _readToken(), without
* Returns the TokenKind for the next token that would be returned by _readToken(), without
* consuming anything.

@@ -55,7 +55,10 @@ */

/**
* Show the TokenKind for the token after the next token that will be returned by _readToken(),
* without consuming anything. In other words, look ahead two tokens.
* Like peekTokenKind(), but looks ahead two tokens.
*/
peekTokenAfterKind(): TokenKind;
/**
* Like peekTokenKind(), but looks ahead three tokens.
*/
peekTokenAfterAfterKind(): TokenKind;
/**
* Extract the next token from the input stream and return it.

@@ -62,0 +65,0 @@ * The token will also be appended to the accumulated sequence, which can

@@ -77,3 +77,3 @@ "use strict";

/**
* Show the next token that will be returned by _readToken(), without
* Returns the next token that would be returned by _readToken(), without
* consuming anything.

@@ -85,3 +85,3 @@ */

/**
* Show the TokenKind for the next token that will be returned by _readToken(), without
* Returns the TokenKind for the next token that would be returned by _readToken(), without
* consuming anything.

@@ -93,4 +93,3 @@ */

/**
* Show the TokenKind for the token after the next token that will be returned by _readToken(),
* without consuming anything. In other words, look ahead two tokens.
* Like peekTokenKind(), but looks ahead two tokens.
*/

@@ -104,2 +103,11 @@ TokenReader.prototype.peekTokenAfterKind = function () {

/**
* Like peekTokenKind(), but looks ahead three tokens.
*/
TokenReader.prototype.peekTokenAfterAfterKind = function () {
if (this._currentIndex + 2 >= this.tokens.length) {
return Token_1.TokenKind.None;
}
return this.tokens[this._currentIndex + 2].kind;
};
/**
* Extract the next token from the input stream and return it.

@@ -106,0 +114,0 @@ * The token will also be appended to the accumulated sequence, which can

@@ -38,2 +38,7 @@ import { ParserContext } from './ParserContext';

/**
* Constructs a TokenSequence that corresponds to a different range of tokens,
* e.g. a subrange.
*/
getNewSequence(startIndex: number, endIndex: number): TokenSequence;
/**
* Returns a TextRange that includes all tokens in the sequence (including any additional

@@ -40,0 +45,0 @@ * characters between doc comment lines).

@@ -49,2 +49,13 @@ "use strict";

/**
* Constructs a TokenSequence that corresponds to a different range of tokens,
* e.g. a subrange.
*/
TokenSequence.prototype.getNewSequence = function (startIndex, endIndex) {
return new TokenSequence({
parserContext: this.parserContext,
startIndex: startIndex,
endIndex: endIndex
});
};
/**
* Returns a TextRange that includes all tokens in the sequence (including any additional

@@ -51,0 +62,0 @@ * characters between doc comment lines).

@@ -9,2 +9,3 @@ "use strict";

var TSDocParserConfiguration_1 = require("./TSDocParserConfiguration");
var ParagraphSplitter_1 = require("./ParagraphSplitter");
/**

@@ -31,2 +32,3 @@ * The main API for parsing TSDoc comments.

nodeParser.parse();
ParagraphSplitter_1.ParagraphSplitter.splitParagraphs(parserContext.docComment);
}

@@ -33,0 +35,0 @@ return parserContext;
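
`ParagraphSplitter` runs as a post-processing pass after `NodeParser.parse()`, so a blank comment line now produces separate `DocParagraph` children in the parsed model. A short sketch of the observable effect (again assuming `TSDocParser.parseString()`):

```ts
import { TSDocParser, DocNodeKind } from '@microsoft/tsdoc';

const context = new TSDocParser().parseString(
  '/**\n * First paragraph.\n *\n * Second paragraph.\n */'
);

const paragraphs = context.docComment.summarySection.getChildNodes()
  .filter(node => node.kind === DocNodeKind.Paragraph);

console.log(paragraphs.length);  // 2
```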

{
"name": "@microsoft/tsdoc",
"version": "0.3.0",
"version": "0.4.0",
"description": "A parser for the TypeScript doc comment syntax",

@@ -5,0 +5,0 @@ "keywords": [

Diffs of the remaining changed files are not supported by the comparison viewer.