Socket
Socket
Sign inDemoInstall

micromark-core-commonmark

Package Overview
Dependencies
22
Maintainers
1
Versions
14
Alerts
File Explorer

Advanced tools

Install Socket

Detect and block malicious and high-risk dependencies

Install

Comparing version 1.0.6 to 1.1.0

9

dev/lib/attention.d.ts
/** @type {Construct} */
export const attention: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Event = import('micromark-util-types').Event
export type Point = import('micromark-util-types').Point
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type Event = import('micromark-util-types').Event
export type Code = import('micromark-util-types').Code
export type Point = import('micromark-util-types').Point
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
import {ok as assert} from 'uvu/assert'
import {push, splice} from 'micromark-util-chunked'

@@ -19,2 +19,3 @@ import {classifyCharacter} from 'micromark-util-classify-character'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -47,3 +48,3 @@ /** @type {Construct} */

let use
/** @type {Event[]} */
/** @type {Array<Event>} */
let nextEvents

@@ -149,2 +150,8 @@ /** @type {number} */

// Always populated by defaults.
assert(
context.parser.constructs.insideSpan.null,
'expected `insideSpan` to be populated'
)
// Between.

@@ -200,3 +207,6 @@ nextEvents = push(

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeAttention(effects, ok) {

@@ -212,3 +222,12 @@ const attentionMarkers = this.parser.constructs.attentionMarkers.null

/** @type {State} */
/**
* Before a sequence.
*
* ```markdown
* > | **
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -219,17 +238,31 @@ assert(

)
marker = code
effects.enter('attentionSequence')
marker = code
return sequence(code)
return inside(code)
}
/** @type {State} */
function sequence(code) {
/**
* In a sequence.
*
* ```markdown
* > | **
* ^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code)
return sequence
return inside
}
const token = effects.exit('attentionSequence')
// To do: next major: move this to resolver, just like `markdown-rs`.
const after = classifyCharacter(code)
// Always populated by defaults.
assert(attentionMarkers, 'expected `attentionMarkers` to be populated')
const open =

@@ -236,0 +269,0 @@ !after ||

/** @type {Construct} */
export const autolink: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {

@@ -17,2 +17,3 @@ asciiAlpha,

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -22,9 +23,23 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeAutolink(effects, ok, nok) {
let size = 1
let size = 0
return start
/** @type {State} */
/**
* Start of an autolink.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -40,3 +55,14 @@ assert(code === codes.lessThan, 'expected `<`')

/** @type {State} */
/**
* After `<`, at protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -48,22 +74,53 @@ if (asciiAlpha(code)) {

return asciiAtext(code) ? emailAtext(code) : nok(code)
return emailAtext(code)
}
/** @type {State} */
/**
* At second byte of protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeOrEmailAtext(code) {
return code === codes.plusSign ||
// ASCII alphanumeric and `+`, `-`, and `.`.
if (
code === codes.plusSign ||
code === codes.dash ||
code === codes.dot ||
asciiAlphanumeric(code)
? schemeInsideOrEmailAtext(code)
: emailAtext(code)
) {
// Count the previous alphabetical from `open` too.
size = 1
return schemeInsideOrEmailAtext(code)
}
return emailAtext(code)
}
/** @type {State} */
/**
* In ambiguous protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeInsideOrEmailAtext(code) {
if (code === codes.colon) {
effects.consume(code)
size = 0
return urlInside
}
// ASCII alphanumeric and `+`, `-`, and `.`.
if (

@@ -80,12 +137,27 @@ (code === codes.plusSign ||

size = 0
return emailAtext(code)
}
/** @type {State} */
/**
* After protocol, in URL.
*
* ```markdown
* > | a<https://example.com>b
* ^
* ```
*
* @type {State}
*/
function urlInside(code) {
if (code === codes.greaterThan) {
effects.exit(types.autolinkProtocol)
return end(code)
effects.enter(types.autolinkMarker)
effects.consume(code)
effects.exit(types.autolinkMarker)
effects.exit(types.autolink)
return ok
}
// ASCII control, space, or `<`.
if (

@@ -104,7 +176,15 @@ code === codes.eof ||

/** @type {State} */
/**
* In email atext.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailAtext(code) {
if (code === codes.atSign) {
effects.consume(code)
size = 0
return emailAtSignOrDot

@@ -121,3 +201,12 @@ }

/** @type {State} */
/**
* In label, after at-sign or dot.
*
* ```markdown
* > | a<user.name@example.com>b
* ^ ^
* ```
*
* @type {State}
*/
function emailAtSignOrDot(code) {

@@ -127,3 +216,12 @@ return asciiAlphanumeric(code) ? emailLabel(code) : nok(code)

/** @type {State} */
/**
* In label, where `.` and `>` are allowed.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailLabel(code) {

@@ -137,5 +235,9 @@ if (code === codes.dot) {

if (code === codes.greaterThan) {
// Exit, then change the type.
// Exit, then change the token type.
effects.exit(types.autolinkProtocol).type = types.autolinkEmail
return end(code)
effects.enter(types.autolinkMarker)
effects.consume(code)
effects.exit(types.autolinkMarker)
effects.exit(types.autolink)
return ok
}

@@ -146,4 +248,16 @@

/** @type {State} */
/**
* In label, where `.` and `>` are *not* allowed.
*
* Though, this is also used in `emailLabel` to parse other values.
*
* ```markdown
* > | a<user.name@ex-ample.com>b
* ^
* ```
*
* @type {State}
*/
function emailValue(code) {
// ASCII alphanumeric or `-`.
if (

@@ -153,4 +267,5 @@ (code === codes.dash || asciiAlphanumeric(code)) &&

) {
const next = code === codes.dash ? emailValue : emailLabel
effects.consume(code)
return code === codes.dash ? emailValue : emailLabel
return next
}

@@ -160,12 +275,2 @@

}
/** @type {State} */
function end(code) {
assert(code === codes.greaterThan, 'expected `>`')
effects.enter(types.autolinkMarker)
effects.consume(code)
effects.exit(types.autolinkMarker)
effects.exit(types.autolink)
return ok
}
}
/** @type {Construct} */
export const blankLine: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'

@@ -15,10 +16,46 @@ import {types} from 'micromark-util-symbol/types.js'

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlankLine(effects, ok, nok) {
return factorySpace(effects, afterWhitespace, types.linePrefix)
return start
/** @type {State} */
function afterWhitespace(code) {
/**
* Start of blank line.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function start(code) {
return markdownSpace(code)
? factorySpace(effects, after, types.linePrefix)(code)
: after(code)
}
/**
* At eof/eol, after optional whitespace.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function after(code) {
return code === codes.eof || markdownLineEnding(code) ? ok(code) : nok(code)
}
}
/** @type {Construct} */
export const blockQuote: Construct
export type Construct = import('micromark-util-types').Construct
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Exiter = import('micromark-util-types').Exiter
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -14,2 +14,3 @@ import {markdownSpace} from 'micromark-util-character'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -24,3 +25,6 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlockQuoteStart(effects, ok, nok) {

@@ -31,3 +35,12 @@ const self = this

/** @type {State} */
/**
* Start of block quote.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -54,3 +67,12 @@ if (code === codes.greaterThan) {

/** @type {State} */
/**
* After `>`, before optional whitespace.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function after(code) {

@@ -70,12 +92,69 @@ if (markdownSpace(code)) {

/** @type {Tokenizer} */
/**
* Start of block quote continuation.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
return factorySpace(
effects,
effects.attempt(blockQuote, ok, nok),
types.linePrefix,
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
const self = this
return contStart
/**
* Start of block quote continuation.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contStart(code) {
if (markdownSpace(code)) {
// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return factorySpace(
effects,
contBefore,
types.linePrefix,
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)(code)
}
return contBefore(code)
}
/**
* At `>`, after optional whitespace.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contBefore(code) {
return effects.attempt(blockQuote, ok, nok)(code)
}
}

@@ -82,0 +161,0 @@

/** @type {Construct} */
export const characterEscape: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {asciiPunctuation} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -18,7 +19,19 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCharacterEscape(effects, ok, nok) {
return start
/** @type {State} */
/**
* Start of character escape.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -30,7 +43,17 @@ assert(code === codes.backslash, 'expected `\\`')

effects.exit(types.escapeMarker)
return open
return inside
}
/** @type {State} */
function open(code) {
/**
* After `\`, at punctuation.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function inside(code) {
// ASCII punctuation.
if (asciiPunctuation(code)) {

@@ -37,0 +60,0 @@ effects.enter(types.characterEscapeValue)

/** @type {Construct} */
export const characterReference: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {decodeNamedCharacterReference} from 'decode-named-character-reference'

@@ -19,2 +18,3 @@ import {

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -27,3 +27,6 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCharacterReference(effects, ok, nok) {

@@ -34,3 +37,3 @@ const self = this

let max
/** @type {(code: Code) => code is number} */
/** @type {(code: Code) => boolean} */
let test

@@ -40,3 +43,16 @@

/** @type {State} */
/**
* Start of character reference.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -51,3 +67,17 @@ assert(code === codes.ampersand, 'expected `&`')

/** @type {State} */
/**
* After `&`, at `#` for numeric references or alphanumeric for named
* references.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -67,3 +97,14 @@ if (code === codes.numberSign) {

/** @type {State} */
/**
* After `#`, at `x` for hexadecimals or digit for decimals.
*
* ```markdown
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function numeric(code) {

@@ -86,9 +127,22 @@ if (code === codes.uppercaseX || code === codes.lowercaseX) {

/** @type {State} */
/**
* After markers (`&#x`, `&#`, or `&`), in value, before `;`.
*
* The character reference kind defines what and how many characters are
* allowed.
*
* ```markdown
* > | a&amp;b
* ^^^
* > | a&#123;b
* ^^^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function value(code) {
/** @type {Token} */
let token
if (code === codes.semicolon && size) {
token = effects.exit(types.characterReferenceValue)
const token = effects.exit(types.characterReferenceValue)

@@ -102,2 +156,4 @@ if (

// To do: `markdown-rs` uses a different name:
// `CharacterReferenceMarkerSemi`.
effects.enter(types.characterReferenceMarker)

@@ -104,0 +160,0 @@ effects.consume(code)

/** @type {Construct} */
export const codeFenced: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'
import {
markdownLineEnding,
markdownLineEndingOrSpace
} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
/** @type {Construct} */
const nonLazyContinuation = {
tokenize: tokenizeNonLazyContinuation,
partial: true
}
/** @type {Construct} */
export const codeFenced = {

@@ -25,14 +29,11 @@ name: 'codeFenced',

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeFenced(effects, ok, nok) {
const self = this
/** @type {Construct} */
const closingFenceConstruct = {tokenize: tokenizeClosingFence, partial: true}
/** @type {Construct} */
const nonLazyLine = {tokenize: tokenizeNonLazyLine, partial: true}
const tail = this.events[this.events.length - 1]
const initialPrefix =
tail && tail[1].type === types.linePrefix
? tail[2].sliceSerialize(tail[1], true).length
: 0
const closeStart = {tokenize: tokenizeCloseStart, partial: true}
let initialPrefix = 0
let sizeOpen = 0

@@ -44,4 +45,32 @@ /** @type {NonNullable<Code>} */

/** @type {State} */
/**
* Start of code.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse whitespace like `markdown-rs`.
return beforeSequenceOpen(code)
}
/**
* In opening fence, after prefix, at sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function beforeSequenceOpen(code) {
assert(

@@ -51,27 +80,63 @@ code === codes.graveAccent || code === codes.tilde,

)
const tail = self.events[self.events.length - 1]
initialPrefix =
tail && tail[1].type === types.linePrefix
? tail[2].sliceSerialize(tail[1], true).length
: 0
marker = code
effects.enter(types.codeFenced)
effects.enter(types.codeFencedFence)
effects.enter(types.codeFencedFenceSequence)
marker = code
return sequenceOpen(code)
}
/** @type {State} */
/**
* In opening fence sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === marker) {
sizeOpen++
effects.consume(code)
sizeOpen++
return sequenceOpen
}
if (sizeOpen < constants.codeFencedSequenceSizeMin) {
return nok(code)
}
effects.exit(types.codeFencedFenceSequence)
return sizeOpen < constants.codeFencedSequenceSizeMin
? nok(code)
: factorySpace(effects, infoOpen, types.whitespace)(code)
return markdownSpace(code)
? factorySpace(effects, infoBefore, types.whitespace)(code)
: infoBefore(code)
}
/** @type {State} */
function infoOpen(code) {
/**
* In opening fence, after the sequence (and optional whitespace), before info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function infoBefore(code) {
if (code === codes.eof || markdownLineEnding(code)) {
return openAfter(code)
effects.exit(types.codeFencedFence)
return self.interrupt
? ok(code)
: effects.check(nonLazyContinuation, atNonLazyBreak, after)(code)
}

@@ -84,11 +149,31 @@

/** @type {State} */
/**
* In info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function info(code) {
if (code === codes.eof || markdownLineEndingOrSpace(code)) {
if (code === codes.eof || markdownLineEnding(code)) {
effects.exit(types.chunkString)
effects.exit(types.codeFencedFenceInfo)
return factorySpace(effects, infoAfter, types.whitespace)(code)
return infoBefore(code)
}
if (code === codes.graveAccent && code === marker) return nok(code)
if (markdownSpace(code)) {
effects.exit(types.chunkString)
effects.exit(types.codeFencedFenceInfo)
return factorySpace(effects, metaBefore, types.whitespace)(code)
}
if (code === codes.graveAccent && code === marker) {
return nok(code)
}
effects.consume(code)

@@ -98,6 +183,17 @@ return info

/** @type {State} */
function infoAfter(code) {
/**
* In opening fence, after info and whitespace, before meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function metaBefore(code) {
if (code === codes.eof || markdownLineEnding(code)) {
return openAfter(code)
return infoBefore(code)
}

@@ -110,3 +206,14 @@

/** @type {State} */
/**
* In meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function meta(code) {

@@ -116,6 +223,9 @@ if (code === codes.eof || markdownLineEnding(code)) {

effects.exit(types.codeFencedFenceMeta)
return openAfter(code)
return infoBefore(code)
}
if (code === codes.graveAccent && code === marker) return nok(code)
if (code === codes.graveAccent && code === marker) {
return nok(code)
}
effects.consume(code)

@@ -125,49 +235,118 @@ return meta

/** @type {State} */
function openAfter(code) {
effects.exit(types.codeFencedFence)
return self.interrupt ? ok(code) : contentStart(code)
/**
* At eol/eof in code, before a non-lazy closing fence or content.
*
* ```markdown
* > | ~~~js
* ^
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function atNonLazyBreak(code) {
assert(markdownLineEnding(code), 'expected eol')
return effects.attempt(closeStart, after, contentBefore)(code)
}
/** @type {State} */
/**
* Before code content, not a closing fence, at eol.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentBefore(code) {
assert(markdownLineEnding(code), 'expected eol')
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return contentStart
}
/**
* Before code content, not a closing fence.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentStart(code) {
if (code === codes.eof) {
return after(code)
}
return initialPrefix > 0 && markdownSpace(code)
? factorySpace(
effects,
beforeContentChunk,
types.linePrefix,
initialPrefix + 1
)(code)
: beforeContentChunk(code)
}
if (markdownLineEnding(code)) {
return effects.attempt(
nonLazyLine,
effects.attempt(
closingFenceConstruct,
after,
initialPrefix
? factorySpace(
effects,
contentStart,
types.linePrefix,
initialPrefix + 1
)
: contentStart
),
after
)(code)
/**
* Before code content, after optional prefix.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function beforeContentChunk(code) {
if (code === codes.eof || markdownLineEnding(code)) {
return effects.check(nonLazyContinuation, atNonLazyBreak, after)(code)
}
effects.enter(types.codeFlowValue)
return contentContinue(code)
return contentChunk(code)
}
/** @type {State} */
function contentContinue(code) {
/**
* In code content.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^^^^^^^^
* | ~~~
* ```
*
* @type {State}
*/
function contentChunk(code) {
if (code === codes.eof || markdownLineEnding(code)) {
effects.exit(types.codeFlowValue)
return contentStart(code)
return beforeContentChunk(code)
}
effects.consume(code)
return contentContinue
return contentChunk
}
/** @type {State} */
/**
* After code.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function after(code) {

@@ -178,10 +357,17 @@ effects.exit(types.codeFenced)

/** @type {Tokenizer} */
function tokenizeNonLazyLine(effects, ok, nok) {
const self = this
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCloseStart(effects, ok, nok) {
let size = 0
return start
return startBefore
/** @type {State} */
function start(code) {
/**
*
*
* @type {State}
*/
function startBefore(code) {
assert(markdownLineEnding(code), 'expected eol')

@@ -191,46 +377,101 @@ effects.enter(types.lineEnding)

effects.exit(types.lineEnding)
return lineStart
return start
}
/** @type {State} */
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
/**
* Before closing fence, at optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
// To do: `enter` here or in next state?
effects.enter(types.codeFencedFence)
return markdownSpace(code)
? factorySpace(
effects,
beforeSequenceClose,
types.linePrefix,
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)(code)
: beforeSequenceClose(code)
}
}
/** @type {Tokenizer} */
function tokenizeClosingFence(effects, ok, nok) {
let size = 0
/**
* In closing fence, after optional whitespace, at sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function beforeSequenceClose(code) {
if (code === marker) {
effects.enter(types.codeFencedFenceSequence)
return sequenceClose(code)
}
return factorySpace(
effects,
closingSequenceStart,
types.linePrefix,
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
/** @type {State} */
function closingSequenceStart(code) {
effects.enter(types.codeFencedFence)
effects.enter(types.codeFencedFenceSequence)
return closingSequence(code)
return nok(code)
}
/** @type {State} */
function closingSequence(code) {
/**
* In closing fence sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
if (code === marker) {
size++
effects.consume(code)
size++
return closingSequence
return sequenceClose
}
if (size < sizeOpen) return nok(code)
effects.exit(types.codeFencedFenceSequence)
return factorySpace(effects, closingSequenceEnd, types.whitespace)(code)
if (size >= sizeOpen) {
effects.exit(types.codeFencedFenceSequence)
return markdownSpace(code)
? factorySpace(effects, sequenceCloseAfter, types.whitespace)(code)
: sequenceCloseAfter(code)
}
return nok(code)
}
/** @type {State} */
function closingSequenceEnd(code) {
/**
* After closing fence sequence, after optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceCloseAfter(code) {
if (code === codes.eof || markdownLineEnding(code)) {

@@ -245,1 +486,37 @@ effects.exit(types.codeFencedFence)

}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuation(effects, ok, nok) {
const self = this
return start
/**
*
*
* @type {State}
*/
function start(code) {
if (code === codes.eof) {
return nok(code)
}
assert(markdownLineEnding(code), 'expected eol')
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return lineStart
}
/**
*
*
* @type {State}
*/
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/** @type {Construct} */
export const codeIndented: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Resolver = import('micromark-util-types').Resolver
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -22,5 +22,8 @@ /** @type {Construct} */

/** @type {Construct} */
const indentedContent = {tokenize: tokenizeIndentedContent, partial: true}
const furtherStart = {tokenize: tokenizeFurtherStart, partial: true}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeIndented(effects, ok, nok) {

@@ -30,8 +33,25 @@ const self = this

/** @type {State} */
/**
* Start of code (indented).
*
* > **Parsing note**: it is not needed to check if this first line is a
* > filled line (that it has a non-whitespace character), because blank lines
* > are parsed already, so we never run into that.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: manually check if interrupting like `markdown-rs`.
assert(markdownSpace(code))
effects.enter(types.codeIndented)
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(
effects,
afterStartPrefix,
afterPrefix,
types.linePrefix,

@@ -42,4 +62,13 @@ constants.tabSize + 1

/** @type {State} */
function afterStartPrefix(code) {
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {
const tail = self.events[self.events.length - 1]

@@ -49,8 +78,17 @@ return tail &&

tail[2].sliceSerialize(tail[1], true).length >= constants.tabSize
? afterPrefix(code)
? atBreak(code)
: nok(code)
}
/** @type {State} */
function afterPrefix(code) {
/**
* At a break.
*
* ```markdown
* > | aaa
* ^ ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === codes.eof) {

@@ -61,18 +99,27 @@ return after(code)

if (markdownLineEnding(code)) {
return effects.attempt(indentedContent, afterPrefix, after)(code)
return effects.attempt(furtherStart, atBreak, after)(code)
}
effects.enter(types.codeFlowValue)
return content(code)
return inside(code)
}
/** @type {State} */
function content(code) {
/**
* In code content.
*
* ```markdown
* > | aaa
* ^^^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === codes.eof || markdownLineEnding(code)) {
effects.exit(types.codeFlowValue)
return afterPrefix(code)
return atBreak(code)
}
effects.consume(code)
return content
return inside
}

@@ -83,2 +130,5 @@

effects.exit(types.codeIndented)
// To do: allow interrupting like `markdown-rs`.
// Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code)

@@ -88,10 +138,24 @@ }

/** @type {Tokenizer} */
function tokenizeIndentedContent(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeFurtherStart(effects, ok, nok) {
const self = this
return start
return furtherStart
/** @type {State} */
function start(code) {
/**
* At eol, trying to parse another indent.
*
* ```markdown
* > | aaa
* ^
* | bbb
* ```
*
* @type {State}
*/
function furtherStart(code) {
// To do: improve `lazy` / `pierce` handling.
// If this is a lazy line, it can’t be code.

@@ -106,5 +170,11 @@ if (self.parser.lazy[self.now().line]) {

effects.exit(types.lineEnding)
return start
return furtherStart
}
// To do: the code here in `micromark-js` is a bit different from
// `markdown-rs` because there it can attempt spaces.
// We can’t yet.
//
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(

@@ -118,3 +188,12 @@ effects,

/** @type {State} */
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {

@@ -127,5 +206,5 @@ const tail = self.events[self.events.length - 1]

: markdownLineEnding(code)
? start(code)
? furtherStart(code)
: nok(code)
}
}
/** @type {Construct} */
export const codeText: Construct
export type Construct = import('micromark-util-types').Construct
export type Previous = import('micromark-util-types').Previous
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Previous = import('micromark-util-types').Previous
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Previous} Previous
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Previous} Previous
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -23,2 +24,3 @@ /** @type {Construct} */

// To do: next major: don’t resolve, like `markdown-rs`.
/** @type {Resolver} */

@@ -30,3 +32,3 @@ function resolveCodeText(events) {

let index
/** @type {number|undefined} */
/** @type {number | undefined} */
let enter

@@ -88,3 +90,6 @@

/** @type {Previous} */
/**
* @this {TokenizeContext}
* @type {Previous}
*/
function previous(code) {

@@ -98,3 +103,6 @@ // If there is a previous code, there will always be a tail.

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeText(effects, ok, nok) {

@@ -110,3 +118,14 @@ const self = this

/** @type {State} */
/**
* Start of code (text).
*
* ```markdown
* > | `a`
* ^
* > | \`a`
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -117,19 +136,37 @@ assert(code === codes.graveAccent, 'expected `` ` ``')

effects.enter(types.codeTextSequence)
return openingSequence(code)
return sequenceOpen(code)
}
/** @type {State} */
function openingSequence(code) {
/**
* In opening sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === codes.graveAccent) {
effects.consume(code)
sizeOpen++
return openingSequence
return sequenceOpen
}
effects.exit(types.codeTextSequence)
return gap(code)
return between(code)
}
/** @type {State} */
function gap(code) {
/**
* Between something and something else.
*
* ```markdown
* > | `a`
* ^^
* ```
*
* @type {State}
*/
function between(code) {
// EOF.

@@ -140,10 +177,4 @@ if (code === codes.eof) {

// Closing fence?
// Could also be data.
if (code === codes.graveAccent) {
token = effects.enter(types.codeTextSequence)
size = 0
return closingSequence(code)
}
// To do: next major: don’t do spaces in resolve, but when compiling,
// like `markdown-rs`.
// Tabs don’t work, and virtual spaces don’t make sense.

@@ -154,5 +185,12 @@ if (code === codes.space) {

effects.exit('space')
return gap
return between
}
// Closing fence? Could also be data.
if (code === codes.graveAccent) {
token = effects.enter(types.codeTextSequence)
size = 0
return sequenceClose(code)
}
if (markdownLineEnding(code)) {

@@ -162,3 +200,3 @@ effects.enter(types.lineEnding)

effects.exit(types.lineEnding)
return gap
return between
}

@@ -171,4 +209,12 @@

// In code.
/** @type {State} */
/**
* In data.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function data(code) {

@@ -182,3 +228,3 @@ if (

effects.exit(types.codeTextData)
return gap(code)
return between(code)
}

@@ -190,5 +236,13 @@

// Closing fence.
/** @type {State} */
function closingSequence(code) {
/**
* In closing sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
// More.

@@ -198,3 +252,3 @@ if (code === codes.graveAccent) {

size++
return closingSequence
return sequenceClose
}

@@ -201,0 +255,0 @@

@@ -8,4 +8,5 @@ /**

export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -16,2 +16,3 @@ import {markdownLineEnding} from 'micromark-util-character'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -38,11 +39,23 @@ /**

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContent(effects, ok) {
/** @type {Token} */
/** @type {Token | undefined} */
let previous
return start
return chunkStart
/** @type {State} */
function start(code) {
/**
* Before a content chunk.
*
* ```markdown
* > | abc
* ^
* ```
*
* @type {State}
*/
function chunkStart(code) {
assert(

@@ -57,7 +70,16 @@ code !== codes.eof && !markdownLineEnding(code),

})
return data(code)
return chunkInside(code)
}
/** @type {State} */
function data(code) {
/**
* In a content chunk.
*
* ```markdown
* > | abc
* ^^^
* ```
*
* @type {State}
*/
function chunkInside(code) {
if (code === codes.eof) {

@@ -67,2 +89,4 @@ return contentEnd(code)

// To do: in `markdown-rs`, each line is parsed on its own, and everything
// is stitched together resolving.
if (markdownLineEnding(code)) {

@@ -78,6 +102,10 @@ return effects.check(

effects.consume(code)
return data
return chunkInside
}
/** @type {State} */
/**
*
*
* @type {State}
*/
function contentEnd(code) {

@@ -89,3 +117,7 @@ effects.exit(types.chunkContent)

/** @type {State} */
/**
*
*
* @type {State}
*/
function contentContinue(code) {

@@ -95,2 +127,3 @@ assert(markdownLineEnding(code), 'expected eol')

effects.exit(types.chunkContent)
assert(previous, 'expected previous token')
previous.next = effects.enter(types.chunkContent, {

@@ -101,7 +134,10 @@ contentType: constants.contentTypeContent,

previous = previous.next
return data
return chunkInside
}
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContinuation(effects, ok, nok) {

@@ -112,3 +148,7 @@ const self = this

/** @type {State} */
/**
*
*
* @type {State}
*/
function startLookahead(code) {

@@ -123,3 +163,7 @@ assert(markdownLineEnding(code), 'expected a line ending')

/** @type {State} */
/**
*
*
* @type {State}
*/
function prefixed(code) {

@@ -130,2 +174,8 @@ if (code === codes.eof || markdownLineEnding(code)) {

// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
const tail = self.events[self.events.length - 1]

@@ -132,0 +182,0 @@

/** @type {Construct} */
export const definition: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {factoryDestination} from 'micromark-factory-destination'

@@ -13,9 +13,11 @@ import {factoryLabel} from 'micromark-factory-label'

import {factoryWhitespace} from 'micromark-factory-whitespace'
import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
import {
markdownLineEnding,
markdownLineEndingOrSpace
markdownLineEndingOrSpace,
markdownSpace
} from 'micromark-util-character'
import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -26,5 +28,8 @@ /** @type {Construct} */

/** @type {Construct} */
const titleConstruct = {tokenize: tokenizeTitle, partial: true}
const titleBefore = {tokenize: tokenizeTitleBefore, partial: true}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeDefinition(effects, ok, nok) {

@@ -37,6 +42,33 @@ const self = this

/** @type {State} */
/**
* At start of a definition.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Do not interrupt paragraphs (but do follow definitions).
// To do: do `interrupt` the way `markdown-rs` does.
// To do: parse whitespace the way `markdown-rs` does.
effects.enter(types.definition)
return before(code)
}
/**
* After optional whitespace, at `[`.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function before(code) {
// To do: parse whitespace the way `markdown-rs` does.
assert(code === codes.leftSquareBracket, 'expected `[`')
effects.enter(types.definition)
return factoryLabel.call(

@@ -46,2 +78,3 @@ self,

labelAfter,
// Note: we don’t need to reset the way `markdown-rs` does.
nok,

@@ -54,3 +87,12 @@ types.definitionLabel,

/** @type {State} */
/**
* After label.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function labelAfter(code) {

@@ -65,21 +107,3 @@ identifier = normalizeIdentifier(

effects.exit(types.definitionMarker)
// Note: blank lines can’t exist in content.
return factoryWhitespace(
effects,
factoryDestination(
effects,
effects.attempt(
titleConstruct,
factorySpace(effects, after, types.whitespace),
factorySpace(effects, after, types.whitespace)
),
nok,
types.definitionDestination,
types.definitionDestinationLiteral,
types.definitionDestinationLiteralMarker,
types.definitionDestinationRaw,
types.definitionDestinationString
)
)
return markerAfter
}

@@ -90,11 +114,99 @@

/** @type {State} */
/**
* After marker.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function markerAfter(code) {
// Note: whitespace is optional.
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, destinationBefore)(code)
: destinationBefore(code)
}
/**
* Before destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationBefore(code) {
return factoryDestination(
effects,
destinationAfter,
// Note: we don’t need to reset the way `markdown-rs` does.
nok,
types.definitionDestination,
types.definitionDestinationLiteral,
types.definitionDestinationLiteralMarker,
types.definitionDestinationRaw,
types.definitionDestinationString
)(code)
}
/**
* After destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationAfter(code) {
return effects.attempt(titleBefore, after, after)(code)
}
/**
* After definition.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function after(code) {
return markdownSpace(code)
? factorySpace(effects, afterWhitespace, types.whitespace)(code)
: afterWhitespace(code)
}
/**
* After definition, after optional whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function afterWhitespace(code) {
if (code === codes.eof || markdownLineEnding(code)) {
effects.exit(types.definition)
if (!self.parser.defined.includes(identifier)) {
self.parser.defined.push(identifier)
}
// Note: we don’t care about uniqueness.
// It’s likely that that doesn’t happen very frequently.
// It is more likely that it wastes precious time.
self.parser.defined.push(identifier)
// To do: `markdown-rs` interrupt.
// // You’d be interrupting.
// tokenizer.interrupt = true
return ok(code)

@@ -107,37 +219,82 @@ }

/** @type {Tokenizer} */
function tokenizeTitle(effects, ok, nok) {
return start
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeTitleBefore(effects, ok, nok) {
return titleBefore
/** @type {State} */
function start(code) {
/**
* After destination, at whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleBefore(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, before)(code)
? factoryWhitespace(effects, beforeMarker)(code)
: nok(code)
}
/** @type {State} */
function before(code) {
if (
code === codes.quotationMark ||
code === codes.apostrophe ||
code === codes.leftParenthesis
) {
return factoryTitle(
effects,
factorySpace(effects, after, types.whitespace),
nok,
types.definitionTitle,
types.definitionTitleMarker,
types.definitionTitleString
)(code)
}
/**
* At title.
*
* ```markdown
* | [a]: b
* > | "c"
* ^
* ```
*
* @type {State}
*/
function beforeMarker(code) {
return factoryTitle(
effects,
titleAfter,
nok,
types.definitionTitle,
types.definitionTitleMarker,
types.definitionTitleString
)(code)
}
return nok(code)
/**
* After title.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfter(code) {
return markdownSpace(code)
? factorySpace(
effects,
titleAfterOptionalWhitespace,
types.whitespace
)(code)
: titleAfterOptionalWhitespace(code)
}
/** @type {State} */
function after(code) {
/**
* After title, after optional whitespace.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfterOptionalWhitespace(code) {
return code === codes.eof || markdownLineEnding(code) ? ok(code) : nok(code)
}
}
/** @type {Construct} */
export const hardBreakEscape: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -18,19 +19,40 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHardBreakEscape(effects, ok, nok) {
return start
/** @type {State} */
/**
* Start of a hard break (escape).
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
assert(code === codes.backslash, 'expected `\\`')
effects.enter(types.hardBreakEscape)
effects.enter(types.escapeMarker)
effects.consume(code)
return open
return after
}
/** @type {State} */
function open(code) {
/**
* After `\`, at eol.
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function after(code) {
if (markdownLineEnding(code)) {
effects.exit(types.escapeMarker)
effects.exit(types.hardBreakEscape)

@@ -37,0 +59,0 @@ return ok(code)

@@ -5,4 +5,5 @@ /** @type {Construct} */

export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -20,2 +20,3 @@ import {

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -70,3 +71,2 @@ /** @type {Construct} */

end: events[contentEnd][1].end,
// @ts-expect-error Constants are fine to assign.
contentType: constants.contentTypeText

@@ -86,5 +86,7 @@ }

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHeadingAtx(effects, ok, nok) {
const self = this
let size = 0

@@ -94,12 +96,45 @@

/** @type {State} */
/**
* Start of a heading (atx).
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
effects.enter(types.atxHeading)
return before(code)
}
/**
* After optional whitespace, at `#`.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function before(code) {
assert(code === codes.numberSign, 'expected `#`')
effects.enter(types.atxHeading)
effects.enter(types.atxHeadingSequence)
return fenceOpenInside(code)
return sequenceOpen(code)
}
/** @type {State} */
function fenceOpenInside(code) {
/**
* In opening sequence.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (

@@ -110,8 +145,9 @@ code === codes.numberSign &&

effects.consume(code)
return fenceOpenInside
return sequenceOpen
}
// Always at least one `#`.
if (code === codes.eof || markdownLineEndingOrSpace(code)) {
effects.exit(types.atxHeadingSequence)
return self.interrupt ? ok(code) : headingBreak(code)
return atBreak(code)
}

@@ -122,7 +158,16 @@

/** @type {State} */
function headingBreak(code) {
/**
* After something, before something else.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === codes.numberSign) {
effects.enter(types.atxHeadingSequence)
return sequence(code)
return sequenceFurther(code)
}

@@ -132,2 +177,5 @@

effects.exit(types.atxHeading)
// To do: interrupt like `markdown-rs`.
// // Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code)

@@ -137,5 +185,7 @@ }

if (markdownSpace(code)) {
return factorySpace(effects, headingBreak, types.whitespace)(code)
return factorySpace(effects, atBreak, types.whitespace)(code)
}
// To do: generate `data` tokens, add the `text` token later.
// Needs edit map, see: `markdown.rs`.
effects.enter(types.atxHeadingText)

@@ -145,14 +195,34 @@ return data(code)

/** @type {State} */
function sequence(code) {
/**
* In further sequence (after whitespace).
*
* Could be normal “visible” hashes in the heading or a final sequence.
*
* ```markdown
* > | ## aa ##
* ^
* ```
*
* @type {State}
*/
function sequenceFurther(code) {
if (code === codes.numberSign) {
effects.consume(code)
return sequence
return sequenceFurther
}
effects.exit(types.atxHeadingSequence)
return headingBreak(code)
return atBreak(code)
}
/** @type {State} */
/**
* In text.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function data(code) {

@@ -165,3 +235,3 @@ if (

effects.exit(types.atxHeadingText)
return headingBreak(code)
return atBreak(code)
}

@@ -168,0 +238,0 @@

/** @type {Construct} */
export const htmlFlow: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {

@@ -21,2 +21,3 @@ asciiAlpha,

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
import {blankLine} from './blank-line.js'

@@ -33,3 +34,7 @@

/** @type {Construct} */
const nextBlankConstruct = {tokenize: tokenizeNextBlank, partial: true}
const blankLineBefore = {tokenize: tokenizeBlankLineBefore, partial: true}
const nonLazyContinuationStart = {
tokenize: tokenizeNonLazyContinuationStart,
partial: true
}

@@ -61,9 +66,12 @@ /** @type {Resolver} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHtmlFlow(effects, ok, nok) {
const self = this
/** @type {number} */
let kind
let marker
/** @type {boolean} */
let startTag
let closingTag
/** @type {string} */

@@ -74,8 +82,32 @@ let buffer

/** @type {Code} */
let marker
let markerB
return start
/** @type {State} */
/**
* Start of HTML (flow).
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
return before(code)
}
/**
* At `<`, after optional whitespace.
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function before(code) {
assert(code === codes.lessThan, 'expected `<`')

@@ -88,7 +120,20 @@ effects.enter(types.htmlFlow)

/** @type {State} */
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | <x />
* ^
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (code === codes.exclamationMark) {
effects.consume(code)
return declarationStart
return declarationOpen
}

@@ -98,2 +143,3 @@

effects.consume(code)
closingTag = true
return tagCloseStart

@@ -104,3 +150,6 @@ }

effects.consume(code)
kind = constants.htmlInstruction
marker = constants.htmlInstruction
// To do:
// tokenizer.concrete = true
// To do: use `markdown-rs` style interrupt.
// While we’re in an instruction instead of a declaration, we’re on a `?`

@@ -111,6 +160,7 @@ // right now, so we do need to search for `>`, similar to declarations.

// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code)
// @ts-expect-error: not null.
buffer = String.fromCharCode(code)
startTag = true
return tagName

@@ -122,7 +172,20 @@ }

/** @type {State} */
function declarationStart(code) {
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === codes.dash) {
effects.consume(code)
kind = constants.htmlComment
marker = constants.htmlComment
return commentOpenInside

@@ -133,4 +196,3 @@ }

effects.consume(code)
kind = constants.htmlCdata
buffer = constants.cdataOpeningString
marker = constants.htmlCdata
index = 0

@@ -140,5 +202,8 @@ return cdataOpenInside

// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code)
kind = constants.htmlDeclaration
marker = constants.htmlDeclaration
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside

@@ -150,6 +215,17 @@ }

/** @type {State} */
/**
* After `<!-`, inside a comment, at another `-`.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === codes.dash) {
effects.consume(code)
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside

@@ -161,11 +237,25 @@ }

/** @type {State} */
/**
* After `<![`, inside CDATA, expecting `CDATA[`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
if (code === buffer.charCodeAt(index++)) {
const value = constants.cdataOpeningString
if (code === value.charCodeAt(index++)) {
effects.consume(code)
return index === buffer.length
? self.interrupt
? ok
: continuation
: cdataOpenInside
if (index === value.length) {
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation
}
return cdataOpenInside
}

@@ -176,6 +266,16 @@

/** @type {State} */
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | </x>
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
if (asciiAlpha(code)) {
effects.consume(code)
// @ts-expect-error: not null.
buffer = String.fromCharCode(code)

@@ -188,3 +288,14 @@ return tagName

/** @type {State} */
/**
* In tag name.
*
* ```markdown
* > | <ab>
* ^^
* > | </ab>
* ^^
* ```
*
* @type {State}
*/
function tagName(code) {

@@ -197,8 +308,9 @@ if (

) {
if (
code !== codes.slash &&
startTag &&
htmlRawNames.includes(buffer.toLowerCase())
) {
kind = constants.htmlRaw
const slash = code === codes.slash
const name = buffer.toLowerCase()
if (!slash && !closingTag && htmlRawNames.includes(name)) {
marker = constants.htmlRaw
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code)

@@ -208,5 +320,5 @@ }

if (htmlBlockNames.includes(buffer.toLowerCase())) {
kind = constants.htmlBasic
marker = constants.htmlBasic
if (code === codes.slash) {
if (slash) {
effects.consume(code)

@@ -216,14 +328,17 @@ return basicSelfClosing

// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code)
}
kind = constants.htmlComplete
// Do not support complete HTML when interrupting
marker = constants.htmlComplete
// Do not support complete HTML when interrupting.
return self.interrupt && !self.parser.lazy[self.now().line]
? nok(code)
: startTag
? completeAttributeNameBefore(code)
: completeClosingTagAfter(code)
: closingTag
? completeClosingTagAfter(code)
: completeAttributeNameBefore(code)
}
// ASCII alphanumerical and `-`.
if (code === codes.dash || asciiAlphanumeric(code)) {

@@ -238,6 +353,17 @@ effects.consume(code)

/** @type {State} */
/**
* After closing slash of a basic tag name.
*
* ```markdown
* > | <div/>
* ^
* ```
*
* @type {State}
*/
function basicSelfClosing(code) {
if (code === codes.greaterThan) {
effects.consume(code)
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation

@@ -249,3 +375,12 @@ }

/** @type {State} */
/**
* After closing slash of a complete tag name.
*
* ```markdown
* > | <x/>
* ^
* ```
*
* @type {State}
*/
function completeClosingTagAfter(code) {

@@ -260,3 +395,25 @@ if (markdownSpace(code)) {

/** @type {State} */
/**
* At an attribute name.
*
* At first, this state is used after a complete tag name, after whitespace,
* where it expects optional attributes or the end of the tag.
* It is also reused after attributes, when expecting more optional
* attributes.
*
* ```markdown
* > | <a />
* ^
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* > | <a >
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameBefore(code) {

@@ -268,2 +425,3 @@ if (code === codes.slash) {

// ASCII alphanumerical and `:` and `_`.
if (code === codes.colon || code === codes.underscore || asciiAlpha(code)) {

@@ -282,4 +440,18 @@ effects.consume(code)

/** @type {State} */
/**
* In attribute name.
*
* ```markdown
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* ```
*
* @type {State}
*/
function completeAttributeName(code) {
// ASCII alphanumerical and `-`, `.`, `:`, and `_`.
if (

@@ -299,3 +471,15 @@ code === codes.dash ||

/** @type {State} */
/**
* After attribute name, at an optional initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | <a b>
* ^
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameAfter(code) {

@@ -315,3 +499,15 @@ if (code === codes.equalsTo) {

/** @type {State} */
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | <a b=c>
* ^
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueBefore(code) {

@@ -330,3 +526,3 @@ if (

effects.consume(code)
marker = code
markerB = code
return completeAttributeValueQuoted

@@ -340,8 +536,24 @@ }

marker = null
return completeAttributeValueUnquoted(code)
}
/** @type {State} */
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | <a b="c">
* ^
* > | <a b='c'>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuoted(code) {
if (code === markerB) {
effects.consume(code)
markerB = null
return completeAttributeValueQuotedAfter
}
if (code === codes.eof || markdownLineEnding(code)) {

@@ -351,7 +563,2 @@ return nok(code)

if (code === marker) {
effects.consume(code)
return completeAttributeValueQuotedAfter
}
effects.consume(code)

@@ -361,3 +568,12 @@ return completeAttributeValueQuoted

/** @type {State} */
/**
* In unquoted attribute value.
*
* ```markdown
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueUnquoted(code) {

@@ -368,2 +584,3 @@ if (

code === codes.apostrophe ||
code === codes.slash ||
code === codes.lessThan ||

@@ -382,3 +599,13 @@ code === codes.equalsTo ||

/** @type {State} */
/**
* After double or single quoted attribute value, before whitespace or the
* end of the tag.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuotedAfter(code) {

@@ -396,3 +623,12 @@ if (

/** @type {State} */
/**
* In certain circumstances of a complete tag where only an `>` is allowed.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeEnd(code) {

@@ -407,4 +643,19 @@ if (code === codes.greaterThan) {

/** @type {State} */
/**
* After `>` in a complete tag.
*
* ```markdown
* > | <x>
* ^
* ```
*
* @type {State}
*/
function completeAfter(code) {
if (code === codes.eof || markdownLineEnding(code)) {
// // Do not form containers.
// tokenizer.concrete = true
return continuation(code)
}
if (markdownSpace(code)) {

@@ -415,10 +666,17 @@ effects.consume(code)

return code === codes.eof || markdownLineEnding(code)
? continuation(code)
: nok(code)
return nok(code)
}
/** @type {State} */
/**
* In continuation of any HTML kind.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuation(code) {
if (code === codes.dash && kind === constants.htmlComment) {
if (code === codes.dash && marker === constants.htmlComment) {
effects.consume(code)

@@ -428,3 +686,3 @@ return continuationCommentInside

if (code === codes.lessThan && kind === constants.htmlRaw) {
if (code === codes.lessThan && marker === constants.htmlRaw) {
effects.consume(code)

@@ -434,3 +692,3 @@ return continuationRawTagOpen

if (code === codes.greaterThan && kind === constants.htmlDeclaration) {
if (code === codes.greaterThan && marker === constants.htmlDeclaration) {
effects.consume(code)

@@ -440,3 +698,3 @@ return continuationClose

if (code === codes.questionMark && kind === constants.htmlInstruction) {
if (code === codes.questionMark && marker === constants.htmlInstruction) {
effects.consume(code)

@@ -446,5 +704,5 @@ return continuationDeclarationInside

if (code === codes.rightSquareBracket && kind === constants.htmlCdata) {
if (code === codes.rightSquareBracket && marker === constants.htmlCdata) {
effects.consume(code)
return continuationCharacterDataInside
return continuationCdataInside
}

@@ -454,8 +712,9 @@

markdownLineEnding(code) &&
(kind === constants.htmlBasic || kind === constants.htmlComplete)
(marker === constants.htmlBasic || marker === constants.htmlComplete)
) {
effects.exit(types.htmlFlowData)
return effects.check(
nextBlankConstruct,
continuationClose,
continuationAtLineEnding
blankLineBefore,
continuationAfter,
continuationStart
)(code)

@@ -465,3 +724,4 @@ }

if (code === codes.eof || markdownLineEnding(code)) {
return continuationAtLineEnding(code)
effects.exit(types.htmlFlowData)
return continuationStart(code)
}

@@ -473,20 +733,54 @@

/** @type {State} */
function continuationAtLineEnding(code) {
effects.exit(types.htmlFlowData)
return htmlContinueStart(code)
/**
* In continuation, at eol.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStart(code) {
return effects.check(
nonLazyContinuationStart,
continuationStartNonLazy,
continuationAfter
)(code)
}
/** @type {State} */
function htmlContinueStart(code) {
if (code === codes.eof) {
return done(code)
}
/**
* In continuation, at eol, before non-lazy content.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStartNonLazy(code) {
assert(markdownLineEnding(code))
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return continuationBefore
}
if (markdownLineEnding(code)) {
return effects.attempt(
{tokenize: htmlLineEnd, partial: true},
htmlContinueStart,
done
)(code)
/**
* In continuation, before non-lazy content.
*
* ```markdown
* | <x>
* > | asd
* ^
* ```
*
* @type {State}
*/
function continuationBefore(code) {
if (code === codes.eof || markdownLineEnding(code)) {
return continuationStart(code)
}

@@ -498,22 +792,12 @@

/** @type {Tokenizer} */
function htmlLineEnd(effects, ok, nok) {
return start
/** @type {State} */
function start(code) {
assert(markdownLineEnding(code), 'expected eol')
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return lineStart
}
/** @type {State} */
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/** @type {State} */
/**
* In comment continuation, after one `-`, expecting another.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuationCommentInside(code) {

@@ -528,3 +812,12 @@ if (code === codes.dash) {

/** @type {State} */
/**
* In raw continuation, after `<`, at `/`.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^
* ```
*
* @type {State}
*/
function continuationRawTagOpen(code) {

@@ -540,10 +833,22 @@ if (code === codes.slash) {

/** @type {State} */
/**
* In raw continuation, after `</`, in a raw tag name.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^^^^^^
* ```
*
* @type {State}
*/
function continuationRawEndTag(code) {
if (
code === codes.greaterThan &&
htmlRawNames.includes(buffer.toLowerCase())
) {
effects.consume(code)
return continuationClose
if (code === codes.greaterThan) {
const name = buffer.toLowerCase()
if (htmlRawNames.includes(name)) {
effects.consume(code)
return continuationClose
}
return continuation(code)
}

@@ -553,2 +858,3 @@

effects.consume(code)
// @ts-expect-error: not null.
buffer += String.fromCharCode(code)

@@ -561,4 +867,13 @@ return continuationRawEndTag

/** @type {State} */
function continuationCharacterDataInside(code) {
/**
* In cdata continuation, after `]`, expecting `]>`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationCdataInside(code) {
if (code === codes.rightSquareBracket) {

@@ -572,3 +887,20 @@ effects.consume(code)

/** @type {State} */
/**
* In declaration or instruction continuation, at `>`.
*
* ```markdown
* > | <!-->
* ^
* > | <?>
* ^
* > | <!q>
* ^
* > | <!--ab-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationDeclarationInside(code) {

@@ -581,3 +913,3 @@ if (code === codes.greaterThan) {

// More dashes.
if (code === codes.dash && kind === constants.htmlComment) {
if (code === codes.dash && marker === constants.htmlComment) {
effects.consume(code)

@@ -590,7 +922,16 @@ return continuationDeclarationInside

/** @type {State} */
/**
* In closed continuation: everything we get until the eol/eof is part of it.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationClose(code) {
if (code === codes.eof || markdownLineEnding(code)) {
effects.exit(types.htmlFlowData)
return done(code)
return continuationAfter(code)
}

@@ -602,5 +943,18 @@

/** @type {State} */
function done(code) {
/**
* Done.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationAfter(code) {
effects.exit(types.htmlFlow)
// // Feel free to interrupt.
// tokenizer.interrupt = false
// // No longer concrete.
// tokenizer.concrete = false
return ok(code)

@@ -610,15 +964,74 @@ }

/** @type {Tokenizer} */
function tokenizeNextBlank(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuationStart(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* At eol, before continuation.
*
* ```markdown
* > | * ```js
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
if (markdownLineEnding(code)) {
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return after
}
return nok(code)
}
/**
* A continuation.
*
* ```markdown
* | * ```js
* > | b
* ^
* ```
*
* @type {State}
*/
function after(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlankLineBefore(effects, ok, nok) {
return start
/**
* Before eol, expecting blank line.
*
* ```markdown
* > | <div>
* ^
* |
* ```
*
* @type {State}
*/
function start(code) {
assert(markdownLineEnding(code), 'expected a line ending')
effects.exit(types.htmlFlowData)
effects.enter(types.lineEndingBlank)
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEndingBlank)
effects.exit(types.lineEnding)
return effects.attempt(blankLine, ok, nok)
}
}
/** @type {Construct} */
export const htmlText: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -20,2 +20,3 @@ import {

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -25,9 +26,10 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHtmlText(effects, ok, nok) {
const self = this
/** @type {NonNullable<Code>|undefined} */
/** @type {NonNullable<Code> | undefined} */
let marker
/** @type {string} */
let buffer
/** @type {number} */

@@ -40,3 +42,12 @@ let index

/** @type {State} */
/**
* Start of HTML (text).
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -50,3 +61,16 @@ assert(code === codes.lessThan, 'expected `<`')

/** @type {State} */
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | a <b> c
* ^
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -68,2 +92,3 @@ if (code === codes.exclamationMark) {

// ASCII alphabetical.
if (asciiAlpha(code)) {

@@ -77,7 +102,20 @@ effects.consume(code)

/** @type {State} */
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* > | a <![CDATA[>&<]]> c
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === codes.dash) {
effects.consume(code)
return commentOpen
return commentOpenInside
}

@@ -87,5 +125,4 @@

effects.consume(code)
buffer = constants.cdataOpeningString
index = 0
return cdataOpen
return cdataOpenInside
}

@@ -101,7 +138,16 @@

/** @type {State} */
function commentOpen(code) {
/**
* In a comment, after `<!-`, at another `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === codes.dash) {
effects.consume(code)
return commentStart
return commentEnd
}

@@ -112,26 +158,12 @@

/** @type {State} */
function commentStart(code) {
if (code === codes.eof || code === codes.greaterThan) {
return nok(code)
}
if (code === codes.dash) {
effects.consume(code)
return commentStartDash
}
return comment(code)
}
/** @type {State} */
function commentStartDash(code) {
if (code === codes.eof || code === codes.greaterThan) {
return nok(code)
}
return comment(code)
}
/** @type {State} */
/**
* In comment.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function comment(code) {

@@ -149,3 +181,3 @@ if (code === codes.eof) {

returnState = comment
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -157,7 +189,16 @@

/** @type {State} */
/**
* In comment, after `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentClose(code) {
if (code === codes.dash) {
effects.consume(code)
return end
return commentEnd
}

@@ -168,7 +209,36 @@

/** @type {State} */
function cdataOpen(code) {
if (code === buffer.charCodeAt(index++)) {
/**
* In comment, after `--`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentEnd(code) {
return code === codes.greaterThan
? end(code)
: code === codes.dash
? commentClose(code)
: comment(code)
}
/**
* After `<![`, in CDATA, expecting `CDATA[`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
const value = constants.cdataOpeningString
if (code === value.charCodeAt(index++)) {
effects.consume(code)
return index === buffer.length ? cdata : cdataOpen
return index === value.length ? cdata : cdataOpenInside
}

@@ -179,3 +249,12 @@

/** @type {State} */
/**
* In CDATA.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^
* ```
*
* @type {State}
*/
function cdata(code) {

@@ -193,3 +272,3 @@ if (code === codes.eof) {

returnState = cdata
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -201,3 +280,12 @@

/** @type {State} */
/**
* In CDATA, after `]`, at another `]`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataClose(code) {

@@ -212,3 +300,12 @@ if (code === codes.rightSquareBracket) {

/** @type {State} */
/**
* In CDATA, after `]]`, at `>`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataEnd(code) {

@@ -227,3 +324,12 @@ if (code === codes.greaterThan) {

/** @type {State} */
/**
* In declaration.
*
* ```markdown
* > | a <!b> c
* ^
* ```
*
* @type {State}
*/
function declaration(code) {

@@ -236,3 +342,3 @@ if (code === codes.eof || code === codes.greaterThan) {

returnState = declaration
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -244,3 +350,12 @@

/** @type {State} */
/**
* In instruction.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instruction(code) {

@@ -258,3 +373,3 @@ if (code === codes.eof) {

returnState = instruction
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -266,3 +381,12 @@

/** @type {State} */
/**
* In instruction, after `?`, at `>`.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instructionClose(code) {

@@ -272,4 +396,14 @@ return code === codes.greaterThan ? end(code) : instruction(code)

/** @type {State} */
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
// ASCII alphabetical.
if (asciiAlpha(code)) {

@@ -283,4 +417,14 @@ effects.consume(code)

/** @type {State} */
/**
* After `</x`, in a tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagClose(code) {
// ASCII alphanumerical and `-`.
if (code === codes.dash || asciiAlphanumeric(code)) {

@@ -294,7 +438,16 @@ effects.consume(code)

/** @type {State} */
/**
* In closing tag, after tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseBetween(code) {
if (markdownLineEnding(code)) {
returnState = tagCloseBetween
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -310,4 +463,14 @@

/** @type {State} */
/**
* After `<x`, in opening tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpen(code) {
// ASCII alphanumerical and `-`.
if (code === codes.dash || asciiAlphanumeric(code)) {

@@ -329,3 +492,12 @@ effects.consume(code)

/** @type {State} */
/**
* In opening tag, after tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpenBetween(code) {

@@ -337,2 +509,3 @@ if (code === codes.slash) {

// ASCII alphabetical and `:` and `_`.
if (code === codes.colon || code === codes.underscore || asciiAlpha(code)) {

@@ -345,3 +518,3 @@ effects.consume(code)

returnState = tagOpenBetween
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -357,4 +530,14 @@

/** @type {State} */
/**
* In attribute name.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeName(code) {
// ASCII alphabetical and `-`, `.`, `:`, and `_`.
if (

@@ -374,3 +557,13 @@ code === codes.dash ||

/** @type {State} */
/**
* After attribute name, before initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeNameAfter(code) {

@@ -384,3 +577,3 @@ if (code === codes.equalsTo) {

returnState = tagOpenAttributeNameAfter
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -396,3 +589,13 @@

/** @type {State} */
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueBefore(code) {

@@ -417,3 +620,3 @@ if (

returnState = tagOpenAttributeValueBefore
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -427,10 +630,19 @@

effects.consume(code)
marker = undefined
return tagOpenAttributeValueUnquoted
}
/** @type {State} */
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuoted(code) {
if (code === marker) {
effects.consume(code)
marker = undefined
return tagOpenAttributeValueQuotedAfter

@@ -445,3 +657,3 @@ }

returnState = tagOpenAttributeValueQuoted
return atLineEnding(code)
return lineEndingBefore(code)
}

@@ -453,16 +665,12 @@

/** @type {State} */
function tagOpenAttributeValueQuotedAfter(code) {
if (
code === codes.greaterThan ||
code === codes.slash ||
markdownLineEndingOrSpace(code)
) {
return tagOpenBetween(code)
}
return nok(code)
}
/** @type {State} */
/**
* In unquoted attribute value.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueUnquoted(code) {

@@ -480,3 +688,7 @@ if (

if (code === codes.greaterThan || markdownLineEndingOrSpace(code)) {
if (
code === codes.slash ||
code === codes.greaterThan ||
markdownLineEndingOrSpace(code)
) {
return tagOpenBetween(code)

@@ -489,29 +701,35 @@ }

// We can’t have blank lines in content, so no need to worry about empty
// tokens.
/** @type {State} */
function atLineEnding(code) {
assert(returnState, 'expected return state')
assert(markdownLineEnding(code), 'expected eol')
effects.exit(types.htmlTextData)
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return factorySpace(
effects,
afterPrefix,
types.linePrefix,
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
}
/**
* After double or single quoted attribute value, before whitespace or the end
* of the tag.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuotedAfter(code) {
if (
code === codes.slash ||
code === codes.greaterThan ||
markdownLineEndingOrSpace(code)
) {
return tagOpenBetween(code)
}
/** @type {State} */
function afterPrefix(code) {
effects.enter(types.htmlTextData)
return returnState(code)
return nok(code)
}
/** @type {State} */
/**
* In certain circumstances of a tag where only an `>` is allowed.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function end(code) {

@@ -527,2 +745,77 @@ if (code === codes.greaterThan) {

}
/**
* At eol.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* > | a <!--a
* ^
* | b-->
* ```
*
* @type {State}
*/
function lineEndingBefore(code) {
assert(returnState, 'expected return state')
assert(markdownLineEnding(code), 'expected eol')
effects.exit(types.htmlTextData)
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return lineEndingAfter
}
/**
* After eol, at optional whitespace.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfter(code) {
// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return markdownSpace(code)
? factorySpace(
effects,
lineEndingAfterPrefix,
types.linePrefix,
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)(code)
: lineEndingAfterPrefix(code)
}
/**
* After eol, after optional whitespace.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfterPrefix(code) {
effects.enter(types.htmlTextData)
return returnState(code)
}
}
/** @type {Construct} */
export const labelEnd: Construct
export type Construct = import('micromark-util-types').Construct
export type Event = import('micromark-util-types').Event
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Event = import('micromark-util-types').Event
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {factoryDestination} from 'micromark-factory-destination'

@@ -23,2 +22,3 @@ import {factoryLabel} from 'micromark-factory-label'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -36,5 +36,5 @@ /** @type {Construct} */

/** @type {Construct} */
const fullReferenceConstruct = {tokenize: tokenizeFullReference}
const referenceFullConstruct = {tokenize: tokenizeReferenceFull}
/** @type {Construct} */
const collapsedReferenceConstruct = {tokenize: tokenizeCollapsedReference}
const referenceCollapsedConstruct = {tokenize: tokenizeReferenceCollapsed}

@@ -44,7 +44,5 @@ /** @type {Resolver} */

let index = -1
/** @type {Token} */
let token
while (++index < events.length) {
token = events[index][1]
const token = events[index][1]

@@ -72,7 +70,7 @@ if (

let token
/** @type {number|undefined} */
/** @type {number | undefined} */
let open
/** @type {number|undefined} */
/** @type {number | undefined} */
let close
/** @type {Event[]} */
/** @type {Array<Event>} */
let media

@@ -148,2 +146,7 @@

// Always populated by defaults.
assert(
context.parser.constructs.insideSpan.null,
'expected `insideSpan.null` to be populated'
)
// Between.

@@ -178,3 +181,6 @@ media = push(

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelEnd(effects, ok, nok) {

@@ -202,6 +208,21 @@ const self = this

/** @type {State} */
/**
* Start of label end.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ```
*
* @type {State}
*/
function start(code) {
assert(code === codes.rightSquareBracket, 'expected `]`')
// If there is not an okay opening.
if (!labelStart) {

@@ -211,4 +232,15 @@ return nok(code)

// It’s a balanced bracket, but contains a link.
if (labelStart._inactive) return balanced(code)
// If the corresponding label (link) start is marked as inactive,
// it means we’d be wrapping a link, like this:
//
// ```markdown
// > | a [b [c](d) e](f) g.
// ^
// ```
//
// We can’t have that, so it’s just balanced brackets.
if (labelStart._inactive) {
return labelEndNok(code)
}
defined = self.parser.defined.includes(

@@ -224,33 +256,107 @@ normalizeIdentifier(

effects.exit(types.labelEnd)
return afterLabelEnd
return after
}
/** @type {State} */
function afterLabelEnd(code) {
// Resource: `[asd](fgh)`.
/**
* After `]`.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function after(code) {
// Note: `markdown-rs` also parses GFM footnotes here, which for us is in
// an extension.
// Resource (`[asd](fgh)`)?
if (code === codes.leftParenthesis) {
return effects.attempt(
resourceConstruct,
ok,
defined ? ok : balanced
labelEndOk,
defined ? labelEndOk : labelEndNok
)(code)
}
// Collapsed (`[asd][]`) or full (`[asd][fgh]`) reference?
// Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?
if (code === codes.leftSquareBracket) {
return effects.attempt(
fullReferenceConstruct,
ok,
defined
? effects.attempt(collapsedReferenceConstruct, ok, balanced)
: balanced
referenceFullConstruct,
labelEndOk,
defined ? referenceNotFull : labelEndNok
)(code)
}
// Shortcut reference: `[asd]`?
return defined ? ok(code) : balanced(code)
// Shortcut (`[asd]`) reference?
return defined ? labelEndOk(code) : labelEndNok(code)
}
/** @type {State} */
function balanced(code) {
/**
* After `]`, at `[`, but not at a full reference.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function referenceNotFull(code) {
return effects.attempt(
referenceCollapsedConstruct,
labelEndOk,
labelEndNok
)(code)
}
/**
* Done, we found something.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndOk(code) {
// Note: `markdown-rs` does a bunch of stuff here.
return ok(code)
}
/**
* Done, it’s nothing.
*
* There was an okay opening, but we didn’t match anything.
*
* ```markdown
* > | [a](b c
* ^
* > | [a][b c
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndNok(code) {
labelStart._balanced = true

@@ -261,8 +367,20 @@ return nok(code)

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeResource(effects, ok, nok) {
return start
return resourceStart
/** @type {State} */
function start(code) {
/**
* At a resource.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceStart(code) {
assert(code === codes.leftParenthesis, 'expected left paren')

@@ -273,9 +391,34 @@ effects.enter(types.resource)

effects.exit(types.resourceMarker)
return factoryWhitespace(effects, open)
return resourceBefore
}
/** @type {State} */
function open(code) {
/**
* In resource, after `(`, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceBefore(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, resourceOpen)(code)
: resourceOpen(code)
}
/**
* In resource, after optional whitespace, at `)` or a destination.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceOpen(code) {
if (code === codes.rightParenthesis) {
return end(code)
return resourceEnd(code)
}

@@ -285,4 +428,4 @@

effects,
destinationAfter,
nok,
resourceDestinationAfter,
resourceDestinationMissing,
types.resourceDestination,

@@ -297,11 +440,43 @@ types.resourceDestinationLiteral,

/** @type {State} */
function destinationAfter(code) {
/**
* In resource, after destination, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceDestinationAfter(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, between)(code)
: end(code)
? factoryWhitespace(effects, resourceBetween)(code)
: resourceEnd(code)
}
/** @type {State} */
function between(code) {
/**
* At invalid destination.
*
* ```markdown
* > | [a](<<) b
* ^
* ```
*
* @type {State}
*/
function resourceDestinationMissing(code) {
return nok(code)
}
/**
* In resource, after destination and whitespace, at `(` or title.
*
* ```markdown
* > | [a](b ) c
* ^
* ```
*
* @type {State}
*/
function resourceBetween(code) {
if (

@@ -314,3 +489,3 @@ code === codes.quotationMark ||

effects,
factoryWhitespace(effects, end),
resourceTitleAfter,
nok,

@@ -323,7 +498,32 @@ types.resourceTitle,

return end(code)
return resourceEnd(code)
}
/** @type {State} */
function end(code) {
/**
* In resource, after title, at optional whitespace.
*
* ```markdown
* > | [a](b "c") d
* ^
* ```
*
* @type {State}
*/
function resourceTitleAfter(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, resourceEnd)(code)
: resourceEnd(code)
}
/**
* In resource, at `)`.
*
* ```markdown
* > | [a](b) d
* ^
* ```
*
* @type {State}
*/
function resourceEnd(code) {
if (code === codes.rightParenthesis) {

@@ -341,10 +541,22 @@ effects.enter(types.resourceMarker)

/** @type {Tokenizer} */
function tokenizeFullReference(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeReferenceFull(effects, ok, nok) {
const self = this
return start
return referenceFull
/** @type {State} */
function start(code) {
/**
* In a reference (full), at the `[`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFull(code) {
assert(code === codes.leftSquareBracket, 'expected left bracket')

@@ -354,4 +566,4 @@ return factoryLabel.call(

effects,
afterLabel,
nok,
referenceFullAfter,
referenceFullMissing,
types.reference,

@@ -363,4 +575,13 @@ types.referenceMarker,

/** @type {State} */
function afterLabel(code) {
/**
* In a reference (full), after `]`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFullAfter(code) {
return self.parser.defined.includes(

@@ -374,10 +595,39 @@ normalizeIdentifier(

}
/**
* In reference (full) that was missing.
*
* ```markdown
* > | [a][b d
* ^
* ```
*
* @type {State}
*/
function referenceFullMissing(code) {
return nok(code)
}
}
/** @type {Tokenizer} */
function tokenizeCollapsedReference(effects, ok, nok) {
return start
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeReferenceCollapsed(effects, ok, nok) {
return referenceCollapsedStart
/** @type {State} */
function start(code) {
/**
* In reference (collapsed), at `[`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedStart(code) {
// We only attempt a collapsed label if there’s a `[`.
assert(code === codes.leftSquareBracket, 'expected left bracket')

@@ -388,7 +638,18 @@ effects.enter(types.reference)

effects.exit(types.referenceMarker)
return open
return referenceCollapsedOpen
}
/** @type {State} */
function open(code) {
/**
* In reference (collapsed), at `]`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedOpen(code) {
if (code === codes.rightSquareBracket) {

@@ -395,0 +656,0 @@ effects.enter(types.referenceMarker)

/** @type {Construct} */
export const labelStartImage: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
import {labelEnd} from './label-end.js'

@@ -19,3 +20,6 @@

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelStartImage(effects, ok, nok) {

@@ -26,3 +30,12 @@ const self = this

/** @type {State} */
/**
* Start of label (image) start.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -37,3 +50,12 @@ assert(code === codes.exclamationMark, 'expected `!`')

/** @type {State} */
/**
* After `!`, at `[`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -51,8 +73,34 @@ if (code === codes.leftSquareBracket) {

/** @type {State} */
/**
* After `![`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* This is needed in because, when GFM footnotes are enabled, images never
* form when started with a `^`.
* Instead, links form:
*
* ```markdown
* ![^a](b)
*
* ![^a][b]
*
* [b]: c
* ```
*
* ```html
* <p>!<a href=\"b\">^a</a></p>
* <p>!<a href=\"c\">^a</a></p>
* ```
*
* @type {State}
*/
function after(code) {
/* To do: remove in the future once we’ve switched from
* `micromark-extension-footnote` to `micromark-extension-gfm-footnote`,
* which doesn’t need this */
/* Hidden footnotes hook */
// To do: use a new field to do this, this is still needed for
// `micromark-extension-gfm-footnote`, but the `label-start-link`
// behavior isn’t.
// Hidden footnotes hook.
/* c8 ignore next 3 */

@@ -59,0 +107,0 @@ return code === codes.caret &&

/** @type {Construct} */
export const labelStartLink: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
import {labelEnd} from './label-end.js'

@@ -19,3 +20,6 @@

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelStartLink(effects, ok, nok) {

@@ -26,3 +30,12 @@ const self = this

/** @type {State} */
/**
* Start of label (link) start.
*
* ```markdown
* > | a [b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -40,6 +53,5 @@ assert(code === codes.leftSquareBracket, 'expected `[`')

function after(code) {
/* To do: remove in the future once we’ve switched from
* `micromark-extension-footnote` to `micromark-extension-gfm-footnote`,
* which doesn’t need this */
/* Hidden footnotes hook. */
// To do: this isn’t needed in `micromark-extension-gfm-footnote`,
// remove.
// Hidden footnotes hook.
/* c8 ignore next 3 */

@@ -46,0 +58,0 @@ return code === codes.caret &&

/** @type {Construct} */
export const lineEnding: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -15,3 +16,6 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLineEnding(effects, ok) {

@@ -18,0 +22,0 @@ return start

/** @type {Construct} */
export const list: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type ContainerState = import('micromark-util-types').ContainerState
export type Exiter = import('micromark-util-types').Exiter
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Exiter = import('micromark-util-types').Exiter
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
export type ListContainerState = Record<string, unknown> & {
marker: Code
type: string
size: number
}
export type TokenizeContextWithState = TokenizeContext & {
containerState: ListContainerState
}
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ContainerState} ContainerState
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
/**
* @typedef {Record<string, unknown> & {marker: Code, type: string, size: number}} ListContainerState
* @typedef {TokenizeContext & {containerState: ListContainerState}} TokenizeContextWithState
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -21,2 +16,3 @@ import {asciiDigit, markdownSpace} from 'micromark-util-character'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'
import {blankLine} from './blank-line.js'

@@ -42,5 +38,8 @@ import {thematicBreak} from './thematic-break.js'

// To do: `markdown-rs` parses list items on their own and later stitches them
// together.
/**
* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/

@@ -60,2 +59,3 @@ function tokenizeListStart(effects, ok, nok) {

function start(code) {
assert(self.containerState, 'expected state')
const kind =

@@ -96,2 +96,3 @@ self.containerState.type ||

function inside(code) {
assert(self.containerState, 'expected state')
if (asciiDigit(code) && ++size < constants.listItemValueSizeMax) {

@@ -119,2 +120,3 @@ effects.consume(code)

function atMarker(code) {
assert(self.containerState, 'expected state')
assert(code !== codes.eof, 'eof (`null`) is not a marker')

@@ -139,2 +141,3 @@ effects.enter(types.listItemMarker)

function onBlank(code) {
assert(self.containerState, 'expected state')
self.containerState.initialBlankLine = true

@@ -159,2 +162,3 @@ initialSize++

function endOfPrefix(code) {
assert(self.containerState, 'expected state')
self.containerState.size =

@@ -169,3 +173,3 @@ initialSize +

* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/

@@ -175,2 +179,3 @@ function tokenizeListContinuation(effects, ok, nok) {

assert(self.containerState, 'expected state')
self.containerState._closeFlow = undefined

@@ -182,2 +187,4 @@

function onBlank(code) {
assert(self.containerState, 'expected state')
assert(typeof self.containerState.size === 'number', 'expected size')
self.containerState.furtherBlankLines =

@@ -199,2 +206,3 @@ self.containerState.furtherBlankLines ||

function notBlank(code) {
assert(self.containerState, 'expected state')
if (self.containerState.furtherBlankLines || !markdownSpace(code)) {

@@ -213,2 +221,3 @@ self.containerState.furtherBlankLines = undefined

function notInCurrentItem(code) {
assert(self.containerState, 'expected state')
// While we do continue, we signal that the flow should be closed.

@@ -218,2 +227,7 @@ self.containerState._closeFlow = true

self.interrupt = undefined
// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return factorySpace(

@@ -232,3 +246,3 @@ effects,

* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/

@@ -238,2 +252,5 @@ function tokenizeIndent(effects, ok, nok) {

assert(self.containerState, 'expected state')
assert(typeof self.containerState.size === 'number', 'expected size')
return factorySpace(

@@ -248,2 +265,3 @@ effects,

function afterPrefix(code) {
assert(self.containerState, 'expected state')
const tail = self.events[self.events.length - 1]

@@ -260,5 +278,7 @@ return tail &&

* @type {Exiter}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
function tokenizeListEnd(effects) {
assert(this.containerState, 'expected state')
assert(typeof this.containerState.type === 'string', 'expected type')
effects.exit(this.containerState.type)

@@ -269,3 +289,3 @@ }

* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/

@@ -275,2 +295,8 @@ function tokenizeListItemPrefixWhitespace(effects, ok, nok) {

// Always populated by defaults.
assert(
self.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return factorySpace(

@@ -277,0 +303,0 @@ effects,

/** @type {Construct} */
export const setextUnderline: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -24,8 +25,9 @@ /** @type {Construct} */

function resolveToSetextUnderline(events, context) {
// To do: resolve like `markdown-rs`.
let index = events.length
/** @type {number|undefined} */
/** @type {number | undefined} */
let content
/** @type {number|undefined} */
/** @type {number | undefined} */
let text
/** @type {number|undefined} */
/** @type {number | undefined} */
let definition

@@ -87,29 +89,29 @@

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeSetextUnderline(effects, ok, nok) {
const self = this
let index = self.events.length
/** @type {NonNullable<Code>} */
let marker
/** @type {boolean} */
let paragraph
// Find an opening.
while (index--) {
// Skip enter/exit of line ending, line prefix, and content.
// We can now either have a definition or a paragraph.
if (
self.events[index][1].type !== types.lineEnding &&
self.events[index][1].type !== types.linePrefix &&
self.events[index][1].type !== types.content
) {
paragraph = self.events[index][1].type === types.paragraph
break
}
}
return start
/** @type {State} */
/**
* At start of heading (setext) underline.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function start(code) {
let index = self.events.length
/** @type {boolean | undefined} */
let paragraph
assert(

@@ -120,7 +122,22 @@ code === codes.dash || code === codes.equalsTo,

// Find an opening.
while (index--) {
// Skip enter/exit of line ending, line prefix, and content.
// We can now either have a definition or a paragraph.
if (
self.events[index][1].type !== types.lineEnding &&
self.events[index][1].type !== types.linePrefix &&
self.events[index][1].type !== types.content
) {
paragraph = self.events[index][1].type === types.paragraph
break
}
}
// To do: handle lazy/pierce like `markdown-rs`.
// To do: parse indent like `markdown-rs`.
if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) {
effects.enter(types.setextHeadingLine)
effects.enter(types.setextHeadingLineSequence)
marker = code
return closingSequence(code)
return before(code)
}

@@ -131,15 +148,54 @@

/** @type {State} */
function closingSequence(code) {
/**
* After optional whitespace, at `-` or `=`.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter(types.setextHeadingLineSequence)
return inside(code)
}
/**
* In sequence.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code)
return closingSequence
return inside
}
effects.exit(types.setextHeadingLineSequence)
return factorySpace(effects, closingSequenceEnd, types.lineSuffix)(code)
return markdownSpace(code)
? factorySpace(effects, after, types.lineSuffix)(code)
: after(code)
}
/** @type {State} */
function closingSequenceEnd(code) {
/**
* After sequence, after optional whitespace.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function after(code) {
if (code === codes.eof || markdownLineEnding(code)) {

@@ -146,0 +202,0 @@ effects.exit(types.setextHeadingLine)

/** @type {Construct} */
export const thematicBreak: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'

@@ -14,2 +14,3 @@ import {markdownLineEnding, markdownSpace} from 'micromark-util-character'

import {types} from 'micromark-util-symbol/types.js'
import {ok as assert} from 'uvu/assert'

@@ -22,3 +23,6 @@ /** @type {Construct} */

/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeThematicBreak(effects, ok, nok) {

@@ -31,4 +35,29 @@ let size = 0

/** @type {State} */
/**
* Start of thematic break.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter(types.thematicBreak)
// To do: parse indent like `markdown-rs`.
return before(code)
}
/**
* After optional whitespace, at marker.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function before(code) {
assert(

@@ -40,4 +69,2 @@ code === codes.asterisk ||

)
effects.enter(types.thematicBreak)
marker = code

@@ -47,3 +74,12 @@ return atBreak(code)

/** @type {State} */
/**
* After something, before something else.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {

@@ -55,18 +91,23 @@ if (code === marker) {

if (markdownSpace(code)) {
return factorySpace(effects, atBreak, types.whitespace)(code)
}
if (
size < constants.thematicBreakMarkerCountMin ||
(code !== codes.eof && !markdownLineEnding(code))
size >= constants.thematicBreakMarkerCountMin &&
(code === codes.eof || markdownLineEnding(code))
) {
return nok(code)
effects.exit(types.thematicBreak)
return ok(code)
}
effects.exit(types.thematicBreak)
return ok(code)
return nok(code)
}
/** @type {State} */
/**
* In sequence.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function sequence(code) {

@@ -80,4 +121,6 @@ if (code === marker) {

effects.exit(types.thematicBreakSequence)
return atBreak(code)
return markdownSpace(code)
? factorySpace(effects, atBreak, types.whitespace)(code)
: atBreak(code)
}
}
/** @type {Construct} */
export const attention: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Event = import('micromark-util-types').Event
export type Point = import('micromark-util-types').Point
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type Event = import('micromark-util-types').Event
export type Code = import('micromark-util-types').Code
export type Point = import('micromark-util-types').Point
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
import {push, splice} from 'micromark-util-chunked'
import {classifyCharacter} from 'micromark-util-classify-character'
import {resolveAll} from 'micromark-util-resolve-all'
/** @type {Construct} */

@@ -21,2 +22,3 @@ export const attention = {

}
/**

@@ -27,33 +29,25 @@ * Take all events and resolve attention to emphasis or strong.

*/
function resolveAllAttention(events, context) {
let index = -1
/** @type {number} */
let open
/** @type {Token} */
let group
/** @type {Token} */
let text
/** @type {Token} */
let openingSequence
/** @type {Token} */
let closingSequence
/** @type {number} */
let use
/** @type {Event[]} */
/** @type {Array<Event>} */
let nextEvents
/** @type {number} */
let offset
let offset // Walk through all events.
// Walk through all events.
//
// Note: performance of this is fine on an mb of normal markdown, but it’s
// a bottleneck for malicious stuff.
while (++index < events.length) {

@@ -66,4 +60,5 @@ // Find a token that can close.

) {
open = index // Now walk back to find an opener.
open = index
// Now walk back to find an opener.
while (open--) {

@@ -74,3 +69,4 @@ // Find a token that can open the closer.

events[open][1].type === 'attentionSequence' &&
events[open][1]._open && // If the markers are the same:
events[open][1]._open &&
// If the markers are the same:
context.sliceSerialize(events[open][1]).charCodeAt(0) ===

@@ -95,4 +91,5 @@ context.sliceSerialize(events[index][1]).charCodeAt(0)

continue
} // Number of markers to use from the sequence.
}
// Number of markers to use from the sequence.
use =

@@ -129,4 +126,5 @@ events[open][1].end.offset - events[open][1].start.offset > 1 &&

events[index][1].start = Object.assign({}, closingSequence.end)
nextEvents = [] // If there are more markers in the opening, add them before.
nextEvents = []
// If there are more markers in the opening, add them before.
if (events[open][1].end.offset - events[open][1].start.offset) {

@@ -137,4 +135,5 @@ nextEvents = push(nextEvents, [

])
} // Opening.
}
// Opening.
nextEvents = push(nextEvents, [

@@ -145,4 +144,7 @@ ['enter', group, context],

['enter', text, context]
]) // Between.
])
// Always populated by defaults.
// Between.
nextEvents = push(

@@ -155,4 +157,5 @@ nextEvents,

)
) // Closing.
)
// Closing.
nextEvents = push(nextEvents, [

@@ -163,4 +166,5 @@ ['exit', text, context],

['exit', group, context]
]) // If there are more markers in the closing, add them after.
])
// If there are more markers in the closing, add them after.
if (events[index][1].end.offset - events[index][1].start.offset) {

@@ -175,3 +179,2 @@ offset = 2

}
splice(events, open - 1, index - open + 3, nextEvents)

@@ -183,6 +186,6 @@ index = open + nextEvents.length - offset - 2

}
} // Remove remaining sequences.
}
// Remove remaining sequences.
index = -1
while (++index < events.length) {

@@ -193,7 +196,9 @@ if (events[index][1].type === 'attentionSequence') {

}
return events
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeAttention(effects, ok) {

@@ -203,23 +208,45 @@ const attentionMarkers = this.parser.constructs.attentionMarkers.null

const before = classifyCharacter(previous)
/** @type {NonNullable<Code>} */
let marker
return start
/** @type {State} */
/**
* Before a sequence.
*
* ```markdown
* > | **
* ^
* ```
*
* @type {State}
*/
function start(code) {
marker = code
effects.enter('attentionSequence')
marker = code
return sequence(code)
return inside(code)
}
/** @type {State} */
function sequence(code) {
/**
* In a sequence.
*
* ```markdown
* > | **
* ^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code)
return sequence
return inside
}
const token = effects.exit('attentionSequence')
const token = effects.exit('attentionSequence')
// To do: next major: move this to resolver, just like `markdown-rs`.
const after = classifyCharacter(code)
// Always populated by defaults.
const open =

@@ -234,2 +261,3 @@ !after || (after === 2 && before) || attentionMarkers.includes(code)

}
/**

@@ -245,3 +273,2 @@ * Move a point a bit.

*/
function movePoint(point, offset) {

@@ -248,0 +275,0 @@ point.column += offset

/** @type {Construct} */
export const autolink: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {

@@ -12,3 +14,2 @@ asciiAlpha,

} from 'micromark-util-character'
/** @type {Construct} */

@@ -19,9 +20,23 @@ export const autolink = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeAutolink(effects, ok, nok) {
let size = 1
let size = 0
return start
/** @type {State} */
/**
* Start of an autolink.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -35,4 +50,15 @@ effects.enter('autolink')

}
/** @type {State} */
/**
* After `<`, at protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -43,20 +69,47 @@ if (asciiAlpha(code)) {

}
return asciiAtext(code) ? emailAtext(code) : nok(code)
return emailAtext(code)
}
/** @type {State} */
/**
* At second byte of protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeOrEmailAtext(code) {
return code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)
? schemeInsideOrEmailAtext(code)
: emailAtext(code)
// ASCII alphanumeric and `+`, `-`, and `.`.
if (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) {
// Count the previous alphabetical from `open` too.
size = 1
return schemeInsideOrEmailAtext(code)
}
return emailAtext(code)
}
/** @type {State} */
/**
* In ambiguous protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeInsideOrEmailAtext(code) {
if (code === 58) {
effects.consume(code)
size = 0
return urlInside
}
// ASCII alphanumeric and `+`, `-`, and `.`.
if (

@@ -69,29 +122,49 @@ (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) &&

}
size = 0
return emailAtext(code)
}
/** @type {State} */
/**
* After protocol, in URL.
*
* ```markdown
* > | a<https://example.com>b
* ^
* ```
*
* @type {State}
*/
function urlInside(code) {
if (code === 62) {
effects.exit('autolinkProtocol')
return end(code)
effects.enter('autolinkMarker')
effects.consume(code)
effects.exit('autolinkMarker')
effects.exit('autolink')
return ok
}
// ASCII control, space, or `<`.
if (code === null || code === 32 || code === 60 || asciiControl(code)) {
return nok(code)
}
effects.consume(code)
return urlInside
}
/** @type {State} */
/**
* In email atext.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailAtext(code) {
if (code === 64) {
effects.consume(code)
size = 0
return emailAtSignOrDot
}
if (asciiAtext(code)) {

@@ -101,12 +174,29 @@ effects.consume(code)

}
return nok(code)
}
/** @type {State} */
/**
* In label, after at-sign or dot.
*
* ```markdown
* > | a<user.name@example.com>b
* ^ ^
* ```
*
* @type {State}
*/
function emailAtSignOrDot(code) {
return asciiAlphanumeric(code) ? emailLabel(code) : nok(code)
}
/** @type {State} */
/**
* In label, where `.` and `>` are allowed.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailLabel(code) {

@@ -118,30 +208,35 @@ if (code === 46) {

}
if (code === 62) {
// Exit, then change the type.
// Exit, then change the token type.
effects.exit('autolinkProtocol').type = 'autolinkEmail'
return end(code)
effects.enter('autolinkMarker')
effects.consume(code)
effects.exit('autolinkMarker')
effects.exit('autolink')
return ok
}
return emailValue(code)
}
/** @type {State} */
/**
* In label, where `.` and `>` are *not* allowed.
*
* Though, this is also used in `emailLabel` to parse other values.
*
* ```markdown
* > | a<user.name@ex-ample.com>b
* ^
* ```
*
* @type {State}
*/
function emailValue(code) {
// ASCII alphanumeric or `-`.
if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) {
const next = code === 45 ? emailValue : emailLabel
effects.consume(code)
return code === 45 ? emailValue : emailLabel
return next
}
return nok(code)
}
/** @type {State} */
function end(code) {
effects.enter('autolinkMarker')
effects.consume(code)
effects.exit('autolinkMarker')
effects.exit('autolink')
return ok
}
}
/** @type {Construct} */
export const blankLine: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
/** @type {Construct} */

@@ -14,11 +15,47 @@ export const blankLine = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlankLine(effects, ok, nok) {
return factorySpace(effects, afterWhitespace, 'linePrefix')
/** @type {State} */
return start
function afterWhitespace(code) {
/**
* Start of blank line.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function start(code) {
return markdownSpace(code)
? factorySpace(effects, after, 'linePrefix')(code)
: after(code)
}
/**
* At eof/eol, after optional whitespace.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function after(code) {
return code === null || markdownLineEnding(code) ? ok(code) : nok(code)
}
}
/** @type {Construct} */
export const blockQuote: Construct
export type Construct = import('micromark-util-types').Construct
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Exiter = import('micromark-util-types').Exiter
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownSpace} from 'micromark-util-character'
/** @type {Construct} */

@@ -19,13 +20,24 @@ export const blockQuote = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlockQuoteStart(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* Start of block quote.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function start(code) {
if (code === 62) {
const state = self.containerState
if (!state.open) {

@@ -37,3 +49,2 @@ effects.enter('blockQuote', {

}
effects.enter('blockQuotePrefix')

@@ -45,7 +56,15 @@ effects.enter('blockQuoteMarker')

}
return nok(code)
}
/** @type {State} */
/**
* After `>`, before optional whitespace.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function after(code) {

@@ -59,3 +78,2 @@ if (markdownSpace(code)) {

}
effects.exit('blockQuotePrefix')

@@ -65,16 +83,69 @@ return ok(code)

}
/** @type {Tokenizer} */
/**
* Start of block quote continuation.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
return factorySpace(
effects,
effects.attempt(blockQuote, ok, nok),
'linePrefix',
this.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4
)
const self = this
return contStart
/**
* Start of block quote continuation.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contStart(code) {
if (markdownSpace(code)) {
// Always populated by defaults.
return factorySpace(
effects,
contBefore,
'linePrefix',
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: 4
)(code)
}
return contBefore(code)
}
/**
* At `>`, after optional whitespace.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contBefore(code) {
return effects.attempt(blockQuote, ok, nok)(code)
}
}
/** @type {Exiter} */
function exit(effects) {
effects.exit('blockQuote')
}
/** @type {Construct} */
export const characterEscape: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {asciiPunctuation} from 'micromark-util-character'
/** @type {Construct} */

@@ -13,8 +14,20 @@ export const characterEscape = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCharacterEscape(effects, ok, nok) {
return start
/** @type {State} */
/**
* Start of character escape.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -25,7 +38,17 @@ effects.enter('characterEscape')

effects.exit('escapeMarker')
return open
return inside
}
/** @type {State} */
function open(code) {
/**
* After `\`, at punctuation.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function inside(code) {
// ASCII punctuation.
if (asciiPunctuation(code)) {

@@ -38,5 +61,4 @@ effects.enter('characterEscapeValue')

}
return nok(code)
}
}
/** @type {Construct} */
export const characterReference: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {decodeNamedCharacterReference} from 'decode-named-character-reference'

@@ -14,3 +15,2 @@ import {

} from 'micromark-util-character'
/** @type {Construct} */

@@ -21,4 +21,7 @@ export const characterReference = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCharacterReference(effects, ok, nok) {

@@ -28,10 +31,21 @@ const self = this

/** @type {number} */
let max
/** @type {(code: Code) => code is number} */
/** @type {(code: Code) => boolean} */
let test
return start
/** @type {State} */
/**
* Start of character reference.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -44,4 +58,18 @@ effects.enter('characterReference')

}
/** @type {State} */
/**
* After `&`, at `#` for numeric references or alphanumeric for named
* references.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -54,3 +82,2 @@ if (code === 35) {

}
effects.enter('characterReferenceValue')

@@ -61,4 +88,15 @@ max = 31

}
/** @type {State} */
/**
* After `#`, at `x` for hexadecimals or digit for decimals.
*
* ```markdown
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function numeric(code) {

@@ -74,3 +112,2 @@ if (code === 88 || code === 120) {

}
effects.enter('characterReferenceValue')

@@ -81,11 +118,23 @@ max = 7

}
/** @type {State} */
/**
* After markers (`&#x`, `&#`, or `&`), in value, before `;`.
*
* The character reference kind defines what and how many characters are
* allowed.
*
* ```markdown
* > | a&amp;b
* ^^^
* > | a&#123;b
* ^^^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function value(code) {
/** @type {Token} */
let token
if (code === 59 && size) {
token = effects.exit('characterReferenceValue')
const token = effects.exit('characterReferenceValue')
if (

@@ -98,2 +147,4 @@ test === asciiAlphanumeric &&

// To do: `markdown-rs` uses a different name:
// `CharacterReferenceMarkerSemi`.
effects.enter('characterReferenceMarker')

@@ -105,3 +156,2 @@ effects.consume(code)

}
if (test(code) && size++ < max) {

@@ -111,5 +161,4 @@ effects.consume(code)

}
return nok(code)
}
}
/** @type {Construct} */
export const codeFenced: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {factorySpace} from 'micromark-factory-space'
import {
markdownLineEnding,
markdownLineEndingOrSpace
} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
/** @type {Construct} */
const nonLazyContinuation = {
tokenize: tokenizeNonLazyContinuation,
partial: true
}

@@ -19,58 +23,108 @@ /** @type {Construct} */

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeFenced(effects, ok, nok) {
const self = this
/** @type {Construct} */
const closingFenceConstruct = {
tokenize: tokenizeClosingFence,
const closeStart = {
tokenize: tokenizeCloseStart,
partial: true
}
/** @type {Construct} */
const nonLazyLine = {
tokenize: tokenizeNonLazyLine,
partial: true
}
const tail = this.events[this.events.length - 1]
const initialPrefix =
tail && tail[1].type === 'linePrefix'
? tail[2].sliceSerialize(tail[1], true).length
: 0
let initialPrefix = 0
let sizeOpen = 0
/** @type {NonNullable<Code>} */
let marker
return start
/** @type {State} */
/**
* Start of code.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse whitespace like `markdown-rs`.
return beforeSequenceOpen(code)
}
/**
* In opening fence, after prefix, at sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function beforeSequenceOpen(code) {
const tail = self.events[self.events.length - 1]
initialPrefix =
tail && tail[1].type === 'linePrefix'
? tail[2].sliceSerialize(tail[1], true).length
: 0
marker = code
effects.enter('codeFenced')
effects.enter('codeFencedFence')
effects.enter('codeFencedFenceSequence')
marker = code
return sequenceOpen(code)
}
/** @type {State} */
/**
* In opening fence sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === marker) {
sizeOpen++
effects.consume(code)
sizeOpen++
return sequenceOpen
}
if (sizeOpen < 3) {
return nok(code)
}
effects.exit('codeFencedFenceSequence')
return sizeOpen < 3
? nok(code)
: factorySpace(effects, infoOpen, 'whitespace')(code)
return markdownSpace(code)
? factorySpace(effects, infoBefore, 'whitespace')(code)
: infoBefore(code)
}
/** @type {State} */
function infoOpen(code) {
/**
* In opening fence, after the sequence (and optional whitespace), before info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function infoBefore(code) {
if (code === null || markdownLineEnding(code)) {
return openAfter(code)
effects.exit('codeFencedFence')
return self.interrupt
? ok(code)
: effects.check(nonLazyContinuation, atNonLazyBreak, after)(code)
}
effects.enter('codeFencedFenceInfo')

@@ -82,22 +136,49 @@ effects.enter('chunkString', {

}
/** @type {State} */
/**
* In info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function info(code) {
if (code === null || markdownLineEndingOrSpace(code)) {
if (code === null || markdownLineEnding(code)) {
effects.exit('chunkString')
effects.exit('codeFencedFenceInfo')
return factorySpace(effects, infoAfter, 'whitespace')(code)
return infoBefore(code)
}
if (code === 96 && code === marker) return nok(code)
if (markdownSpace(code)) {
effects.exit('chunkString')
effects.exit('codeFencedFenceInfo')
return factorySpace(effects, metaBefore, 'whitespace')(code)
}
if (code === 96 && code === marker) {
return nok(code)
}
effects.consume(code)
return info
}
/** @type {State} */
function infoAfter(code) {
/**
* In opening fence, after info and whitespace, before meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function metaBefore(code) {
if (code === null || markdownLineEnding(code)) {
return openAfter(code)
return infoBefore(code)
}
effects.enter('codeFencedFenceMeta')

@@ -109,4 +190,15 @@ effects.enter('chunkString', {

}
/** @type {State} */
/**
* In meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function meta(code) {

@@ -116,57 +208,123 @@ if (code === null || markdownLineEnding(code)) {

effects.exit('codeFencedFenceMeta')
return openAfter(code)
return infoBefore(code)
}
if (code === 96 && code === marker) return nok(code)
if (code === 96 && code === marker) {
return nok(code)
}
effects.consume(code)
return meta
}
/** @type {State} */
function openAfter(code) {
effects.exit('codeFencedFence')
return self.interrupt ? ok(code) : contentStart(code)
/**
* At eol/eof in code, before a non-lazy closing fence or content.
*
* ```markdown
* > | ~~~js
* ^
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function atNonLazyBreak(code) {
return effects.attempt(closeStart, after, contentBefore)(code)
}
/** @type {State} */
/**
* Before code content, not a closing fence, at eol.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentBefore(code) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return contentStart
}
/**
* Before code content, not a closing fence.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentStart(code) {
if (code === null) {
return after(code)
}
return initialPrefix > 0 && markdownSpace(code)
? factorySpace(
effects,
beforeContentChunk,
'linePrefix',
initialPrefix + 1
)(code)
: beforeContentChunk(code)
}
if (markdownLineEnding(code)) {
return effects.attempt(
nonLazyLine,
effects.attempt(
closingFenceConstruct,
after,
initialPrefix
? factorySpace(
effects,
contentStart,
'linePrefix',
initialPrefix + 1
)
: contentStart
),
after
)(code)
/**
* Before code content, after optional prefix.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function beforeContentChunk(code) {
if (code === null || markdownLineEnding(code)) {
return effects.check(nonLazyContinuation, atNonLazyBreak, after)(code)
}
effects.enter('codeFlowValue')
return contentContinue(code)
return contentChunk(code)
}
/** @type {State} */
function contentContinue(code) {
/**
* In code content.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^^^^^^^^
* | ~~~
* ```
*
* @type {State}
*/
function contentChunk(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit('codeFlowValue')
return contentStart(code)
return beforeContentChunk(code)
}
effects.consume(code)
return contentContinue
return contentChunk
}
/** @type {State} */
/**
* After code.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function after(code) {

@@ -176,56 +334,112 @@ effects.exit('codeFenced')

}
/** @type {Tokenizer} */
function tokenizeNonLazyLine(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCloseStart(effects, ok, nok) {
let size = 0
return startBefore
function start(code) {
/**
*
*
* @type {State}
*/
function startBefore(code) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return lineStart
return start
}
/** @type {State} */
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/** @type {Tokenizer} */
/**
* Before closing fence, at optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Always populated by defaults.
function tokenizeClosingFence(effects, ok, nok) {
let size = 0
return factorySpace(
effects,
closingSequenceStart,
'linePrefix',
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: 4
)
/** @type {State} */
function closingSequenceStart(code) {
// To do: `enter` here or in next state?
effects.enter('codeFencedFence')
effects.enter('codeFencedFenceSequence')
return closingSequence(code)
return markdownSpace(code)
? factorySpace(
effects,
beforeSequenceClose,
'linePrefix',
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: 4
)(code)
: beforeSequenceClose(code)
}
/** @type {State} */
function closingSequence(code) {
/**
* In closing fence, after optional whitespace, at sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function beforeSequenceClose(code) {
if (code === marker) {
effects.enter('codeFencedFenceSequence')
return sequenceClose(code)
}
return nok(code)
}
/**
* In closing fence sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
if (code === marker) {
size++
effects.consume(code)
size++
return closingSequence
return sequenceClose
}
if (size < sizeOpen) return nok(code)
effects.exit('codeFencedFenceSequence')
return factorySpace(effects, closingSequenceEnd, 'whitespace')(code)
if (size >= sizeOpen) {
effects.exit('codeFencedFenceSequence')
return markdownSpace(code)
? factorySpace(effects, sequenceCloseAfter, 'whitespace')(code)
: sequenceCloseAfter(code)
}
return nok(code)
}
/** @type {State} */
function closingSequenceEnd(code) {
/**
* After closing fence sequence, after optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceCloseAfter(code) {
if (code === null || markdownLineEnding(code)) {

@@ -235,6 +449,38 @@ effects.exit('codeFencedFence')

}
return nok(code)
}
}
}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuation(effects, ok, nok) {
const self = this
return start
/**
*
*
* @type {State}
*/
function start(code) {
if (code === null) {
return nok(code)
}
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return lineStart
}
/**
*
*
* @type {State}
*/
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/** @type {Construct} */
export const codeIndented: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Resolver = import('micromark-util-types').Resolver
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
/** @type {Construct} */

@@ -16,22 +15,51 @@ export const codeIndented = {

}
/** @type {Construct} */
const indentedContent = {
tokenize: tokenizeIndentedContent,
const furtherStart = {
tokenize: tokenizeFurtherStart,
partial: true
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeIndented(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* Start of code (indented).
*
* > **Parsing note**: it is not needed to check if this first line is a
* > filled line (that it has a non-whitespace character), because blank lines
* > are parsed already, so we never run into that.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: manually check if interrupting like `markdown-rs`.
effects.enter('codeIndented')
return factorySpace(effects, afterStartPrefix, 'linePrefix', 4 + 1)(code)
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(effects, afterPrefix, 'linePrefix', 4 + 1)(code)
}
/** @type {State} */
function afterStartPrefix(code) {
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {
const tail = self.events[self.events.length - 1]

@@ -41,45 +69,77 @@ return tail &&

tail[2].sliceSerialize(tail[1], true).length >= 4
? afterPrefix(code)
? atBreak(code)
: nok(code)
}
/** @type {State} */
function afterPrefix(code) {
/**
* At a break.
*
* ```markdown
* > | aaa
* ^ ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === null) {
return after(code)
}
if (markdownLineEnding(code)) {
return effects.attempt(indentedContent, afterPrefix, after)(code)
return effects.attempt(furtherStart, atBreak, after)(code)
}
effects.enter('codeFlowValue')
return content(code)
return inside(code)
}
/** @type {State} */
function content(code) {
/**
* In code content.
*
* ```markdown
* > | aaa
* ^^^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit('codeFlowValue')
return afterPrefix(code)
return atBreak(code)
}
effects.consume(code)
return content
return inside
}
/** @type {State} */
function after(code) {
effects.exit('codeIndented')
// To do: allow interrupting like `markdown-rs`.
// Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code)
}
}
/** @type {Tokenizer} */
function tokenizeIndentedContent(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeFurtherStart(effects, ok, nok) {
const self = this
return start
/** @type {State} */
return furtherStart
function start(code) {
/**
* At eol, trying to parse another indent.
*
* ```markdown
* > | aaa
* ^
* | bbb
* ```
*
* @type {State}
*/
function furtherStart(code) {
// To do: improve `lazy` / `pierce` handling.
// If this is a lazy line, it can’t be code.

@@ -89,3 +149,2 @@ if (self.parser.lazy[self.now().line]) {

}
if (markdownLineEnding(code)) {

@@ -95,9 +154,24 @@ effects.enter('lineEnding')

effects.exit('lineEnding')
return start
return furtherStart
}
// To do: the code here in `micromark-js` is a bit different from
// `markdown-rs` because there it can attempt spaces.
// We can’t yet.
//
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(effects, afterPrefix, 'linePrefix', 4 + 1)(code)
}
/** @type {State} */
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {

@@ -110,5 +184,5 @@ const tail = self.events[self.events.length - 1]

: markdownLineEnding(code)
? start(code)
? furtherStart(code)
: nok(code)
}
}
/** @type {Construct} */
export const codeText: Construct
export type Construct = import('micromark-util-types').Construct
export type Previous = import('micromark-util-types').Previous
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Previous = import('micromark-util-types').Previous
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Previous} Previous
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Previous} Previous
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {markdownLineEnding} from 'micromark-util-character'
/** @type {Construct} */

@@ -18,4 +19,5 @@ export const codeText = {

}
// To do: next major: don’t resolve, like `markdown-rs`.
/** @type {Resolver} */
function resolveCodeText(events) {

@@ -25,8 +27,7 @@ let tailExitIndex = events.length - 4

/** @type {number} */
let index
/** @type {number|undefined} */
/** @type {number | undefined} */
let enter
let enter // If we start and end with an EOL or a space.
// If we start and end with an EOL or a space.
if (

@@ -38,4 +39,5 @@ (events[headEnterIndex][1].type === 'lineEnding' ||

) {
index = headEnterIndex // And we have data.
index = headEnterIndex
// And we have data.
while (++index < tailExitIndex) {

@@ -51,7 +53,7 @@ if (events[index][1].type === 'codeTextData') {

}
} // Merge adjacent spaces and data.
}
// Merge adjacent spaces and data.
index = headEnterIndex - 1
tailExitIndex++
while (++index <= tailExitIndex) {

@@ -67,3 +69,2 @@ if (enter === undefined) {

events[enter][1].type = 'codeTextData'
if (index !== enter + 2) {

@@ -75,11 +76,12 @@ events[enter][1].end = events[index - 1][1].end

}
enter = undefined
}
}
return events
}
/** @type {Previous} */
/**
* @this {TokenizeContext}
* @type {Previous}
*/
function previous(code) {

@@ -92,4 +94,7 @@ // If there is a previous code, there will always be a tail.

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeCodeText(effects, ok, nok) {

@@ -99,42 +104,64 @@ const self = this

/** @type {number} */
let size
/** @type {Token} */
let token
return start
/** @type {State} */
/**
* Start of code (text).
*
* ```markdown
* > | `a`
* ^
* > | \`a`
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter('codeText')
effects.enter('codeTextSequence')
return openingSequence(code)
return sequenceOpen(code)
}
/** @type {State} */
function openingSequence(code) {
/**
* In opening sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === 96) {
effects.consume(code)
sizeOpen++
return openingSequence
return sequenceOpen
}
effects.exit('codeTextSequence')
return gap(code)
return between(code)
}
/** @type {State} */
function gap(code) {
/**
* Between something and something else.
*
* ```markdown
* > | `a`
* ^^
* ```
*
* @type {State}
*/
function between(code) {
// EOF.
if (code === null) {
return nok(code)
} // Closing fence?
// Could also be data.
}
if (code === 96) {
token = effects.enter('codeTextSequence')
size = 0
return closingSequence(code)
} // Tabs don’t work, and virtual spaces don’t make sense.
// To do: next major: don’t do spaces in resolve, but when compiling,
// like `markdown-rs`.
// Tabs don’t work, and virtual spaces don’t make sense.
if (code === 32) {

@@ -144,5 +171,11 @@ effects.enter('space')

effects.exit('space')
return gap
return between
}
// Closing fence? Could also be data.
if (code === 96) {
token = effects.enter('codeTextSequence')
size = 0
return sequenceClose(code)
}
if (markdownLineEnding(code)) {

@@ -152,11 +185,20 @@ effects.enter('lineEnding')

effects.exit('lineEnding')
return gap
} // Data.
return between
}
// Data.
effects.enter('codeTextData')
return data(code)
} // In code.
}
/** @type {State} */
/**
* In data.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function data(code) {

@@ -170,12 +212,19 @@ if (

effects.exit('codeTextData')
return gap(code)
return between(code)
}
effects.consume(code)
return data
} // Closing fence.
}
/** @type {State} */
function closingSequence(code) {
/**
* In closing sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
// More.

@@ -185,5 +234,6 @@ if (code === 96) {

size++
return closingSequence
} // Done!
return sequenceClose
}
// Done!
if (size === sizeOpen) {

@@ -193,4 +243,5 @@ effects.exit('codeTextSequence')

return ok(code)
} // More or less accents: mark as data.
}
// More or less accents: mark as data.
token.type = 'codeTextData'

@@ -197,0 +248,0 @@ return data(code)

@@ -8,4 +8,5 @@ /**

export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {subtokenize} from 'micromark-util-subtokenize'
/**

@@ -20,4 +21,4 @@ * No name because it must not be turned off.

}
/** @type {Construct} */
const continuationConstruct = {

@@ -27,2 +28,3 @@ tokenize: tokenizeContinuation,

}
/**

@@ -34,3 +36,2 @@ * Content is transparent: it’s parsed right now. That way, definitions are also

*/
function resolveContent(events) {

@@ -40,11 +41,23 @@ subtokenize(events)

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContent(effects, ok) {
/** @type {Token} */
/** @type {Token | undefined} */
let previous
return start
/** @type {State} */
return chunkStart
function start(code) {
/**
* Before a content chunk.
*
* ```markdown
* > | abc
* ^
* ```
*
* @type {State}
*/
function chunkStart(code) {
effects.enter('content')

@@ -54,7 +67,16 @@ previous = effects.enter('chunkContent', {

})
return data(code)
return chunkInside(code)
}
/** @type {State} */
function data(code) {
/**
* In a content chunk.
*
* ```markdown
* > | abc
* ^^^
* ```
*
* @type {State}
*/
function chunkInside(code) {
if (code === null) {

@@ -64,2 +86,4 @@ return contentEnd(code)

// To do: in `markdown-rs`, each line is parsed on its own, and everything
// is stitched together resolving.
if (markdownLineEnding(code)) {

@@ -71,9 +95,14 @@ return effects.check(

)(code)
} // Data.
}
// Data.
effects.consume(code)
return data
return chunkInside
}
/** @type {State} */
/**
*
*
* @type {State}
*/
function contentEnd(code) {

@@ -84,4 +113,8 @@ effects.exit('chunkContent')

}
/** @type {State} */
/**
*
*
* @type {State}
*/
function contentContinue(code) {

@@ -95,12 +128,19 @@ effects.consume(code)

previous = previous.next
return data
return chunkInside
}
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeContinuation(effects, ok, nok) {
const self = this
return startLookahead
/** @type {State} */
/**
*
*
* @type {State}
*/
function startLookahead(code) {

@@ -113,4 +153,8 @@ effects.exit('chunkContent')

}
/** @type {State} */
/**
*
*
* @type {State}
*/
function prefixed(code) {

@@ -121,4 +165,5 @@ if (code === null || markdownLineEnding(code)) {

// Always populated by defaults.
const tail = self.events[self.events.length - 1]
if (

@@ -132,5 +177,4 @@ !self.parser.constructs.disable.null.includes('codeIndented') &&

}
return effects.interrupt(self.parser.constructs.flow, nok, ok)(code)
}
}
/** @type {Construct} */
export const definition: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {factoryDestination} from 'micromark-factory-destination'

@@ -11,8 +13,8 @@ import {factoryLabel} from 'micromark-factory-label'

import {factoryWhitespace} from 'micromark-factory-whitespace'
import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
import {
markdownLineEnding,
markdownLineEndingOrSpace
markdownLineEndingOrSpace,
markdownSpace
} from 'micromark-util-character'
import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
/** @type {Construct} */

@@ -23,20 +25,50 @@ export const definition = {

}
/** @type {Construct} */
const titleConstruct = {
tokenize: tokenizeTitle,
const titleBefore = {
tokenize: tokenizeTitleBefore,
partial: true
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeDefinition(effects, ok, nok) {
const self = this
/** @type {string} */
let identifier
return start
/** @type {State} */
/**
* At start of a definition.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Do not interrupt paragraphs (but do follow definitions).
// To do: do `interrupt` the way `markdown-rs` does.
// To do: parse whitespace the way `markdown-rs` does.
effects.enter('definition')
return before(code)
}
/**
* After optional whitespace, at `[`.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function before(code) {
// To do: parse whitespace the way `markdown-rs` does.
return factoryLabel.call(

@@ -46,2 +78,3 @@ self,

labelAfter,
// Note: we don’t need to reset the way `markdown-rs` does.
nok,

@@ -53,4 +86,13 @@ 'definitionLabel',

}
/** @type {State} */
/**
* After label.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function labelAfter(code) {

@@ -60,77 +102,190 @@ identifier = normalizeIdentifier(

)
if (code === 58) {
effects.enter('definitionMarker')
effects.consume(code)
effects.exit('definitionMarker') // Note: blank lines can’t exist in content.
return factoryWhitespace(
effects,
factoryDestination(
effects,
effects.attempt(
titleConstruct,
factorySpace(effects, after, 'whitespace'),
factorySpace(effects, after, 'whitespace')
),
nok,
'definitionDestination',
'definitionDestinationLiteral',
'definitionDestinationLiteralMarker',
'definitionDestinationRaw',
'definitionDestinationString'
)
)
effects.exit('definitionMarker')
return markerAfter
}
return nok(code)
}
/** @type {State} */
/**
* After marker.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function markerAfter(code) {
// Note: whitespace is optional.
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, destinationBefore)(code)
: destinationBefore(code)
}
/**
* Before destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationBefore(code) {
return factoryDestination(
effects,
destinationAfter,
// Note: we don’t need to reset the way `markdown-rs` does.
nok,
'definitionDestination',
'definitionDestinationLiteral',
'definitionDestinationLiteralMarker',
'definitionDestinationRaw',
'definitionDestinationString'
)(code)
}
/**
* After destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationAfter(code) {
return effects.attempt(titleBefore, after, after)(code)
}
/**
* After definition.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function after(code) {
return markdownSpace(code)
? factorySpace(effects, afterWhitespace, 'whitespace')(code)
: afterWhitespace(code)
}
/**
* After definition, after optional whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function afterWhitespace(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit('definition')
if (!self.parser.defined.includes(identifier)) {
self.parser.defined.push(identifier)
}
// Note: we don’t care about uniqueness.
// It’s likely that that doesn’t happen very frequently.
// It is more likely that it wastes precious time.
self.parser.defined.push(identifier)
// To do: `markdown-rs` interrupt.
// // You’d be interrupting.
// tokenizer.interrupt = true
return ok(code)
}
return nok(code)
}
}
/** @type {Tokenizer} */
function tokenizeTitle(effects, ok, nok) {
return start
/** @type {State} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeTitleBefore(effects, ok, nok) {
return titleBefore
function start(code) {
/**
* After destination, at whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleBefore(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, before)(code)
? factoryWhitespace(effects, beforeMarker)(code)
: nok(code)
}
/** @type {State} */
function before(code) {
if (code === 34 || code === 39 || code === 40) {
return factoryTitle(
effects,
factorySpace(effects, after, 'whitespace'),
nok,
'definitionTitle',
'definitionTitleMarker',
'definitionTitleString'
)(code)
}
/**
* At title.
*
* ```markdown
* | [a]: b
* > | "c"
* ^
* ```
*
* @type {State}
*/
function beforeMarker(code) {
return factoryTitle(
effects,
titleAfter,
nok,
'definitionTitle',
'definitionTitleMarker',
'definitionTitleString'
)(code)
}
return nok(code)
/**
* After title.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfter(code) {
return markdownSpace(code)
? factorySpace(effects, titleAfterOptionalWhitespace, 'whitespace')(code)
: titleAfterOptionalWhitespace(code)
}
/** @type {State} */
function after(code) {
/**
* After title, after optional whitespace.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfterOptionalWhitespace(code) {
return code === null || markdownLineEnding(code) ? ok(code) : nok(code)
}
}
/** @type {Construct} */
export const hardBreakEscape: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {markdownLineEnding} from 'micromark-util-character'
/** @type {Construct} */

@@ -13,25 +14,45 @@ export const hardBreakEscape = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHardBreakEscape(effects, ok, nok) {
return start
/** @type {State} */
/**
* Start of a hard break (escape).
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
effects.enter('hardBreakEscape')
effects.enter('escapeMarker')
effects.consume(code)
return open
return after
}
/** @type {State} */
function open(code) {
/**
* After `\`, at eol.
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function after(code) {
if (markdownLineEnding(code)) {
effects.exit('escapeMarker')
effects.exit('hardBreakEscape')
return ok(code)
}
return nok(code)
}
}

@@ -5,4 +5,5 @@ /** @type {Construct} */

export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'

@@ -15,3 +17,2 @@ import {

import {splice} from 'micromark-util-chunked'
/** @type {Construct} */

@@ -23,4 +24,4 @@ export const headingAtx = {

}
/** @type {Resolver} */
function resolveHeadingAtx(events, context) {

@@ -30,12 +31,12 @@ let contentEnd = events.length - 2

/** @type {Token} */
let content
/** @type {Token} */
let text
let text // Prefix whitespace, part of the opening.
// Prefix whitespace, part of the opening.
if (events[contentStart][1].type === 'whitespace') {
contentStart += 2
} // Suffix whitespace, part of the closing.
}
// Suffix whitespace, part of the closing.
if (

@@ -47,3 +48,2 @@ contentEnd - 2 > contentStart &&

}
if (

@@ -57,3 +57,2 @@ events[contentEnd][1].type === 'atxHeadingSequence' &&

}
if (contentEnd > contentStart) {

@@ -69,3 +68,2 @@ content = {

end: events[contentEnd][1].end,
// @ts-expect-error Constants are fine to assign.
contentType: 'text'

@@ -80,72 +78,136 @@ }

}
return events
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHeadingAtx(effects, ok, nok) {
const self = this
let size = 0
return start
/** @type {State} */
/**
* Start of a heading (atx).
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
effects.enter('atxHeading')
return before(code)
}
/**
* After optional whitespace, at `#`.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter('atxHeadingSequence')
return fenceOpenInside(code)
return sequenceOpen(code)
}
/** @type {State} */
function fenceOpenInside(code) {
/**
* In opening sequence.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === 35 && size++ < 6) {
effects.consume(code)
return fenceOpenInside
return sequenceOpen
}
// Always at least one `#`.
if (code === null || markdownLineEndingOrSpace(code)) {
effects.exit('atxHeadingSequence')
return self.interrupt ? ok(code) : headingBreak(code)
return atBreak(code)
}
return nok(code)
}
/** @type {State} */
function headingBreak(code) {
/**
* After something, before something else.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === 35) {
effects.enter('atxHeadingSequence')
return sequence(code)
return sequenceFurther(code)
}
if (code === null || markdownLineEnding(code)) {
effects.exit('atxHeading')
// To do: interrupt like `markdown-rs`.
// // Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code)
}
if (markdownSpace(code)) {
return factorySpace(effects, headingBreak, 'whitespace')(code)
return factorySpace(effects, atBreak, 'whitespace')(code)
}
// To do: generate `data` tokens, add the `text` token later.
// Needs edit map, see: `markdown.rs`.
effects.enter('atxHeadingText')
return data(code)
}
/** @type {State} */
function sequence(code) {
/**
* In further sequence (after whitespace).
*
* Could be normal “visible” hashes in the heading or a final sequence.
*
* ```markdown
* > | ## aa ##
* ^
* ```
*
* @type {State}
*/
function sequenceFurther(code) {
if (code === 35) {
effects.consume(code)
return sequence
return sequenceFurther
}
effects.exit('atxHeadingSequence')
return headingBreak(code)
return atBreak(code)
}
/** @type {State} */
/**
* In text.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function data(code) {
if (code === null || code === 35 || markdownLineEndingOrSpace(code)) {
effects.exit('atxHeadingText')
return headingBreak(code)
return atBreak(code)
}
effects.consume(code)

@@ -152,0 +214,0 @@ return data

/** @type {Construct} */
export const htmlFlow: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {

@@ -17,4 +19,4 @@ asciiAlpha,

import {blankLine} from './blank-line.js'
/** @type {Construct} */
export const htmlFlow = {

@@ -26,13 +28,16 @@ name: 'htmlFlow',

}
/** @type {Construct} */
const nextBlankConstruct = {
tokenize: tokenizeNextBlank,
const blankLineBefore = {
tokenize: tokenizeBlankLineBefore,
partial: true
}
const nonLazyContinuationStart = {
tokenize: tokenizeNonLazyContinuationStart,
partial: true
}
/** @type {Resolver} */
function resolveToHtmlFlow(events) {
let index = events.length
while (index--) {

@@ -43,37 +48,57 @@ if (events[index][0] === 'enter' && events[index][1].type === 'htmlFlow') {

}
if (index > 1 && events[index - 2][1].type === 'linePrefix') {
// Add the prefix start to the HTML token.
events[index][1].start = events[index - 2][1].start // Add the prefix start to the HTML line token.
events[index + 1][1].start = events[index - 2][1].start // Remove the line prefix.
events[index][1].start = events[index - 2][1].start
// Add the prefix start to the HTML line token.
events[index + 1][1].start = events[index - 2][1].start
// Remove the line prefix.
events.splice(index - 2, 2)
}
return events
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHtmlFlow(effects, ok, nok) {
const self = this
/** @type {number} */
let kind
let marker
/** @type {boolean} */
let startTag
let closingTag
/** @type {string} */
let buffer
/** @type {number} */
let index
/** @type {Code} */
let marker
let markerB
return start
/** @type {State} */
/**
* Start of HTML (flow).
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
return before(code)
}
/**
* At `<`, after optional whitespace.
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter('htmlFlow')

@@ -84,45 +109,71 @@ effects.enter('htmlFlowData')

}
/** @type {State} */
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | <x />
* ^
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (code === 33) {
effects.consume(code)
return declarationStart
return declarationOpen
}
if (code === 47) {
effects.consume(code)
closingTag = true
return tagCloseStart
}
if (code === 63) {
effects.consume(code)
kind = 3 // While we’re in an instruction instead of a declaration, we’re on a `?`
marker = 3
// To do:
// tokenizer.concrete = true
// To do: use `markdown-rs` style interrupt.
// While we’re in an instruction instead of a declaration, we’re on a `?`
// right now, so we do need to search for `>`, similar to declarations.
return self.interrupt ? ok : continuationDeclarationInside
}
// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code)
// @ts-expect-error: not null.
buffer = String.fromCharCode(code)
startTag = true
return tagName
}
return nok(code)
}
/** @type {State} */
function declarationStart(code) {
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === 45) {
effects.consume(code)
kind = 2
marker = 2
return commentOpenInside
}
if (code === 91) {
effects.consume(code)
kind = 5
buffer = 'CDATA['
marker = 5
index = 0

@@ -132,47 +183,89 @@ return cdataOpenInside

// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code)
kind = 4
marker = 4
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside
}
return nok(code)
}
/** @type {State} */
/**
* After `<!-`, inside a comment, at another `-`.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === 45) {
effects.consume(code)
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside
}
return nok(code)
}
/** @type {State} */
/**
* After `<![`, inside CDATA, expecting `CDATA[`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
if (code === buffer.charCodeAt(index++)) {
const value = 'CDATA['
if (code === value.charCodeAt(index++)) {
effects.consume(code)
return index === buffer.length
? self.interrupt
? ok
: continuation
: cdataOpenInside
if (index === value.length) {
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation
}
return cdataOpenInside
}
return nok(code)
}
/** @type {State} */
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | </x>
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
if (asciiAlpha(code)) {
effects.consume(code)
// @ts-expect-error: not null.
buffer = String.fromCharCode(code)
return tagName
}
return nok(code)
}
/** @type {State} */
/**
* In tag name.
*
* ```markdown
* > | <ab>
* ^^
* > | </ab>
* ^^
* ```
*
* @type {State}
*/
function tagName(code) {

@@ -185,15 +278,13 @@ if (

) {
if (
code !== 47 &&
startTag &&
htmlRawNames.includes(buffer.toLowerCase())
) {
kind = 1
const slash = code === 47
const name = buffer.toLowerCase()
if (!slash && !closingTag && htmlRawNames.includes(name)) {
marker = 1
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code)
}
if (htmlBlockNames.includes(buffer.toLowerCase())) {
kind = 6
if (code === 47) {
marker = 6
if (slash) {
effects.consume(code)

@@ -203,14 +294,16 @@ return basicSelfClosing

// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code)
}
kind = 7 // Do not support complete HTML when interrupting
marker = 7
// Do not support complete HTML when interrupting.
return self.interrupt && !self.parser.lazy[self.now().line]
? nok(code)
: startTag
? completeAttributeNameBefore(code)
: completeClosingTagAfter(code)
: closingTag
? completeClosingTagAfter(code)
: completeAttributeNameBefore(code)
}
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {

@@ -221,17 +314,35 @@ effects.consume(code)

}
return nok(code)
}
/** @type {State} */
/**
* After closing slash of a basic tag name.
*
* ```markdown
* > | <div/>
* ^
* ```
*
* @type {State}
*/
function basicSelfClosing(code) {
if (code === 62) {
effects.consume(code)
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation
}
return nok(code)
}
/** @type {State} */
/**
* After closing slash of a complete tag name.
*
* ```markdown
* > | <x/>
* ^
* ```
*
* @type {State}
*/
function completeClosingTagAfter(code) {

@@ -242,7 +353,28 @@ if (markdownSpace(code)) {

}
return completeEnd(code)
}
/** @type {State} */
/**
* At an attribute name.
*
* At first, this state is used after a complete tag name, after whitespace,
* where it expects optional attributes or the end of the tag.
* It is also reused after attributes, when expecting more optional
* attributes.
*
* ```markdown
* > | <a />
* ^
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* > | <a >
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameBefore(code) {

@@ -254,2 +386,3 @@ if (code === 47) {

// ASCII alphanumerical and `:` and `_`.
if (code === 58 || code === 95 || asciiAlpha(code)) {

@@ -259,3 +392,2 @@ effects.consume(code)

}
if (markdownSpace(code)) {

@@ -265,8 +397,21 @@ effects.consume(code)

}
return completeEnd(code)
}
/** @type {State} */
/**
* In attribute name.
*
* ```markdown
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* ```
*
* @type {State}
*/
function completeAttributeName(code) {
// ASCII alphanumerical and `-`, `.`, `:`, and `_`.
if (

@@ -282,7 +427,18 @@ code === 45 ||

}
return completeAttributeNameAfter(code)
}
/** @type {State} */
/**
* After attribute name, at an optional initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | <a b>
* ^
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameAfter(code) {

@@ -293,3 +449,2 @@ if (code === 61) {

}
if (markdownSpace(code)) {

@@ -299,7 +454,18 @@ effects.consume(code)

}
return completeAttributeNameBefore(code)
}
/** @type {State} */
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | <a b=c>
* ^
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueBefore(code) {

@@ -315,9 +481,7 @@ if (

}
if (code === 34 || code === 39) {
effects.consume(code)
marker = code
markerB = code
return completeAttributeValueQuoted
}
if (markdownSpace(code)) {

@@ -327,23 +491,40 @@ effects.consume(code)

}
marker = null
return completeAttributeValueUnquoted(code)
}
/** @type {State} */
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | <a b="c">
* ^
* > | <a b='c'>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuoted(code) {
if (code === markerB) {
effects.consume(code)
markerB = null
return completeAttributeValueQuotedAfter
}
if (code === null || markdownLineEnding(code)) {
return nok(code)
}
if (code === marker) {
effects.consume(code)
return completeAttributeValueQuotedAfter
}
effects.consume(code)
return completeAttributeValueQuoted
}
/** @type {State} */
/**
* In unquoted attribute value.
*
* ```markdown
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueUnquoted(code) {

@@ -354,2 +535,3 @@ if (

code === 39 ||
code === 47 ||
code === 60 ||

@@ -363,8 +545,17 @@ code === 61 ||

}
effects.consume(code)
return completeAttributeValueUnquoted
}
/** @type {State} */
/**
* After double or single quoted attribute value, before whitespace or the
* end of the tag.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuotedAfter(code) {

@@ -374,7 +565,15 @@ if (code === 47 || code === 62 || markdownSpace(code)) {

}
return nok(code)
}
/** @type {State} */
/**
* In certain circumstances of a complete tag where only an `>` is allowed.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeEnd(code) {

@@ -385,8 +584,21 @@ if (code === 62) {

}
return nok(code)
}
/** @type {State} */
/**
* After `>` in a complete tag.
*
* ```markdown
* > | <x>
* ^
* ```
*
* @type {State}
*/
function completeAfter(code) {
if (code === null || markdownLineEnding(code)) {
// // Do not form containers.
// tokenizer.concrete = true
return continuation(code)
}
if (markdownSpace(code)) {

@@ -396,97 +608,118 @@ effects.consume(code)

}
return code === null || markdownLineEnding(code)
? continuation(code)
: nok(code)
return nok(code)
}
/** @type {State} */
/**
* In continuation of any HTML kind.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuation(code) {
if (code === 45 && kind === 2) {
if (code === 45 && marker === 2) {
effects.consume(code)
return continuationCommentInside
}
if (code === 60 && kind === 1) {
if (code === 60 && marker === 1) {
effects.consume(code)
return continuationRawTagOpen
}
if (code === 62 && kind === 4) {
if (code === 62 && marker === 4) {
effects.consume(code)
return continuationClose
}
if (code === 63 && kind === 3) {
if (code === 63 && marker === 3) {
effects.consume(code)
return continuationDeclarationInside
}
if (code === 93 && kind === 5) {
if (code === 93 && marker === 5) {
effects.consume(code)
return continuationCharacterDataInside
return continuationCdataInside
}
if (markdownLineEnding(code) && (kind === 6 || kind === 7)) {
if (markdownLineEnding(code) && (marker === 6 || marker === 7)) {
effects.exit('htmlFlowData')
return effects.check(
nextBlankConstruct,
continuationClose,
continuationAtLineEnding
blankLineBefore,
continuationAfter,
continuationStart
)(code)
}
if (code === null || markdownLineEnding(code)) {
return continuationAtLineEnding(code)
effects.exit('htmlFlowData')
return continuationStart(code)
}
effects.consume(code)
return continuation
}
/** @type {State} */
function continuationAtLineEnding(code) {
effects.exit('htmlFlowData')
return htmlContinueStart(code)
/**
* In continuation, at eol.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStart(code) {
return effects.check(
nonLazyContinuationStart,
continuationStartNonLazy,
continuationAfter
)(code)
}
/** @type {State} */
function htmlContinueStart(code) {
if (code === null) {
return done(code)
}
/**
* In continuation, at eol, before non-lazy content.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStartNonLazy(code) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return continuationBefore
}
if (markdownLineEnding(code)) {
return effects.attempt(
{
tokenize: htmlLineEnd,
partial: true
},
htmlContinueStart,
done
)(code)
/**
* In continuation, before non-lazy content.
*
* ```markdown
* | <x>
* > | asd
* ^
* ```
*
* @type {State}
*/
function continuationBefore(code) {
if (code === null || markdownLineEnding(code)) {
return continuationStart(code)
}
effects.enter('htmlFlowData')
return continuation(code)
}
/** @type {Tokenizer} */
function htmlLineEnd(effects, ok, nok) {
return start
/** @type {State} */
function start(code) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return lineStart
}
/** @type {State} */
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/** @type {State} */
/**
* In comment continuation, after one `-`, expecting another.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuationCommentInside(code) {

@@ -497,7 +730,15 @@ if (code === 45) {

}
return continuation(code)
}
/** @type {State} */
/**
* In raw continuation, after `<`, at `/`.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^
* ```
*
* @type {State}
*/
function continuationRawTagOpen(code) {

@@ -509,24 +750,44 @@ if (code === 47) {

}
return continuation(code)
}
/** @type {State} */
/**
* In raw continuation, after `</`, in a raw tag name.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^^^^^^
* ```
*
* @type {State}
*/
function continuationRawEndTag(code) {
if (code === 62 && htmlRawNames.includes(buffer.toLowerCase())) {
effects.consume(code)
return continuationClose
if (code === 62) {
const name = buffer.toLowerCase()
if (htmlRawNames.includes(name)) {
effects.consume(code)
return continuationClose
}
return continuation(code)
}
if (asciiAlpha(code) && buffer.length < 8) {
effects.consume(code)
// @ts-expect-error: not null.
buffer += String.fromCharCode(code)
return continuationRawEndTag
}
return continuation(code)
}
/** @type {State} */
function continuationCharacterDataInside(code) {
/**
* In cdata continuation, after `]`, expecting `]>`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationCdataInside(code) {
if (code === 93) {

@@ -536,7 +797,23 @@ effects.consume(code)

}
return continuation(code)
}
/** @type {State} */
/**
* In declaration or instruction continuation, at `>`.
*
* ```markdown
* > | <!-->
* ^
* > | <?>
* ^
* > | <!q>
* ^
* > | <!--ab-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationDeclarationInside(code) {

@@ -546,42 +823,120 @@ if (code === 62) {

return continuationClose
} // More dashes.
}
if (code === 45 && kind === 2) {
// More dashes.
if (code === 45 && marker === 2) {
effects.consume(code)
return continuationDeclarationInside
}
return continuation(code)
}
/** @type {State} */
/**
* In closed continuation: everything we get until the eol/eof is part of it.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationClose(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit('htmlFlowData')
return done(code)
return continuationAfter(code)
}
effects.consume(code)
return continuationClose
}
/** @type {State} */
function done(code) {
/**
* Done.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationAfter(code) {
effects.exit('htmlFlow')
// // Feel free to interrupt.
// tokenizer.interrupt = false
// // No longer concrete.
// tokenizer.concrete = false
return ok(code)
}
}
/** @type {Tokenizer} */
function tokenizeNextBlank(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuationStart(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* At eol, before continuation.
*
* ```markdown
* > | * ```js
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
effects.exit('htmlFlowData')
effects.enter('lineEndingBlank')
if (markdownLineEnding(code)) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return after
}
return nok(code)
}
/**
* A continuation.
*
* ```markdown
* | * ```js
* > | b
* ^
* ```
*
* @type {State}
*/
function after(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code)
}
}
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeBlankLineBefore(effects, ok, nok) {
return start
/**
* Before eol, expecting blank line.
*
* ```markdown
* > | <div>
* ^
* |
* ```
*
* @type {State}
*/
function start(code) {
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEndingBlank')
effects.exit('lineEnding')
return effects.attempt(blankLine, ok, nok)
}
}
/** @type {Construct} */
export const htmlText: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {factorySpace} from 'micromark-factory-space'

@@ -15,3 +17,2 @@ import {

} from 'micromark-util-character'
/** @type {Construct} */

@@ -22,21 +23,27 @@ export const htmlText = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeHtmlText(effects, ok, nok) {
const self = this
/** @type {NonNullable<Code>|undefined} */
/** @type {NonNullable<Code> | undefined} */
let marker
/** @type {string} */
let buffer
/** @type {number} */
let index
/** @type {State} */
let returnState
return start
/** @type {State} */
/**
* Start of HTML (text).
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -48,4 +55,17 @@ effects.enter('htmlText')

}
/** @type {State} */
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | a <b> c
* ^
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -56,3 +76,2 @@ if (code === 33) {

}
if (code === 47) {

@@ -62,3 +81,2 @@ effects.consume(code)

}
if (code === 63) {

@@ -69,2 +87,3 @@ effects.consume(code)

// ASCII alphabetical.
if (asciiAlpha(code)) {

@@ -74,20 +93,29 @@ effects.consume(code)

}
return nok(code)
}
/** @type {State} */
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* > | a <![CDATA[>&<]]> c
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === 45) {
effects.consume(code)
return commentOpen
return commentOpenInside
}
if (code === 91) {
effects.consume(code)
buffer = 'CDATA['
index = 0
return cdataOpen
return cdataOpenInside
}
if (asciiAlpha(code)) {

@@ -97,40 +125,33 @@ effects.consume(code)

}
return nok(code)
}
/** @type {State} */
function commentOpen(code) {
/**
* In a comment, after `<!-`, at another `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === 45) {
effects.consume(code)
return commentStart
return commentEnd
}
return nok(code)
}
/** @type {State} */
function commentStart(code) {
if (code === null || code === 62) {
return nok(code)
}
if (code === 45) {
effects.consume(code)
return commentStartDash
}
return comment(code)
}
/** @type {State} */
function commentStartDash(code) {
if (code === null || code === 62) {
return nok(code)
}
return comment(code)
}
/** @type {State} */
/**
* In comment.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function comment(code) {

@@ -140,3 +161,2 @@ if (code === null) {

}
if (code === 45) {

@@ -146,33 +166,75 @@ effects.consume(code)

}
if (markdownLineEnding(code)) {
returnState = comment
return atLineEnding(code)
return lineEndingBefore(code)
}
effects.consume(code)
return comment
}
/** @type {State} */
/**
* In comment, after `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentClose(code) {
if (code === 45) {
effects.consume(code)
return end
return commentEnd
}
return comment(code)
}
/** @type {State} */
function cdataOpen(code) {
if (code === buffer.charCodeAt(index++)) {
/**
* In comment, after `--`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentEnd(code) {
return code === 62
? end(code)
: code === 45
? commentClose(code)
: comment(code)
}
/**
* After `<![`, in CDATA, expecting `CDATA[`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
const value = 'CDATA['
if (code === value.charCodeAt(index++)) {
effects.consume(code)
return index === buffer.length ? cdata : cdataOpen
return index === value.length ? cdata : cdataOpenInside
}
return nok(code)
}
/** @type {State} */
/**
* In CDATA.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^
* ```
*
* @type {State}
*/
function cdata(code) {

@@ -182,3 +244,2 @@ if (code === null) {

}
if (code === 93) {

@@ -188,13 +249,20 @@ effects.consume(code)

}
if (markdownLineEnding(code)) {
returnState = cdata
return atLineEnding(code)
return lineEndingBefore(code)
}
effects.consume(code)
return cdata
}
/** @type {State} */
/**
* In CDATA, after `]`, at another `]`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataClose(code) {

@@ -205,7 +273,15 @@ if (code === 93) {

}
return cdata(code)
}
/** @type {State} */
/**
* In CDATA, after `]]`, at `>`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataEnd(code) {

@@ -215,3 +291,2 @@ if (code === 62) {

}
if (code === 93) {

@@ -221,7 +296,15 @@ effects.consume(code)

}
return cdata(code)
}
/** @type {State} */
/**
* In declaration.
*
* ```markdown
* > | a <!b> c
* ^
* ```
*
* @type {State}
*/
function declaration(code) {

@@ -231,13 +314,20 @@ if (code === null || code === 62) {

}
if (markdownLineEnding(code)) {
returnState = declaration
return atLineEnding(code)
return lineEndingBefore(code)
}
effects.consume(code)
return declaration
}
/** @type {State} */
/**
* In instruction.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instruction(code) {

@@ -247,3 +337,2 @@ if (code === null) {

}
if (code === 63) {

@@ -253,19 +342,36 @@ effects.consume(code)

}
if (markdownLineEnding(code)) {
returnState = instruction
return atLineEnding(code)
return lineEndingBefore(code)
}
effects.consume(code)
return instruction
}
/** @type {State} */
/**
* In instruction, after `?`, at `>`.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instructionClose(code) {
return code === 62 ? end(code) : instruction(code)
}
/** @type {State} */
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
// ASCII alphabetical.
if (asciiAlpha(code)) {

@@ -275,8 +381,17 @@ effects.consume(code)

}
return nok(code)
}
/** @type {State} */
/**
* After `</x`, in a tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagClose(code) {
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {

@@ -286,13 +401,20 @@ effects.consume(code)

}
return tagCloseBetween(code)
}
/** @type {State} */
/**
* In closing tag, after tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseBetween(code) {
if (markdownLineEnding(code)) {
returnState = tagCloseBetween
return atLineEnding(code)
return lineEndingBefore(code)
}
if (markdownSpace(code)) {

@@ -302,8 +424,17 @@ effects.consume(code)

}
return end(code)
}
/** @type {State} */
/**
* After `<x`, in opening tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpen(code) {
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {

@@ -313,11 +444,18 @@ effects.consume(code)

}
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code)
}
return nok(code)
}
/** @type {State} */
/**
* In opening tag, after tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpenBetween(code) {

@@ -329,2 +467,3 @@ if (code === 47) {

// ASCII alphabetical and `:` and `_`.
if (code === 58 || code === 95 || asciiAlpha(code)) {

@@ -334,8 +473,6 @@ effects.consume(code)

}
if (markdownLineEnding(code)) {
returnState = tagOpenBetween
return atLineEnding(code)
return lineEndingBefore(code)
}
if (markdownSpace(code)) {

@@ -345,8 +482,17 @@ effects.consume(code)

}
return end(code)
}
/** @type {State} */
/**
* In attribute name.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeName(code) {
// ASCII alphabetical and `-`, `.`, `:`, and `_`.
if (

@@ -362,7 +508,16 @@ code === 45 ||

}
return tagOpenAttributeNameAfter(code)
}
/** @type {State} */
/**
* After attribute name, before initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeNameAfter(code) {

@@ -373,8 +528,6 @@ if (code === 61) {

}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeNameAfter
return atLineEnding(code)
return lineEndingBefore(code)
}
if (markdownSpace(code)) {

@@ -384,7 +537,16 @@ effects.consume(code)

}
return tagOpenBetween(code)
}
/** @type {State} */
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueBefore(code) {

@@ -400,3 +562,2 @@ if (

}
if (code === 34 || code === 39) {

@@ -407,8 +568,6 @@ effects.consume(code)

}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeValueBefore
return atLineEnding(code)
return lineEndingBefore(code)
}
if (markdownSpace(code)) {

@@ -418,38 +577,43 @@ effects.consume(code)

}
effects.consume(code)
marker = undefined
return tagOpenAttributeValueUnquoted
}
/** @type {State} */
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuoted(code) {
if (code === marker) {
effects.consume(code)
marker = undefined
return tagOpenAttributeValueQuotedAfter
}
if (code === null) {
return nok(code)
}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeValueQuoted
return atLineEnding(code)
return lineEndingBefore(code)
}
effects.consume(code)
return tagOpenAttributeValueQuoted
}
/** @type {State} */
function tagOpenAttributeValueQuotedAfter(code) {
if (code === 62 || code === 47 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code)
}
return nok(code)
}
/** @type {State} */
/**
* In unquoted attribute value.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueUnquoted(code) {

@@ -466,36 +630,37 @@ if (

}
if (code === 62 || markdownLineEndingOrSpace(code)) {
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code)
}
effects.consume(code)
return tagOpenAttributeValueUnquoted
} // We can’t have blank lines in content, so no need to worry about empty
// tokens.
/** @type {State} */
function atLineEnding(code) {
effects.exit('htmlTextData')
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return factorySpace(
effects,
afterPrefix,
'linePrefix',
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: 4
)
}
/** @type {State} */
function afterPrefix(code) {
effects.enter('htmlTextData')
return returnState(code)
/**
* After double or single quoted attribute value, before whitespace or the end
* of the tag.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuotedAfter(code) {
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code)
}
return nok(code)
}
/** @type {State} */
/**
* In certain circumstances of a tag where only an `>` is allowed.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function end(code) {

@@ -508,5 +673,74 @@ if (code === 62) {

}
return nok(code)
}
/**
* At eol.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* > | a <!--a
* ^
* | b-->
* ```
*
* @type {State}
*/
function lineEndingBefore(code) {
effects.exit('htmlTextData')
effects.enter('lineEnding')
effects.consume(code)
effects.exit('lineEnding')
return lineEndingAfter
}
/**
* After eol, at optional whitespace.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfter(code) {
// Always populated by defaults.
return markdownSpace(code)
? factorySpace(
effects,
lineEndingAfterPrefix,
'linePrefix',
self.parser.constructs.disable.null.includes('codeIndented')
? undefined
: 4
)(code)
: lineEndingAfterPrefix(code)
}
/**
* After eol, after optional whitespace.
*
* > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfterPrefix(code) {
effects.enter('htmlTextData')
return returnState(code)
}
}
/** @type {Construct} */
export const labelEnd: Construct
export type Construct = import('micromark-util-types').Construct
export type Event = import('micromark-util-types').Event
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type Token = import('micromark-util-types').Token
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Event = import('micromark-util-types').Event
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {factoryDestination} from 'micromark-factory-destination'

@@ -18,3 +19,2 @@ import {factoryLabel} from 'micromark-factory-label'

import {resolveAll} from 'micromark-util-resolve-all'
/** @type {Construct} */

@@ -27,4 +27,4 @@ export const labelEnd = {

}
/** @type {Construct} */
const resourceConstruct = {

@@ -34,22 +34,15 @@ tokenize: tokenizeResource

/** @type {Construct} */
const fullReferenceConstruct = {
tokenize: tokenizeFullReference
const referenceFullConstruct = {
tokenize: tokenizeReferenceFull
}
/** @type {Construct} */
const referenceCollapsedConstruct = {
tokenize: tokenizeReferenceCollapsed
}
const collapsedReferenceConstruct = {
tokenize: tokenizeCollapsedReference
}
/** @type {Resolver} */
function resolveAllLabelEnd(events) {
let index = -1
/** @type {Token} */
let token
while (++index < events.length) {
token = events[index][1]
const token = events[index][1]
if (

@@ -66,7 +59,6 @@ token.type === 'labelImage' ||

}
return events
}
/** @type {Resolver} */
function resolveToLabelEnd(events, context) {

@@ -76,17 +68,13 @@ let index = events.length

/** @type {Token} */
let token
/** @type {number|undefined} */
/** @type {number | undefined} */
let open
/** @type {number|undefined} */
/** @type {number | undefined} */
let close
/** @type {Event[]} */
/** @type {Array<Event>} */
let media
let media // Find an opening.
// Find an opening.
while (index--) {
token = events[index][1]
if (open) {

@@ -99,5 +87,6 @@ // If we see another link, or inactive link label, we’ve been here before.

break
} // Mark other link openings as inactive, as we can’t have links in
}
// Mark other link openings as inactive, as we can’t have links in
// links.
if (events[index][0] === 'enter' && token.type === 'labelLink') {

@@ -113,3 +102,2 @@ token._inactive = true

open = index
if (token.type !== 'labelLink') {

@@ -124,3 +112,2 @@ offset = 2

}
const group = {

@@ -144,8 +131,13 @@ type: events[open][1].type === 'labelLink' ? 'link' : 'image',

['enter', label, context]
] // Opening marker.
]
media = push(media, events.slice(open + 1, open + offset + 3)) // Text open.
// Opening marker.
media = push(media, events.slice(open + 1, open + offset + 3))
media = push(media, [['enter', text, context]]) // Between.
// Text open.
media = push(media, [['enter', text, context]])
// Always populated by defaults.
// Between.
media = push(

@@ -158,4 +150,5 @@ media,

)
) // Text close, marker close, label close.
)
// Text close, marker close, label close.
media = push(media, [

@@ -166,6 +159,8 @@ ['exit', text, context],

['exit', label, context]
]) // Reference, resource, or so.
])
media = push(media, events.slice(close + 1)) // Media close.
// Reference, resource, or so.
media = push(media, events.slice(close + 1))
// Media close.
media = push(media, [['exit', group, context]])

@@ -175,4 +170,7 @@ splice(events, open, events.length, media)

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelEnd(effects, ok, nok) {

@@ -182,8 +180,7 @@ const self = this

/** @type {Token} */
let labelStart
/** @type {boolean} */
let defined
let defined // Find an opening.
// Find an opening.
while (index--) {

@@ -199,12 +196,37 @@ if (

}
return start
/** @type {State} */
/**
* Start of label end.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ```
*
* @type {State}
*/
function start(code) {
// If there is not an okay opening.
if (!labelStart) {
return nok(code)
} // It’s a balanced bracket, but contains a link.
}
if (labelStart._inactive) return balanced(code)
// If the corresponding label (link) start is marked as inactive,
// it means we’d be wrapping a link, like this:
//
// ```markdown
// > | a [b [c](d) e](f) g.
// ^
// ```
//
// We can’t have that, so it’s just balanced brackets.
if (labelStart._inactive) {
return labelEndNok(code)
}
defined = self.parser.defined.includes(

@@ -223,31 +245,107 @@ normalizeIdentifier(

effects.exit('labelEnd')
return afterLabelEnd
return after
}
/** @type {State} */
function afterLabelEnd(code) {
// Resource: `[asd](fgh)`.
/**
* After `]`.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function after(code) {
// Note: `markdown-rs` also parses GFM footnotes here, which for us is in
// an extension.
// Resource (`[asd](fgh)`)?
if (code === 40) {
return effects.attempt(
resourceConstruct,
ok,
defined ? ok : balanced
labelEndOk,
defined ? labelEndOk : labelEndNok
)(code)
} // Collapsed (`[asd][]`) or full (`[asd][fgh]`) reference?
}
// Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?
if (code === 91) {
return effects.attempt(
fullReferenceConstruct,
ok,
defined
? effects.attempt(collapsedReferenceConstruct, ok, balanced)
: balanced
referenceFullConstruct,
labelEndOk,
defined ? referenceNotFull : labelEndNok
)(code)
} // Shortcut reference: `[asd]`?
}
return defined ? ok(code) : balanced(code)
// Shortcut (`[asd]`) reference?
return defined ? labelEndOk(code) : labelEndNok(code)
}
/** @type {State} */
function balanced(code) {
/**
* After `]`, at `[`, but not at a full reference.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function referenceNotFull(code) {
return effects.attempt(
referenceCollapsedConstruct,
labelEndOk,
labelEndNok
)(code)
}
/**
* Done, we found something.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndOk(code) {
// Note: `markdown-rs` does a bunch of stuff here.
return ok(code)
}
/**
* Done, it’s nothing.
*
* There was an okay opening, but we didn’t match anything.
*
* ```markdown
* > | [a](b c
* ^
* > | [a][b c
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndNok(code) {
labelStart._balanced = true

@@ -257,9 +355,21 @@ return nok(code)

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeResource(effects, ok, nok) {
return start
/** @type {State} */
return resourceStart
function start(code) {
/**
* At a resource.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceStart(code) {
effects.enter('resource')

@@ -269,15 +379,39 @@ effects.enter('resourceMarker')

effects.exit('resourceMarker')
return factoryWhitespace(effects, open)
return resourceBefore
}
/** @type {State} */
function open(code) {
/**
* In resource, after `(`, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceBefore(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, resourceOpen)(code)
: resourceOpen(code)
}
/**
* In resource, after optional whitespace, at `)` or a destination.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceOpen(code) {
if (code === 41) {
return end(code)
return resourceEnd(code)
}
return factoryDestination(
effects,
destinationAfter,
nok,
resourceDestinationAfter,
resourceDestinationMissing,
'resourceDestination',

@@ -291,16 +425,48 @@ 'resourceDestinationLiteral',

}
/** @type {State} */
function destinationAfter(code) {
/**
* In resource, after destination, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceDestinationAfter(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, between)(code)
: end(code)
? factoryWhitespace(effects, resourceBetween)(code)
: resourceEnd(code)
}
/** @type {State} */
function between(code) {
/**
* At invalid destination.
*
* ```markdown
* > | [a](<<) b
* ^
* ```
*
* @type {State}
*/
function resourceDestinationMissing(code) {
return nok(code)
}
/**
* In resource, after destination and whitespace, at `(` or title.
*
* ```markdown
* > | [a](b ) c
* ^
* ```
*
* @type {State}
*/
function resourceBetween(code) {
if (code === 34 || code === 39 || code === 40) {
return factoryTitle(
effects,
factoryWhitespace(effects, end),
resourceTitleAfter,
nok,

@@ -312,8 +478,32 @@ 'resourceTitle',

}
return resourceEnd(code)
}
return end(code)
/**
* In resource, after title, at optional whitespace.
*
* ```markdown
* > | [a](b "c") d
* ^
* ```
*
* @type {State}
*/
function resourceTitleAfter(code) {
return markdownLineEndingOrSpace(code)
? factoryWhitespace(effects, resourceEnd)(code)
: resourceEnd(code)
}
/** @type {State} */
function end(code) {
/**
* In resource, at `)`.
*
* ```markdown
* > | [a](b) d
* ^
* ```
*
* @type {State}
*/
function resourceEnd(code) {
if (code === 41) {

@@ -326,19 +516,30 @@ effects.enter('resourceMarker')

}
return nok(code)
}
}
/** @type {Tokenizer} */
function tokenizeFullReference(effects, ok, nok) {
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeReferenceFull(effects, ok, nok) {
const self = this
return start
/** @type {State} */
return referenceFull
function start(code) {
/**
* In a reference (full), at the `[`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFull(code) {
return factoryLabel.call(
self,
effects,
afterLabel,
nok,
referenceFullAfter,
referenceFullMissing,
'reference',

@@ -349,5 +550,14 @@ 'referenceMarker',

}
/** @type {State} */
function afterLabel(code) {
/**
* In a reference (full), after `]`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFullAfter(code) {
return self.parser.defined.includes(

@@ -361,10 +571,40 @@ normalizeIdentifier(

}
/**
* In reference (full) that was missing.
*
* ```markdown
* > | [a][b d
* ^
* ```
*
* @type {State}
*/
function referenceFullMissing(code) {
return nok(code)
}
}
/** @type {Tokenizer} */
function tokenizeCollapsedReference(effects, ok, nok) {
return start
/** @type {State} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeReferenceCollapsed(effects, ok, nok) {
return referenceCollapsedStart
function start(code) {
/**
* In reference (collapsed), at `[`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedStart(code) {
// We only attempt a collapsed label if there’s a `[`.
effects.enter('reference')

@@ -374,7 +614,18 @@ effects.enter('referenceMarker')

effects.exit('referenceMarker')
return open
return referenceCollapsedOpen
}
/** @type {State} */
function open(code) {
/**
* In reference (collapsed), at `]`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedOpen(code) {
if (code === 93) {

@@ -387,5 +638,4 @@ effects.enter('referenceMarker')

}
return nok(code)
}
}
/** @type {Construct} */
export const labelStartImage: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {labelEnd} from './label-end.js'
/** @type {Construct} */
export const labelStartImage = {

@@ -14,9 +16,21 @@ name: 'labelStartImage',

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelStartImage(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* Start of label (image) start.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -29,4 +43,13 @@ effects.enter('labelImage')

}
/** @type {State} */
/**
* After `!`, at `[`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function open(code) {

@@ -40,14 +63,37 @@ if (code === 91) {

}
return nok(code)
}
/** @type {State} */
/**
* After `![`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* This is needed in because, when GFM footnotes are enabled, images never
* form when started with a `^`.
* Instead, links form:
*
* ```markdown
* ![^a](b)
*
* ![^a][b]
*
* [b]: c
* ```
*
* ```html
* <p>!<a href=\"b\">^a</a></p>
* <p>!<a href=\"c\">^a</a></p>
* ```
*
* @type {State}
*/
function after(code) {
/* To do: remove in the future once we’ve switched from
* `micromark-extension-footnote` to `micromark-extension-gfm-footnote`,
* which doesn’t need this */
/* Hidden footnotes hook */
// To do: use a new field to do this, this is still needed for
// `micromark-extension-gfm-footnote`, but the `label-start-link`
// behavior isn’t.
// Hidden footnotes hook.
/* c8 ignore next 3 */

@@ -54,0 +100,0 @@ return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs

/** @type {Construct} */
export const labelStartLink: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {labelEnd} from './label-end.js'
/** @type {Construct} */
export const labelStartLink = {

@@ -14,9 +16,21 @@ name: 'labelStartLink',

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLabelStartLink(effects, ok, nok) {
const self = this
return start
/** @type {State} */
/**
* Start of label (link) start.
*
* ```markdown
* > | a [b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {

@@ -30,11 +44,8 @@ effects.enter('labelLink')

}
/** @type {State} */
function after(code) {
/* To do: remove in the future once we’ve switched from
* `micromark-extension-footnote` to `micromark-extension-gfm-footnote`,
* which doesn’t need this */
/* Hidden footnotes hook. */
// To do: this isn’t needed in `micromark-extension-gfm-footnote`,
// remove.
// Hidden footnotes hook.
/* c8 ignore next 3 */

@@ -41,0 +52,0 @@ return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs

/** @type {Construct} */
export const lineEnding: Construct
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
/**
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
/** @type {Construct} */

@@ -14,8 +15,11 @@ export const lineEnding = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeLineEnding(effects, ok) {
return start
/** @type {State} */
function start(code) {

@@ -22,0 +26,0 @@ effects.enter('lineEnding')

/** @type {Construct} */
export const list: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type ContainerState = import('micromark-util-types').ContainerState
export type Exiter = import('micromark-util-types').Exiter
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Exiter = import('micromark-util-types').Exiter
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
export type ListContainerState = Record<string, unknown> & {
marker: Code
type: string
size: number
}
export type TokenizeContextWithState = TokenizeContext & {
containerState: ListContainerState
}
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').ContainerState} ContainerState
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Exiter} Exiter
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
/**
* @typedef {Record<string, unknown> & {marker: Code, type: string, size: number}} ListContainerState
* @typedef {TokenizeContext & {containerState: ListContainerState}} TokenizeContextWithState
*/
import {factorySpace} from 'micromark-factory-space'

@@ -18,4 +15,4 @@ import {asciiDigit, markdownSpace} from 'micromark-util-character'

import {thematicBreak} from './thematic-break.js'
/** @type {Construct} */
export const list = {

@@ -29,4 +26,4 @@ name: 'list',

}
/** @type {Construct} */
const listItemPrefixWhitespaceConstruct = {

@@ -36,4 +33,4 @@ tokenize: tokenizeListItemPrefixWhitespace,

}
/** @type {Construct} */
const indentConstruct = {

@@ -43,7 +40,10 @@ tokenize: tokenizeIndent,

}
// To do: `markdown-rs` parses list items on their own and later stitches them
// together.
/**
* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
function tokenizeListStart(effects, ok, nok) {

@@ -58,4 +58,4 @@ const self = this

return start
/** @type {State} */
function start(code) {

@@ -67,3 +67,2 @@ const kind =

: 'listOrdered')
if (

@@ -80,3 +79,2 @@ kind === 'listUnordered'

}
if (kind === 'listUnordered') {

@@ -88,3 +86,2 @@ effects.enter('listItemPrefix')

}
if (!self.interrupt || code === 49) {

@@ -96,7 +93,6 @@ effects.enter('listItemPrefix')

}
return nok(code)
}
/** @type {State} */
function inside(code) {

@@ -107,3 +103,2 @@ if (asciiDigit(code) && ++size < 10) {

}
if (

@@ -118,9 +113,8 @@ (!self.interrupt || size < 2) &&

}
return nok(code)
}
/**
* @type {State}
**/
function atMarker(code) {

@@ -132,3 +126,4 @@ effects.enter('listItemMarker')

return effects.check(
blankLine, // Can’t be empty when interrupting.
blankLine,
// Can’t be empty when interrupting.
self.interrupt ? nok : onBlank,

@@ -142,4 +137,4 @@ effects.attempt(

}
/** @type {State} */
function onBlank(code) {

@@ -150,4 +145,4 @@ self.containerState.initialBlankLine = true

}
/** @type {State} */
function otherPrefix(code) {

@@ -160,7 +155,6 @@ if (markdownSpace(code)) {

}
return nok(code)
}
/** @type {State} */
function endOfPrefix(code) {

@@ -173,7 +167,7 @@ self.containerState.size =

}
/**
* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
function tokenizeListContinuation(effects, ok, nok) {

@@ -183,10 +177,11 @@ const self = this

return effects.check(blankLine, onBlank, notBlank)
/** @type {State} */
function onBlank(code) {
self.containerState.furtherBlankLines =
self.containerState.furtherBlankLines ||
self.containerState.initialBlankLine // We have a blank line.
self.containerState.initialBlankLine
// We have a blank line.
// Still, try to consume at most the items size.
return factorySpace(

@@ -199,4 +194,4 @@ effects,

}
/** @type {State} */
function notBlank(code) {

@@ -208,3 +203,2 @@ if (self.containerState.furtherBlankLines || !markdownSpace(code)) {

}
self.containerState.furtherBlankLines = undefined

@@ -214,9 +208,11 @@ self.containerState.initialBlankLine = undefined

}
/** @type {State} */
function notInCurrentItem(code) {
// While we do continue, we signal that the flow should be closed.
self.containerState._closeFlow = true // As we’re closing flow, we’re no longer interrupting.
self.containerState._closeFlow = true
// As we’re closing flow, we’re no longer interrupting.
self.interrupt = undefined
// Always populated by defaults.
self.interrupt = undefined
return factorySpace(

@@ -232,7 +228,7 @@ effects,

}
/**
* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
function tokenizeIndent(effects, ok, nok) {

@@ -246,4 +242,4 @@ const self = this

)
/** @type {State} */
function afterPrefix(code) {

@@ -258,17 +254,20 @@ const tail = self.events[self.events.length - 1]

}
/**
* @type {Exiter}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
// Container exiter for a list: closes the `listOrdered`/`listUnordered`
// token that was opened when the list container started.
// `this.containerState.type` holds that token type — presumably set by
// `tokenizeListStart` (not fully visible here); TODO confirm against the
// full source.
function tokenizeListEnd(effects) {
  effects.exit(this.containerState.type)
}
/**
* @type {Tokenizer}
* @this {TokenizeContextWithState}
* @this {TokenizeContext}
*/
function tokenizeListItemPrefixWhitespace(effects, ok, nok) {
const self = this
// Always populated by defaults.
return factorySpace(

@@ -282,4 +281,4 @@ effects,

)
/** @type {State} */
function afterPrefix(code) {

@@ -286,0 +285,0 @@ const tail = self.events[self.events.length - 1]

/** @type {Construct} */
export const setextUnderline: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type Resolver = import('micromark-util-types').Resolver
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
/** @type {Construct} */

@@ -17,17 +18,16 @@ export const setextUnderline = {

}
/** @type {Resolver} */
function resolveToSetextUnderline(events, context) {
// To do: resolve like `markdown-rs`.
let index = events.length
/** @type {number|undefined} */
/** @type {number | undefined} */
let content
/** @type {number|undefined} */
/** @type {number | undefined} */
let text
/** @type {number|undefined} */
/** @type {number | undefined} */
let definition
let definition // Find the opening of the content.
// Find the opening of the content.
// It’ll always exist: we don’t tokenize if it isn’t there.
while (index--) {

@@ -39,7 +39,7 @@ if (events[index][0] === 'enter') {

}
if (events[index][1].type === 'paragraph') {
text = index
}
} // Exit
}
// Exit
else {

@@ -50,3 +50,2 @@ if (events[index][1].type === 'content') {

}
if (!definition && events[index][1].type === 'definition') {

@@ -57,3 +56,2 @@ definition = index

}
const heading = {

@@ -63,7 +61,9 @@ type: 'setextHeading',

end: Object.assign({}, events[events.length - 1][1].end)
} // Change the paragraph to setext heading text.
}
events[text][1].type = 'setextHeadingText' // If we have definitions in the content, we’ll keep on having content,
// Change the paragraph to setext heading text.
events[text][1].type = 'setextHeadingText'
// If we have definitions in the content, we’ll keep on having content,
// but we need move it.
if (definition) {

@@ -75,59 +75,108 @@ events.splice(text, 0, ['enter', heading, context])

events[content][1] = heading
} // Add the heading exit at the end.
}
// Add the heading exit at the end.
events.push(['exit', heading, context])
return events
}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeSetextUnderline(effects, ok, nok) {
const self = this
let index = self.events.length
/** @type {NonNullable<Code>} */
let marker
/** @type {boolean} */
return start
let paragraph // Find an opening.
while (index--) {
// Skip enter/exit of line ending, line prefix, and content.
// We can now either have a definition or a paragraph.
if (
self.events[index][1].type !== 'lineEnding' &&
self.events[index][1].type !== 'linePrefix' &&
self.events[index][1].type !== 'content'
) {
paragraph = self.events[index][1].type === 'paragraph'
break
/**
* At start of heading (setext) underline.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function start(code) {
let index = self.events.length
/** @type {boolean | undefined} */
let paragraph
// Find an opening.
while (index--) {
// Skip enter/exit of line ending, line prefix, and content.
// We can now either have a definition or a paragraph.
if (
self.events[index][1].type !== 'lineEnding' &&
self.events[index][1].type !== 'linePrefix' &&
self.events[index][1].type !== 'content'
) {
paragraph = self.events[index][1].type === 'paragraph'
break
}
}
}
return start
/** @type {State} */
function start(code) {
// To do: handle lazy/pierce like `markdown-rs`.
// To do: parse indent like `markdown-rs`.
if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) {
effects.enter('setextHeadingLine')
effects.enter('setextHeadingLineSequence')
marker = code
return closingSequence(code)
return before(code)
}
return nok(code)
}
/** @type {State} */
function closingSequence(code) {
/**
* After optional whitespace, at `-` or `=`.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter('setextHeadingLineSequence')
return inside(code)
}
/**
* In sequence.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code)
return closingSequence
return inside
}
effects.exit('setextHeadingLineSequence')
return factorySpace(effects, closingSequenceEnd, 'lineSuffix')(code)
return markdownSpace(code)
? factorySpace(effects, after, 'lineSuffix')(code)
: after(code)
}
/** @type {State} */
function closingSequenceEnd(code) {
/**
* After sequence, after optional whitespace.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function after(code) {
if (code === null || markdownLineEnding(code)) {

@@ -137,5 +186,4 @@ effects.exit('setextHeadingLine')

}
return nok(code)
}
}
/** @type {Construct} */
export const thematicBreak: Construct
export type Code = import('micromark-util-types').Code
export type Construct = import('micromark-util-types').Construct
export type State = import('micromark-util-types').State
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding, markdownSpace} from 'micromark-util-character'
/** @type {Construct} */

@@ -15,19 +16,54 @@ export const thematicBreak = {

}
/** @type {Tokenizer} */
/**
* @this {TokenizeContext}
* @type {Tokenizer}
*/
function tokenizeThematicBreak(effects, ok, nok) {
let size = 0
/** @type {NonNullable<Code>} */
let marker
return start
/** @type {State} */
/**
* Start of thematic break.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter('thematicBreak')
// To do: parse indent like `markdown-rs`.
return before(code)
}
/**
* After optional whitespace, at marker.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function before(code) {
marker = code
return atBreak(code)
}
/** @type {State} */
/**
* After something, before something else.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {

@@ -38,16 +74,19 @@ if (code === marker) {

}
if (markdownSpace(code)) {
return factorySpace(effects, atBreak, 'whitespace')(code)
if (size >= 3 && (code === null || markdownLineEnding(code))) {
effects.exit('thematicBreak')
return ok(code)
}
if (size < 3 || (code !== null && !markdownLineEnding(code))) {
return nok(code)
}
effects.exit('thematicBreak')
return ok(code)
return nok(code)
}
/** @type {State} */
/**
* In sequence.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function sequence(code) {

@@ -59,6 +98,7 @@ if (code === marker) {

}
effects.exit('thematicBreakSequence')
return atBreak(code)
return markdownSpace(code)
? factorySpace(effects, atBreak, 'whitespace')(code)
: atBreak(code)
}
}
{
"name": "micromark-core-commonmark",
"version": "1.0.6",
"version": "1.1.0",
"description": "The CommonMark markdown constructs",

@@ -38,2 +38,3 @@ "license": "MIT",

"exports": {
"types": "./dev/index.d.ts",
"development": "./dev/index.js",

@@ -61,3 +62,3 @@ "default": "./index.js"

"scripts": {
"build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage"
"build": "micromark-build"
},

@@ -64,0 +65,0 @@ "xo": false,

@@ -11,3 +11,3 @@ # micromark-core-commonmark

The core CommonMark constructs needed to tokenize markdown.
[micromark][] constructs that make up the core of CommonMark.
Some of these can be [turned off][disable], but they are often essential to

@@ -18,5 +18,9 @@ markdown and weird things might happen.

* [What is this?](#what-is-this)
* [When should I use this?](#when-should-i-use-this)
* [Install](#install)
* [Use](#use)
* [API](#api)
* [Types](#types)
* [Compatibility](#compatibility)
* [Security](#security)

@@ -26,5 +30,14 @@ * [Contribute](#contribute)

## What is this?
This package exposes the default constructs.
## When should I use this?
This package is useful when you are making your own micromark extensions.
## Install
[npm][]:
This package is [ESM only][esm].
In Node.js (version 16+), install with [npm][]:

@@ -35,2 +48,16 @@ ```sh

In Deno with [`esm.sh`][esmsh]:
```js
import * as core from 'https://esm.sh/micromark-core-commonmark@1'
```
In browsers with [`esm.sh`][esmsh]:
```html
<script type="module">
import * as core from 'https://esm.sh/micromark-core-commonmark@1?bundle'
</script>
```
## Use

@@ -54,8 +81,23 @@

Each identifier refers to a [construct](https://github.com/micromark/micromark#constructs).
Each identifier refers to a [construct][].
See the code for more on the exported constructs.
## Types
This package is fully typed with [TypeScript][].
It exports no additional types.
## Compatibility
Projects maintained by the unified collective are compatible with all maintained
versions of Node.js.
As of now, that is Node.js 16+.
Our projects sometimes work with older versions, but this is not guaranteed.
This package works with `micromark` version 3+.
## Security
This package is safe.
See [`security.md`][securitymd] in [`micromark/.github`][health] for how to

@@ -92,5 +134,5 @@ submit a security report.

[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-core-commonmark.svg
[bundle-size-badge]: https://img.shields.io/badge/dynamic/json?label=minzipped%20size&query=$.size.compressedSize&url=https://deno.bundlejs.com/?q=micromark-core-commonmark
[bundle-size]: https://bundlephobia.com/result?p=micromark-core-commonmark
[bundle-size]: https://bundlejs.com/?q=micromark-core-commonmark

@@ -105,2 +147,6 @@ [sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg

[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c
[esmsh]: https://esm.sh
[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg

@@ -116,10 +162,16 @@

[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md
[securitymd]: https://github.com/micromark/.github/blob/main/security.md
[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md
[contributing]: https://github.com/micromark/.github/blob/main/contributing.md
[support]: https://github.com/micromark/.github/blob/HEAD/support.md
[support]: https://github.com/micromark/.github/blob/main/support.md
[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md
[coc]: https://github.com/micromark/.github/blob/main/code-of-conduct.md
[disable]: https://github.com/micromark/micromark#case-turn-off-constructs
[construct]: https://github.com/micromark/micromark#constructs
[typescript]: https://www.typescriptlang.org
[micromark]: https://github.com/micromark/micromark
SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc