micromark-util-types
Comparing version 1.0.2 to 1.1.0

index.d.ts

@@ -0,1 +1,3 @@

// Note: this file is authored manually, not generated from `index.js`.
/**

@@ -12,2 +14,3 @@ * A character code.

export type Code = number | null
/**

@@ -21,2 +24,3 @@ * A chunk is either a character code or a slice of a buffer in the form of a

export type Chunk = Code | string
/**

@@ -66,3 +70,4 @@ * Enumeration of the content types.

*/
export type ContentType = 'document' | 'flow' | 'content' | 'text' | 'string'
export type ContentType = 'content' | 'document' | 'flow' | 'string' | 'text'
/**

@@ -80,24 +85,30 @@ * A location in the document (`line`/`column`/`offset`) and chunk (`_index`,

/**
* 1-indexed line number
* Position in a string chunk (or `-1` when pointing to a numeric chunk).
*/
line: number
_bufferIndex: number
/**
* 1-indexed column number
* Position in a list of chunks.
*/
column: number
_index: number
/**
* 0-indexed position in the document
* 1-indexed column number.
*/
offset: number
column: number
/**
* Position in a list of chunks
* 1-indexed line number.
*/
_index: number
line: number
/**
* Position in a string chunk (or `-1` when pointing to a numeric chunk)
* 0-indexed position in the document.
*/
_bufferIndex: number
offset: number
}
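To make the reordered `Point` fields concrete, here is a minimal sketch (all values are made up for illustration):

```ts
import type {Point} from 'micromark-util-types'

// Made-up location: line 3, column 1, ten characters into the document,
// at the start of the third chunk.
const place: Point = {
  _bufferIndex: 0, // Position in a string chunk (`-1` for a code chunk).
  _index: 2, // Position in a list of chunks.
  column: 1, // 1-indexed column number.
  line: 3, // 1-indexed line number.
  offset: 10 // 0-indexed position in the document.
}
```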
/**
* A token: a span of chunks.
*
* Tokens are what the core of micromark produces: the built in HTML compiler

@@ -130,6 +141,18 @@ * or other tools can turn them into different things.

*/
export type Token = {
type: string
export interface Token {
/**
* Token type.
*/
type: TokenType
/**
* Point where the token starts.
*/
start: Point
/**
* Point where the token ends.
*/
end: Point
/**

@@ -139,6 +162,8 @@ * The previous token in a list of linked tokens.

previous?: Token | undefined
/**
* The next token in a list of linked tokens
* The next token in a list of linked tokens.
*/
next?: Token | undefined
/**

@@ -148,3 +173,6 @@ * Declares a token as having content of a certain type.

contentType?: ContentType | undefined
/**
* Connected tokenizer.
*
* Used when dealing with linked tokens.

@@ -155,38 +183,62 @@ * A child tokenizer is needed to tokenize them, which is stored on those

_tokenizer?: TokenizeContext | undefined
/**
* A marker used to parse attention, depending on the characters before
* sequences (`**`), the sequence can open, close, both, or none
* Field to help parse attention.
*
* Depending on the character before sequences (`**`), the sequence can open,
* close, both, or none.
*/
_open?: boolean | undefined
/**
* A marker used to parse attention, depending on the characters after
* sequences (`**`), the sequence can open, close, both, or none
* Field to help parse attention.
*
* Depending on the character after sequences (`**`), the sequence can open,
* close, both, or none.
*/
_close?: boolean | undefined
/**
* A boolean used internally to figure out if a token is in the first content
* of a list item construct.
* Field to help parse GFM task lists.
*
* This boolean is used internally to figure out if a token is in the first
* content of a list item construct.
*/
_isInFirstContentOfListItem?: boolean | undefined
/**
* A boolean used internally to figure out if a token is a container token.
* Field to help parse containers.
*
* This boolean is used internally to figure out if a token is a container
* token.
*/
_container?: boolean | undefined
/**
* A boolean used internally to figure out if a list is loose or not.
* Field to help parse lists.
*
* This boolean is used internally to figure out if a list is loose or not.
*/
_loose?: boolean | undefined
/**
* A boolean used internally to figure out if a link opening can’t be used
* (because links in links are incorrect).
* Field to help parse links.
*
* This boolean is used internally to figure out if a link opening
* can’t be used (because links in links are incorrect).
*/
_inactive?: boolean | undefined
/**
* A boolean used internally to figure out if a link opening is balanced: it’s
* not a link opening but has a balanced closing.
* Field to help parse links.
*
* This boolean is used internally to figure out if a link opening is
* balanced: it’s not a link opening but has a balanced closing.
*/
_balanced?: boolean | undefined
}
/**
* An event is the start or end of a token amongst other events.
* The start or end of a token amongst other events.
*
* Tokens can “contain” other tokens, even though they are stored in a flat

@@ -196,30 +248,60 @@ * list, through `enter`ing before them, and `exit`ing after them.

export type Event = ['enter' | 'exit', Token, TokenizeContext]
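A hedged sketch of how `Token` and `Event` fit together: events form a flat list in which `enter`/`exit` pairs imply nesting (`emphasis`, `data`, and `context` are placeholders declared only for illustration):

```ts
import type {Event, Token, TokenizeContext} from 'micromark-util-types'

// Placeholders, declared rather than constructed, for illustration only.
declare const emphasis: Token
declare const data: Token
declare const context: TokenizeContext

// Nesting is implied by order: `data` sits inside `emphasis`.
const events: Array<Event> = [
  ['enter', emphasis, context],
  ['enter', data, context],
  ['exit', data, context],
  ['exit', emphasis, context]
]
```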
/**
* Open a token.
*
* @param type
* Token type.
* @param fields
* Extra fields.
* @returns
* Token.
*/
export type Enter = (
type: string,
fields?: Record<string, unknown> | undefined
type: TokenType,
fields?: Omit<Partial<Token>, 'type'> | undefined
) => Token
/**
* Close a token.
*
* @param type
* Token type.
* @returns
* Token.
*/
export type Exit = (type: string) => Token
export type Exit = (type: TokenType) => Token
/**
* Deal with the character and move to the next.
*
* @param code
* Current code.
*/
export type Consume = (code: Code) => void
/**
* Attempt deals with several values, and tries to parse according to those
* values.
*
* If a value resulted in `ok`, it worked, the tokens that were made are used,
* and `returnState` is switched to.
* and `ok` is switched to.
* If the result is `nok`, the attempt failed, so we revert to the original
* state, and `bogusState` is used.
* state, and `nok` is used.
*
* @param construct
* Construct(s) to try.
* @param ok
* State to move to when successful.
* @param nok
* State to move to when unsuccessful.
* @returns
* Next state.
*/
export type Attempt = (
construct: Construct | Construct[] | ConstructRecord,
returnState: State,
bogusState?: State | undefined
) => (code: Code) => void
construct: Array<Construct> | Construct | ConstructRecord,
ok: State,
nok?: State | undefined
) => State
/**

@@ -230,26 +312,32 @@ * A context object to transition the state machine.

/**
* Start a new token.
* Start a new token.
*/
enter: Enter
/**
* End a started token.
* End a started token.
*/
exit: Exit
/**
* Deal with the character and move to the next.
* Deal with the character and move to the next.
*/
consume: Consume
/**
* Try to tokenize a construct.
* Try to tokenize a construct.
*/
attempt: Attempt
/**
* Interrupt is used for stuff right after a line of content.
* Interrupt is used for stuff right after a line of content.
*/
interrupt: Attempt
/**
* Attempt, then revert.
* Attempt, then revert.
*/
check: Attempt
}
/**

@@ -269,10 +357,38 @@ * The main unit in the state machine: a function that gets a character code

* surfaces early.
*
* @param code
* Current code.
* @returns
* Next state.
*/
export type State = (code: Code) => State | void
/**
* A resolver handles and cleans events coming from `tokenize`.
*
* @param events
* List of events.
* @param context
* Tokenize context.
* @returns
* The given, modified, events.
*/
export type Resolver = (events: Event[], context: TokenizeContext) => Event[]
export type Resolver = (
events: Array<Event>,
context: TokenizeContext
) => Array<Event>
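A minimal resolver sketch under the new signature, assuming (purely for illustration) that we want to drop `whitespace` events:

```ts
import type {Resolver} from 'micromark-util-types'

// Illustrative resolver: filter out `whitespace` events, keep the rest.
const dropWhitespace: Resolver = function (events) {
  return events.filter((event) => event[1].type !== 'whitespace')
}
```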
/**
* A tokenize function sets up a state machine to handle character codes streaming in.
*
* @param this
* Tokenize context.
* @param effects
* Effects.
* @param ok
* State to go to when successful.
* @param nok
* State to go to when unsuccessful.
* @returns
* First state.
*/

@@ -285,27 +401,59 @@ export type Tokenizer = (

) => State
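Putting `State`, `Effects`, and the tokenize signature together, a hedged sketch of a tokenizer (entirely made up; code 42 is `*`):

```ts
import type {Code, Effects, State, TokenizeContext} from 'micromark-util-types'

// Made-up tokenizer: accept a single `*` (code 42) as `data`, else fail.
function tokenizeAsterisk(
  this: TokenizeContext,
  effects: Effects,
  ok: State,
  nok: State
): State {
  return start

  function start(code: Code): State | void {
    if (code !== 42) return nok(code)
    effects.enter('data')
    effects.consume(code)
    effects.exit('data')
    return ok
  }
}
```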
/**
* Like a tokenizer, but without `ok` or `nok`.
*
* @param this
* Tokenize context.
* @param effects
* Effects.
* @returns
* First state.
*/
export type Initializer = (this: TokenizeContext, effects: Effects) => State
/**
* Like a tokenizer, but without `ok` or `nok`, and returning void.
*
* This is the final hook when a container must be closed.
*
* @param this
* Tokenize context.
* @param effects
* Effects.
* @returns
* Nothing.
*/
export type Exiter = (this: TokenizeContext, effects: Effects) => void
/**
* Guard whether `code` can come before the construct.
*
* In certain cases a construct can hook into many potential start characters.
* Instead of setting up an attempt to parse that construct for most
* characters, this is a speedy way to reduce that.
*
* @param this
* Tokenize context.
* @param code
* Previous code.
* @returns
* Whether `code` is allowed before.
*/
export type Previous = (this: TokenizeContext, code: Code) => boolean
/**
* An object descibing how to parse a markdown construct.
* An object describing how to parse a markdown construct.
*/
export type Construct = {
/**
* Set up a state machine to handle character codes streaming in.
*/
tokenize: Tokenizer
/**
* Guard whether the previous character can come before the construct
* Guard whether the previous character can come before the construct.
*/
previous?: Previous | undefined
/**

@@ -315,2 +463,3 @@ * For containers, a continuation construct.

continuation?: Construct | undefined
/**

@@ -320,12 +469,17 @@ * For containers, a final hook.

exit?: Exiter | undefined
/**
* Name of the construct, used to toggle constructs off.
*
* Named constructs must not be `partial`.
*/
name?: string | undefined
/**
* Whether this construct represents a partial construct.
*
* Partial constructs must not have a `name`.
*/
partial?: boolean | undefined
/**

@@ -340,2 +494,3 @@ * Resolve the events parsed by `tokenize`.

resolve?: Resolver | undefined
/**

@@ -351,2 +506,3 @@ * Resolve the events from the start of the content (which includes other

resolveTo?: Resolver | undefined
/**

@@ -362,2 +518,3 @@ * Resolve all events when the content is complete, from the start to the end.

resolveAll?: Resolver | undefined
/**

@@ -387,15 +544,19 @@ * Concrete constructs cannot be interrupted by more containers.

concrete?: boolean | undefined
/**
* Whether the construct, when in a `ConstructRecord`, precedes over existing
* constructs for the same character code when merged
* constructs for the same character code when merged.
*
* The default is that new constructs precede over existing ones.
*/
add?: 'before' | 'after' | undefined
add?: 'after' | 'before' | undefined
}
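A hedged sketch of a construct built from the fields above; `tokenizeAsterisk` stands in for the made-up tokenizer sketched earlier:

```ts
import type {Construct, Tokenizer} from 'micromark-util-types'

// Stand-in for the made-up tokenizer sketched earlier.
declare const tokenizeAsterisk: Tokenizer

const asterisk: Construct = {
  name: 'asterisk',
  tokenize: tokenizeAsterisk,
  previous(code) {
    return code !== 42 // Guard: not directly after another `*`.
  }
}
```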
/**
* Like a construct, but `tokenize` does not accept `ok` or `nok`.
*/
export type InitialConstruct = Construct & {
export type InitialConstruct = Omit<Construct, 'tokenize'> & {
tokenize: Initializer
}
/**

@@ -406,18 +567,62 @@ * Several constructs, mapped from their initial codes.

string,
undefined | Construct | Construct[]
Array<Construct> | Construct | undefined
>
/**
* State shared between container calls.
*/
export interface ContainerState {
/**
* Special field to close the current flow (or containers).
*/
_closeFlow?: boolean | undefined
/**
* Used by block quotes.
*/
open?: boolean | undefined
/**
* Current marker, used by lists.
*/
marker?: Code | undefined
/**
* Current token type, used by lists.
*/
type?: TokenType | undefined
/**
* Current size, used by lists.
*/
size?: number | undefined
/**
* Whether the first line is blank, used by lists.
*/
initialBlankLine?: boolean | undefined
/**
* Whether there are further blank lines, used by lists.
*/
furtherBlankLines?: boolean | undefined
}
/**
* A context object that helps w/ tokenizing markdown constructs.
*/
export type TokenizeContext = {
export interface TokenizeContext {
/**
* The previous code.
* The previous code.
*/
previous: Code
/**
* Current code.
* Current code.
*/
code: Code
/**
* Whether we’re currently interrupting.
*
* Take for example:

@@ -433,9 +638,13 @@ *

interrupt?: boolean | undefined
/**
* The current construct.
*
* Constructs that are not `partial` are set here.
*/
currentConstruct?: Construct | undefined
/**
* Info set when parsing containers.
* Shared state set when parsing containers.
*
* Containers are parsed in separate phases: their first line (`tokenize`),

@@ -445,43 +654,75 @@ * continued lines (`continuation.tokenize`), and finally `exit`.

*/
containerState?:
| (Record<string, unknown> & {
_closeFlow?: boolean | undefined
})
| undefined
containerState?: ContainerState | undefined
/**
* Current list of events.
* Current list of events.
*/
events: Event[]
events: Array<Event>
/**
* The relevant parsing context.
* The relevant parsing context.
*/
parser: ParseContext
/**
* Get the chunks that span a token (or location).
* Get the chunks that span a token (or location).
*
* @param token
* Start/end in stream.
* @returns
* List of chunks.
*/
sliceStream: (token: Pick<Token, 'start' | 'end'>) => Chunk[]
sliceStream: (token: Pick<Token, 'end' | 'start'>) => Array<Chunk>
/**
* Get the source text that spans a token (or location).
* Get the source text that spans a token (or location).
*
* @param token
* Start/end in stream.
* @param expandTabs
* Whether to expand tabs.
* @returns
* Serialized chunks.
*/
sliceSerialize: (
token: Pick<Token, 'start' | 'end'>,
token: Pick<Token, 'end' | 'start'>,
expandTabs?: boolean | undefined
) => string
/**
* Get the current place.
* Get the current place.
*
* @returns
* Current point.
*/
now: () => Point
/**
* Define a skip: as containers (block quotes, lists), “nibble” a prefix from
* the margins, where a line starts after that prefix is defined here.
* When the tokenizers moves after consuming a line ending corresponding to
* the line number in the given point, the tokenizer shifts past the prefix
* based on the column in the shifted point.
* Define a skip.
*
* As containers (block quotes, lists), “nibble” a prefix from the margins,
* where a line starts after that prefix is defined here.
* When the tokenizer moves after consuming a line ending corresponding to
* the line number in the given point, the tokenizer shifts past the prefix
* based on the column in the shifted point.
*
* @param point
* Skip.
* @returns
* Nothing.
*/
defineSkip: (value: Point) => void
defineSkip: (point: Point) => void
/**
* Write a slice of chunks.
* The eof code (`null`) can be used to signal the end of the stream.
* Write a slice of chunks.
*
* The eof code (`null`) can be used to signal the end of the stream.
*
* @param slice
* Chunks.
* @returns
* Events.
*/
write: (slice: Chunk[]) => Event[]
write: (slice: Array<Chunk>) => Array<Event>
/**

@@ -493,2 +734,5 @@ * Internal boolean shared with `micromark-extension-gfm-task-list-item` to

_gfmTasklistFirstContentOfListItem?: boolean | undefined
// To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer
// needed in micromark-extension-gfm-table@1.0.6).
/**

@@ -515,6 +759,8 @@ * Internal boolean shared with `micromark-extension-gfm-table` whose body

*/
_gfmTableDynamicInterruptHack?: boolean | undefined
_gfmTableDynamicInterruptHack?: boolean
}
/**
* Encodings supported by the buffer class.
*
* This is a copy of the typing from Node, copied to prevent Node globals from

@@ -526,13 +772,15 @@ * being needed.

| 'ascii'
| 'utf8'
| 'utf-8'
| 'utf16le'
| 'ucs2'
| 'ucs-2'
| 'base64'
| 'latin1'
| 'binary'
| 'hex'
| 'latin1'
| 'ucs-2'
| 'ucs2'
| 'utf-8'
| 'utf16le'
| 'utf8'
/**
* Contents of the file.
*
* Can either be text, or a `Buffer` like structure.

@@ -544,82 +792,98 @@ * This does not directly use type `Buffer`, because it can also be used in a

*/
export type Value = string | Uint8Array
export type _ExtensionFields = {
document: ConstructRecord
contentInitial: ConstructRecord
flowInitial: ConstructRecord
flow: ConstructRecord
string: ConstructRecord
text: ConstructRecord
disable: {
null?: string[]
}
insideSpan: {
null?: Pick<Construct, 'resolveAll'>[]
}
attentionMarkers: {
null?: Code[]
}
}
export type _NormalizedExtensionFields = {
document: Record<string, Construct[]>
contentInitial: Record<string, Construct[]>
flowInitial: Record<string, Construct[]>
flow: Record<string, Construct[]>
string: Record<string, Construct[]>
text: Record<string, Construct[]>
disable: {
null: string[]
}
insideSpan: {
null: Pick<Construct, 'resolveAll'>[]
}
attentionMarkers: {
null: Code[]
}
}
export type Value = Uint8Array | string
/**
* A syntax extension changes how markdown is tokenized.
*
* See: <https://github.com/micromark/micromark#syntaxextension>
*/
export type Extension = Record<string, Record<string, unknown>> &
Partial<_ExtensionFields>
export type FullNormalizedExtension = Record<
string,
Record<string, unknown[]>
> &
_NormalizedExtensionFields
export type NormalizedExtension = Record<
string,
Record<string, unknown[] | undefined>
> &
Partial<_NormalizedExtensionFields>
export interface Extension {
document?: ConstructRecord | undefined
contentInitial?: ConstructRecord | undefined
flowInitial?: ConstructRecord | undefined
flow?: ConstructRecord | undefined
string?: ConstructRecord | undefined
text?: ConstructRecord | undefined
disable?: {null?: Array<string> | undefined} | undefined
insideSpan?:
| {null?: Array<Pick<Construct, 'resolveAll'>> | undefined}
| undefined
attentionMarkers?: {null?: Array<Code> | undefined} | undefined
}
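A hedged sketch of a syntax extension under the new interface: constructs are mapped from the character code they start at (42 is `*`; `asterisk` is the made-up construct from the earlier sketch):

```ts
import type {Construct, Extension} from 'micromark-util-types'

// Stand-in for the made-up construct sketched earlier.
declare const asterisk: Construct

// Hook the construct into text content, keyed by its starting code.
const syntax: Extension = {
  text: {42: asterisk}
}
```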
/**
* Set up a tokenizer for a content type.
* A filtered, combined, extension.
*/
export type NormalizedExtension = {
[Key in keyof Extension]: Exclude<Extension[Key], undefined>
}
/**
* A full, filtered, normalized, extension.
*/
export type FullNormalizedExtension = {
[Key in keyof Extension]-?: Exclude<Extension[Key], undefined>
}
/**
* Create a context.
*
* @param from
* Where to create from.
* @returns
* Context.
*/
export type Create = (
from?: Omit<Point, '_index' | '_bufferIndex'> | undefined
from?: Omit<Point, '_bufferIndex' | '_index'> | undefined
) => TokenizeContext
/**
* Parse options.
* Config defining how to parse.
*/
export type ParseOptions = {
export interface ParseOptions {
/**
* Array of syntax extensions
* Array of syntax extensions (default: `[]`).
*/
extensions?: Extension[] | undefined
extensions?: Array<Extension> | null | undefined
}
/**
* A context object that helps w/ parsing markdown.
*/
export type ParseContext = {
export interface ParseContext {
/**
* All constructs.
*/
constructs: FullNormalizedExtension
/**
* Create a content tokenizer.
*/
content: Create
/**
* Create a document tokenizer.
*/
document: Create
/**
* Create a flow tokenizer.
*/
flow: Create
/**
* Create a string tokenizer.
*/
string: Create
/**
* Create a text tokenizer.
*/
text: Create
/**
* List of defined identifiers.
*/
defined: string[]
defined: Array<string>
/**

@@ -639,57 +903,124 @@ * Map of line numbers to whether they are lazy (as opposed to the line before

}
/**
* HTML compiler context
* HTML compiler context.
*/
export type CompileContext = {
/**
* Configuration passed by the user.
* Configuration passed by the user.
*/
options: CompileOptions
/**
* Set data into the key-value store.
* Set data into the key-value store.
*
* @param key
* Key.
* @param value
* Value.
* @returns
* Nothing.
*/
setData: (key: string, value?: unknown) => void
setData: <Key extends keyof CompileData>(
key: Key,
value?: CompileData[Key]
) => void
/**
* Get data from the key-value store.
* Get data from the key-value store.
*
* @param key
* Key.
* @returns
* Value at `key` in compile data.
*/
getData: <K extends string>(key: K) => CompileData[K]
getData: <Key extends keyof CompileData>(key: Key) => CompileData[Key]
/**
* Output an extra line ending if the previous value wasn’t EOF/EOL.
* Output an extra line ending if the previous value wasn’t EOF/EOL.
*
* @returns
* Nothing.
*/
lineEndingIfNeeded: () => void
/**
* Make a value safe for injection in HTML (except w/ `ignoreEncode`).
* Make a value safe for injection in HTML (except w/ `ignoreEncode`).
*
* @param value
* Raw value.
* @returns
* Safe value.
*/
encode: (value: string) => string
/**
* Capture some of the output data.
* Capture some of the output data.
*
* @returns
* Nothing.
*/
buffer: () => void
/**
* Stop capturing and access the output data.
* Stop capturing and access the output data.
*
* @returns
* Captured data.
*/
resume: () => string
/**
* Output raw data.
* Output raw data.
*
* @param value
* Raw value.
* @returns
* Nothing.
*/
raw: (value: string) => void
/**
* Output (parts of) HTML tags.
* Output (parts of) HTML tags.
*
* @param value
* Raw value.
* @returns
* Nothing.
*/
tag: (value: string) => void
/**
* Get the string value of a token
* Get the string value of a token.
*
* @param token
* Start/end in stream.
* @param expandTabs
* Whether to expand tabs.
* @returns
* Serialized chunks.
*/
sliceSerialize: TokenizeContext['sliceSerialize']
}
/**
* Serialize micromark events as HTML
* Serialize micromark events as HTML.
*/
export type Compile = (events: Event[]) => string
export type Compile = (events: Array<Event>) => string
/**
* Handle one token
* Handle one token.
*
* @param token
* Token.
* @returns
* Nothing.
*/
export type Handle = (this: CompileContext, token: Token) => void
/**
* Handle the whole
* Handle the whole document.
*
* @returns
* Nothing.
*/

@@ -699,67 +1030,300 @@ export type DocumentHandle = (

) => void
/**
* Token types mapping to handles
* Token types mapping to handles.
*/
export type Handles = Record<string, Handle> & {
export type Handles = {
/**
* Token handle.
*/
[Key in TokenType]?: Handle
} & {
/**
* Document handle.
*/
null?: DocumentHandle
}
export type NormalizedHtmlExtension = Record<
string,
Record<string, unknown>
> & {
enter: Handles
exit: Handles
/**
* Normalized extension.
*/
export interface HtmlExtension {
enter?: Handles | undefined
exit?: Handles | undefined
}
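A hedged sketch of an HTML extension in the new shape, with one made-up handle that emits the text of `data` tokens:

```ts
import type {HtmlExtension} from 'micromark-util-types'

// Made-up handle: output the text of `data` tokens, safely encoded.
const html: HtmlExtension = {
  enter: {
    data(token) {
      this.raw(this.encode(this.sliceSerialize(token)))
    }
  }
}
```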
/**
* An HTML extension changes how markdown tokens are serialized.
*/
export type HtmlExtension = Partial<NormalizedHtmlExtension>
export type _CompileDataFields = {
lastWasTag: boolean
expectFirstItem: boolean
slurpOneLineEnding: boolean
slurpAllLineEndings: boolean
fencedCodeInside: boolean
fencesCount: number
flowCodeSeenData: boolean
ignoreEncode: boolean
headingRank: number
inCodeText: boolean
characterReferenceType: string
tightStack: boolean[]
export type NormalizedHtmlExtension = {
[Key in keyof HtmlExtension]-?: Exclude<HtmlExtension[Key], undefined>
}
export type CompileData = Record<string, unknown> & Partial<_CompileDataFields>
/**
* Compile options
* Definition.
*/
export type CompileOptions = {
export type Definition = {
/**
* Value to use for line endings not in `doc` (`string`, default: first line
* ending or `'\n'`).
* Destination.
*/
destination?: string | undefined
/**
* Title.
*/
title?: string | undefined
}
/**
* State tracked to compile events as HTML.
*/
export interface CompileData {
/**
* Whether the last emitted value was a tag.
*/
lastWasTag?: boolean | undefined
/**
* Whether the first list item is expected, used by lists.
*/
expectFirstItem?: boolean | undefined
/**
* Whether to slurp the next line ending (resets itself on the next line
* ending).
*/
slurpOneLineEnding?: boolean | undefined
/**
* Whether to slurp all future line endings (has to be unset manually).
*/
slurpAllLineEndings?: boolean | undefined
/**
* Whether we’re in fenced code, used by code (fenced).
*/
fencedCodeInside?: boolean | undefined
/**
* Number of fences that were seen, used by code (fenced).
*/
fencesCount?: number | undefined
/**
* Whether we’ve seen code data, used by code (fenced, indented).
*/
flowCodeSeenData?: boolean | undefined
/**
* Ignore encoding unsafe characters, used for example for URLs which are
* first percent encoded, or by HTML when supporting it.
*/
ignoreEncode?: boolean | undefined
/**
* Current heading rank, used by heading (atx, setext).
*/
headingRank?: number | undefined
/**
* Whether we’re in code data, used by code (text).
*/
inCodeText?: boolean | undefined
/**
* Current character reference kind.
*/
characterReferenceType?: string | undefined
/**
* Stack of containers, whether they’re tight or not.
*/
tightStack: Array<boolean>
/**
* Collected definitions.
*/
definitions: Record<string, Definition>
}
/**
* Type of line ending in markdown.
*/
export type LineEnding = '\r' | '\n' | '\r\n'
/**
* Compile options.
*/
export interface CompileOptions {
/**
* Whether to allow (dangerous) HTML (`boolean`, default: `false`).
*
* Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
* markdown document over to the compiled HTML.
* In some cases, such as `> a`, CommonMark requires that extra line endings
* are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
* The default is `false`, which still parses the HTML according to
* `CommonMark` but shows the HTML as text instead of as elements.
*
* Pass `true` for trusted content to get actual HTML elements.
*/
defaultLineEnding?: '\r' | '\n' | '\r\n' | undefined
allowDangerousHtml?: boolean | null | undefined
/**
* Whether to allow embedded HTML (`boolean`, default: `false`).
* Whether to allow dangerous protocols in links and images (`boolean`,
* default: `false`).
*
* The default is `false`, which drops URLs in links and images that use
* dangerous protocols.
*
* Pass `true` for trusted content to support all protocols.
*
* URLs that have no protocol (which means it’s relative to the current page,
* such as `./some/page.html`) and URLs that have a safe protocol (for
* images: `http`, `https`; for links: `http`, `https`, `irc`, `ircs`,
* `mailto`, `xmpp`), are safe.
* All other URLs are dangerous and dropped.
*/
allowDangerousHtml?: boolean | undefined
allowDangerousProtocol?: boolean | null | undefined
/**
* Whether to allow potentially dangerous protocols in links and images
* (`boolean`, default: `false`).
* URLs relative to the current protocol are always allowed (such as,
* `image.jpg`).
* For links, the allowed protocols are `http`, `https`, `irc`, `ircs`,
* `mailto`, and `xmpp`.
* For images, the allowed protocols are `http` and `https`.
* Default line ending to use when compiling to HTML, for line endings not in
* `value`.
*
* Generally, `micromark` copies line endings (`\r`, `\n`, `\r\n`) in the
* markdown document over to the compiled HTML.
* In some cases, such as `> a`, CommonMark requires that extra line endings
* are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
*
* To create that line ending, the document is checked for the first line
* ending that is used.
* If there is no line ending, `defaultLineEnding` is used.
* If that isn’t configured, `\n` is used.
*/
allowDangerousProtocol?: boolean | undefined
defaultLineEnding?: LineEnding | null | undefined
/**
* Array of HTML extensions
* Array of HTML extensions (default: `[]`).
*/
htmlExtensions?: Partial<NormalizedHtmlExtension>[] | undefined
htmlExtensions?: Array<HtmlExtension> | null | undefined
}
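Because `Options` below is `ParseOptions & CompileOptions`, both sets of fields travel in a single object. A hedged usage sketch with the separate `micromark` package (assumed to be installed alongside):

```ts
import {micromark} from 'micromark'

// `allowDangerousHtml` and `defaultLineEnding` are `CompileOptions`;
// `extensions` and `htmlExtensions` would slot into the same object.
const result = micromark('> *a*', {
  allowDangerousHtml: false,
  defaultLineEnding: '\n'
})
```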
/**
* Configuration.
*/
export type Options = ParseOptions & CompileOptions
/**
* Enum of allowed token types.
*/
export type TokenType = keyof TokenTypeMap
// Note: when changing the next interface, you likely also have to change
// `micromark-util-symbol/types.js`.
/**
* Map of allowed token types.
*/
export interface TokenTypeMap {
// Note: these are compiled away.
attentionSequence: 'attentionSequence' // To do: remove.
space: 'space' // To do: remove.
data: 'data'
whitespace: 'whitespace'
lineEnding: 'lineEnding'
lineEndingBlank: 'lineEndingBlank'
linePrefix: 'linePrefix'
lineSuffix: 'lineSuffix'
atxHeading: 'atxHeading'
atxHeadingSequence: 'atxHeadingSequence'
atxHeadingText: 'atxHeadingText'
autolink: 'autolink'
autolinkEmail: 'autolinkEmail'
autolinkMarker: 'autolinkMarker'
autolinkProtocol: 'autolinkProtocol'
characterEscape: 'characterEscape'
characterEscapeValue: 'characterEscapeValue'
characterReference: 'characterReference'
characterReferenceMarker: 'characterReferenceMarker'
characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric'
characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal'
characterReferenceValue: 'characterReferenceValue'
codeFenced: 'codeFenced'
codeFencedFence: 'codeFencedFence'
codeFencedFenceSequence: 'codeFencedFenceSequence'
codeFencedFenceInfo: 'codeFencedFenceInfo'
codeFencedFenceMeta: 'codeFencedFenceMeta'
codeFlowValue: 'codeFlowValue'
codeIndented: 'codeIndented'
codeText: 'codeText'
codeTextData: 'codeTextData'
codeTextPadding: 'codeTextPadding'
codeTextSequence: 'codeTextSequence'
content: 'content'
definition: 'definition'
definitionDestination: 'definitionDestination'
definitionDestinationLiteral: 'definitionDestinationLiteral'
definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker'
definitionDestinationRaw: 'definitionDestinationRaw'
definitionDestinationString: 'definitionDestinationString'
definitionLabel: 'definitionLabel'
definitionLabelMarker: 'definitionLabelMarker'
definitionLabelString: 'definitionLabelString'
definitionMarker: 'definitionMarker'
definitionTitle: 'definitionTitle'
definitionTitleMarker: 'definitionTitleMarker'
definitionTitleString: 'definitionTitleString'
emphasis: 'emphasis'
emphasisSequence: 'emphasisSequence'
emphasisText: 'emphasisText'
escapeMarker: 'escapeMarker'
hardBreakEscape: 'hardBreakEscape'
hardBreakTrailing: 'hardBreakTrailing'
htmlFlow: 'htmlFlow'
htmlFlowData: 'htmlFlowData'
htmlText: 'htmlText'
htmlTextData: 'htmlTextData'
image: 'image'
label: 'label'
labelText: 'labelText'
labelLink: 'labelLink'
labelImage: 'labelImage'
labelMarker: 'labelMarker'
labelImageMarker: 'labelImageMarker'
labelEnd: 'labelEnd'
link: 'link'
paragraph: 'paragraph'
reference: 'reference'
referenceMarker: 'referenceMarker'
referenceString: 'referenceString'
resource: 'resource'
resourceDestination: 'resourceDestination'
resourceDestinationLiteral: 'resourceDestinationLiteral'
resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker'
resourceDestinationRaw: 'resourceDestinationRaw'
resourceDestinationString: 'resourceDestinationString'
resourceMarker: 'resourceMarker'
resourceTitle: 'resourceTitle'
resourceTitleMarker: 'resourceTitleMarker'
resourceTitleString: 'resourceTitleString'
setextHeading: 'setextHeading'
setextHeadingText: 'setextHeadingText'
setextHeadingLine: 'setextHeadingLine'
setextHeadingLineSequence: 'setextHeadingLineSequence'
strong: 'strong'
strongSequence: 'strongSequence'
strongText: 'strongText'
thematicBreak: 'thematicBreak'
thematicBreakSequence: 'thematicBreakSequence'
blockQuote: 'blockQuote'
blockQuotePrefix: 'blockQuotePrefix'
blockQuoteMarker: 'blockQuoteMarker'
blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace'
listOrdered: 'listOrdered'
listUnordered: 'listUnordered'
listItemIndent: 'listItemIndent'
listItemMarker: 'listItemMarker'
listItemPrefix: 'listItemPrefix'
listItemPrefixWhitespace: 'listItemPrefixWhitespace'
listItemValue: 'listItemValue'
chunkDocument: 'chunkDocument'
chunkContent: 'chunkContent'
chunkFlow: 'chunkFlow'
chunkText: 'chunkText'
chunkString: 'chunkString'
}
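Because `TokenTypeMap` is now an interface, extensions can register their own token types through TypeScript declaration merging. A hedged sketch (the `exampleFlow` type is made up):

```ts
// The file must be a module for the augmentation to merge, hence `export {}`.
declare module 'micromark-util-types' {
  interface TokenTypeMap {
    exampleFlow: 'exampleFlow'
  }
}

export {}
```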

@@ -1,543 +0,2 @@

/**
* @typedef {number|null} Code
* A character code.
*
* This is often the same as what `String#charCodeAt()` yields but micromark
* adds meaning to certain other values.
*
* `null` represents the end of the input stream (called eof).
* Negative integers are used instead of certain sequences of characters (such
* as line endings and tabs).
*
* @typedef {Code|string} Chunk
* A chunk is either a character code or a slice of a buffer in the form of a
* string.
*
* Chunks are used because strings are more efficient storage that character
* codes, but limited in what they can represent.
*
* @typedef {'document'|'flow'|'content'|'text'|'string'} ContentType
* Enumeration of the content types.
*
* Technically `document` is also a content type, which includes containers
* (lists, block quotes) and flow.
* As `ContentType` is used on tokens to define the type of subcontent but
* `document` is the highest level of content, so it’s not listed here.
*
* Containers in markdown come from the margin and include more constructs
* on the lines that define them.
* Take for example a block quote with a paragraph inside it (such as
* `> asd`).
*
* `flow` represents the sections, such as headings, code, and content, which
* is also parsed per line
* An example is HTML, which has a certain starting condition (such as
* `<script>` on its own line), then continues for a while, until an end
* condition is found (such as `</style>`).
* If that line with an end condition is never found, that flow goes until
* the end.
*
* `content` is zero or more definitions, and then zero or one paragraph.
* It’s a weird one, and needed to make certain edge cases around definitions
* spec compliant.
* Definitions are unlike other things in markdown, in that they behave like
* `text` in that they can contain arbitrary line endings, but *have* to end
* at a line ending.
* If they end in something else, the whole definition instead is seen as a
* paragraph.
*
* The content in markdown first needs to be parsed up to this level to
* figure out which things are defined, for the whole document, before
* continuing on with `text`, as whether a link or image reference forms or
* not depends on whether it’s defined.
* This unfortunately prevents a true streaming markdown to HTML compiler.
*
* `text` contains phrasing content such as attention (emphasis, strong),
* media (links, images), and actual text.
*
* `string` is a limited `text` like content type which only allows character
* references and character escapes.
* It exists in things such as identifiers (media references, definitions),
* titles, or URLs.
*
* @typedef Point
* A location in the document (`line`/`column`/`offset`) and chunk (`_index`,
* `_bufferIndex`).
*
* `_bufferIndex` is `-1` when `_index` points to a code chunk and it’s a
* non-negative integer when pointing to a string chunk.
*
* The interface for the location in the document comes from unist `Point`:
* <https://github.com/syntax-tree/unist#point>
* @property {number} line
* 1-indexed line number
* @property {number} column
* 1-indexed column number
* @property {number} offset
* 0-indexed position in the document
* @property {number} _index
* Position in a list of chunks
* @property {number} _bufferIndex
* Position in a string chunk (or `-1` when pointing to a numeric chunk)
*
* @typedef Token
* A token: a span of chunks.
* Tokens are what the core of micromark produces: the built in HTML compiler
* or other tools can turn them into different things.
*
* Tokens are essentially names attached to a slice of chunks, such as
* `lineEndingBlank` for certain line endings, or `codeFenced` for a whole
* fenced code.
*
* Sometimes, more info is attached to tokens, such as `_open` and `_close`
* by `attention` (strong, emphasis) to signal whether the sequence can open
* or close an attention run.
*
* Linked tokens are used because outer constructs are parsed first.
* Take for example:
*
* ```markdown
* > *a
* b*.
* ```
*
* 1. The block quote marker and the space after it is parsed first
* 2. The rest of the line is a `chunkFlow` token
* 3. The two spaces on the second line are a `linePrefix`
* 4. The rest of the line is another `chunkFlow` token
*
* The two `chunkFlow` tokens are linked together.
* The chunks they span are then passed through the flow tokenizer.
*
* @property {string} type
* @property {Point} start
* @property {Point} end
* @property {Token} [previous]
* The previous token in a list of linked tokens.
* @property {Token} [next]
* The next token in a list of linked tokens
* @property {ContentType} [contentType]
* Declares a token as having content of a certain type.
* @property {TokenizeContext} [_tokenizer]
* Used when dealing with linked tokens.
* A child tokenizer is needed to tokenize them, which is stored on those
* tokens.
* @property {boolean} [_open]
* A marker used to parse attention, depending on the characters before
* sequences (`**`), the sequence can open, close, both, or none
* @property {boolean} [_close]
* A marker used to parse attention, depending on the characters after
* sequences (`**`), the sequence can open, close, both, or none
* @property {boolean} [_isInFirstContentOfListItem]
* A boolean used internally to figure out if a token is in the first content
* of a list item construct.
* @property {boolean} [_container]
* A boolean used internally to figure out if a token is a container token.
* @property {boolean} [_loose]
* A boolean used internally to figure out if a list is loose or not.
* @property {boolean} [_inactive]
* A boolean used internally to figure out if a link opening can’t be used
* (because links in links are incorrect).
* @property {boolean} [_balanced]
* A boolean used internally to figure out if a link opening is balanced: it’s
* not a link opening but has a balanced closing.
*
* @typedef {['enter'|'exit', Token, TokenizeContext]} Event
* An event is the start or end of a token amongst other events.
* Tokens can “contain” other tokens, even though they are stored in a flat
* list, through `enter`ing before them, and `exit`ing after them.
*
* @callback Enter
* Open a token.
* @param {string} type
* Token to enter.
* @param {Record<string, unknown>} [fields]
* Fields to patch on the token
* @returns {Token}
*
* @callback Exit
* Close a token.
* @param {string} type
* Token to close.
* Should match the current open token.
* @returns {Token}
*
* @callback Consume
* Deal with the character and move to the next.
* @param {Code} code
* Code that was given to the state function
* @returns {void}
*
* @callback Attempt
* Attempt deals with several values, and tries to parse according to those
* values.
* If a value resulted in `ok`, it worked, the tokens that were made are used,
* and `returnState` is switched to.
* If the result is `nok`, the attempt failed, so we revert to the original
* state, and `bogusState` is used.
* @param {Construct|Construct[]|ConstructRecord} construct
* @param {State} returnState
* @param {State} [bogusState]
* @returns {(code: Code) => void}
*
* @typedef Effects
* A context object to transition the state machine.
* @property {Enter} enter
* Start a new token.
* @property {Exit} exit
* End a started token.
* @property {Consume} consume
* Deal with the character and move to the next.
* @property {Attempt} attempt
* Try to tokenize a construct.
* @property {Attempt} interrupt
* Interrupt is used for stuff right after a line of content.
* @property {Attempt} check
* Attempt, then revert.
*
* @callback State
* The main unit in the state machine: a function that gets a character code
* and has certain effects.
*
* A state function should return another function: the next
* state-as-a-function to go to.
*
* But there is one case where they return void: for the eof character code
* (at the end of a value).
* The reason being: well, there isn’t any state that makes sense, so void
* works well.
* Practically that has also helped: if for some reason it was a mistake, then
* an exception is throw because there is no next function, meaning it
* surfaces early.
* @param {Code} code
* @returns {State|void}
*
* @callback Resolver
* A resolver handles and cleans events coming from `tokenize`.
* @param {Event[]} events
* List of events.
* @param {TokenizeContext} context
* Context.
* @returns {Event[]}
*
* @typedef {(this: TokenizeContext, effects: Effects, ok: State, nok: State) => State} Tokenizer
* A tokenize function sets up a state machine to handle character codes streaming in.
*
* @typedef {(this: TokenizeContext, effects: Effects) => State} Initializer
* Like a tokenizer, but without `ok` or `nok`.
*
* @typedef {(this: TokenizeContext, effects: Effects) => void} Exiter
* Like a tokenizer, but without `ok` or `nok`, and returning void.
* This is the final hook when a container must be closed.
*
* @typedef {(this: TokenizeContext, code: Code) => boolean} Previous
* Guard whether `code` can come before the construct.
* In certain cases a construct can hook into many potential start characters.
* Instead of setting up an attempt to parse that construct for most
* characters, this is a speedy way to reduce that.
*
* @typedef Construct
* An object descibing how to parse a markdown construct.
* @property {Tokenizer} tokenize
* @property {Previous} [previous]
* Guard whether the previous character can come before the construct
* @property {Construct} [continuation]
* For containers, a continuation construct.
* @property {Exiter} [exit]
* For containers, a final hook.
* @property {string} [name]
* Name of the construct, used to toggle constructs off.
* Named constructs must not be `partial`.
* @property {boolean} [partial=false]
* Whether this construct represents a partial construct.
* Partial constructs must not have a `name`.
* @property {Resolver} [resolve]
* Resolve the events parsed by `tokenize`.
*
* For example, if we’re currently parsing a link title and this construct
* parses character references, then `resolve` is called with the events
* ranging from the start to the end of a character reference each time one is
* found.
* @property {Resolver} [resolveTo]
* Resolve the events from the start of the content (which includes other
* constructs) to the last one parsed by `tokenize`.
*
* For example, if we’re currently parsing a link title and this construct
* parses character references, then `resolveTo` is called with the events
* ranging from the start of the link title to the end of a character
* reference each time one is found.
* @property {Resolver} [resolveAll]
* Resolve all events when the content is complete, from the start to the end.
* Only used if `tokenize` is successful once in the content.
*
* For example, if we’re currently parsing a link title and this construct
* parses character references, then `resolveAll` is called *if* at least one
* character reference is found, ranging from the start to the end of the link
* title to the end.
* @property {boolean} [concrete]
* Concrete constructs cannot be interrupted by more containers.
*
* For example, when parsing the document (containers, such as block quotes
* and lists) and this construct is parsing fenced code:
*
* ````markdown
* > ```js
* > - list?
* ````
*
* …then `- list?` cannot form if this fenced code construct is concrete.
*
* An example of a construct that is not concrete is a GFM table:
*
* ````markdown
* | a |
* | - |
* > | b |
* ````
*
* …`b` is not part of the table.
* @property {'before'|'after'} [add='before']
* Whether the construct, when in a `ConstructRecord`, precedes over existing
* constructs for the same character code when merged
* The default is that new constructs precede over existing ones.
*
* @typedef {Construct & {tokenize: Initializer}} InitialConstruct
* Like a construct, but `tokenize` does not accept `ok` or `nok`.
*
* @typedef {Record<string, undefined|Construct|Construct[]>} ConstructRecord
* Several constructs, mapped from their initial codes.
*
* @typedef TokenizeContext
* A context object that helps w/ tokenizing markdown constructs.
* @property {Code} previous
* The previous code.
* @property {Code} code
* Current code.
* @property {boolean} [interrupt]
* Whether we’re currently interrupting.
* Take for example:
*
* ```markdown
* a
* # b
* ```
*
* At 2:1, we’re “interrupting”.
* @property {Construct} [currentConstruct]
* The current construct.
* Constructs that are not `partial` are set here.
* @property {Record<string, unknown> & {_closeFlow?: boolean}} [containerState]
* Info set when parsing containers.
* Containers are parsed in separate phases: their first line (`tokenize`),
* continued lines (`continuation.tokenize`), and finally `exit`.
* This record can be used to store some information between these hooks.
* @property {Event[]} events
* Current list of events.
* @property {ParseContext} parser
* The relevant parsing context.
* @property {(token: Pick<Token, 'start'|'end'>) => Chunk[]} sliceStream
* Get the chunks that span a token (or location).
* @property {(token: Pick<Token, 'start'|'end'>, expandTabs?: boolean) => string} sliceSerialize
* Get the source text that spans a token (or location).
* @property {() => Point} now
* Get the current place.
* @property {(value: Point) => void} defineSkip
* Define a skip: as containers (block quotes, lists), “nibble” a prefix from
* the margins, where a line starts after that prefix is defined here.
* When the tokenizers moves after consuming a line ending corresponding to
* the line number in the given point, the tokenizer shifts past the prefix
* based on the column in the shifted point.
* @property {(slice: Chunk[]) => Event[]} write
* Write a slice of chunks.
* The eof code (`null`) can be used to signal the end of the stream.
* @property {boolean} [_gfmTasklistFirstContentOfListItem]
* Internal boolean shared with `micromark-extension-gfm-task-list-item` to
* signal whether the tokenizer is tokenizing the first content of a list item
* construct.
* @property {boolean} [_gfmTableDynamicInterruptHack=false]
* Internal boolean shared with `micromark-extension-gfm-table` whose body
* rows are not affected by normal interruption rules.
* “Normal” rules are, for example, that an empty list item can’t interrupt:
*
* ````markdown
* a
* *
* ````
*
* The above is one paragraph.
* These rules don’t apply to table body rows:
*
* ````markdown
* | a |
* | - |
* *
* ````
*
* The above list interrupts the table.
*/
/**
* @typedef {'ascii'|'utf8'|'utf-8'|'utf16le'|'ucs2'|'ucs-2'|'base64'|'latin1'|'binary'|'hex'} Encoding
* Encodings supported by the buffer class.
* This is a copy of the typing from Node, copied to prevent Node globals from
* being needed.
* Copied from: <https://github.com/DefinitelyTyped/DefinitelyTyped/blob/a2bc1d8/types/node/globals.d.ts#L174>
*
* @typedef {string|Uint8Array} Value
* Contents of the file.
* Can either be text, or a `Buffer` like structure.
* This does not directly use type `Buffer`, because it can also be used in a
* browser context.
* Instead this leverages `Uint8Array` which is the base type for `Buffer`,
* and a native JavaScript construct.
*/
/**
* @typedef _ExtensionFields
* @property {ConstructRecord} document
* @property {ConstructRecord} contentInitial
* @property {ConstructRecord} flowInitial
* @property {ConstructRecord} flow
* @property {ConstructRecord} string
* @property {ConstructRecord} text
* @property {{null?: string[]}} disable
* @property {{null?: Pick<Construct, 'resolveAll'>[]}} insideSpan
* @property {{null?: Code[]}} attentionMarkers
*
* @typedef _NormalizedExtensionFields
* @property {Record<string, Construct[]>} document
* @property {Record<string, Construct[]>} contentInitial
* @property {Record<string, Construct[]>} flowInitial
* @property {Record<string, Construct[]>} flow
* @property {Record<string, Construct[]>} string
* @property {Record<string, Construct[]>} text
* @property {{null: string[]}} disable
* @property {{null: Pick<Construct, 'resolveAll'>[]}} insideSpan
* @property {{null: Code[]}} attentionMarkers
*
* @typedef {Record<string, Record<string, unknown>> & Partial<_ExtensionFields>} Extension
* A syntax extension changes how markdown is tokenized.
* See: <https://github.com/micromark/micromark#syntaxextension>
*
* @typedef {Record<string, Record<string, unknown[]>> & _NormalizedExtensionFields} FullNormalizedExtension
* @typedef {Record<string, Record<string, unknown[]|undefined>> & Partial<_NormalizedExtensionFields>} NormalizedExtension
*
* @callback Create
* Set up a tokenizer for a content type.
* @param {Omit<Point, '_index'|'_bufferIndex'>} [from]
* @returns {TokenizeContext}
*
* @typedef ParseOptions
* Parse options.
* @property {Extension[]} [extensions] Array of syntax extensions
*
* @typedef ParseContext
* A context object that helps w/ parsing markdown.
* @property {FullNormalizedExtension} constructs
* @property {Create} content
* @property {Create} document
* @property {Create} flow
* @property {Create} string
* @property {Create} text
* @property {string[]} defined List of defined identifiers.
* @property {Record<number, boolean>} lazy
* Map of line numbers to whether they are lazy (as opposed to the line before
* them).
* Take for example:
*
* ```markdown
* > a
* b
* ```
*
* L1 here is not lazy, L2 is.
*/
/**
* @typedef CompileContext
* HTML compiler context
* @property {CompileOptions} options
* Configuration passed by the user.
* @property {(key: string, value?: unknown) => void} setData
* Set data into the key-value store.
* @property {<K extends string>(key: K) => CompileData[K]} getData
* Get data from the key-value store.
* @property {() => void} lineEndingIfNeeded
* Output an extra line ending if the previous value wasn’t EOF/EOL.
* @property {(value: string) => string} encode
* Make a value safe for injection in HTML (except w/ `ignoreEncode`).
* @property {() => void} buffer
* Capture some of the output data.
* @property {() => string} resume
* Stop capturing and access the output data.
* @property {(value: string) => void} raw
* Output raw data.
* @property {(value: string) => void} tag
* Output (parts of) HTML tags.
* @property {TokenizeContext['sliceSerialize']} sliceSerialize
* Get the string value of a token
*
* @callback Compile
* Serialize micromark events as HTML
* @param {Event[]} events
* @returns {string}
*
* @typedef {(this: CompileContext, token: Token) => void} Handle
* Handle one token
*
* @typedef {(this: Omit<CompileContext, 'sliceSerialize'>) => void} DocumentHandle
* Handle the whole
*
* @typedef {Record<string, Handle> & {null?: DocumentHandle}} Handles
* Token types mapping to handles
*
* @typedef {Record<string, Record<string, unknown>> & {enter: Handles, exit: Handles}} NormalizedHtmlExtension
*
* @typedef {Partial<NormalizedHtmlExtension>} HtmlExtension
* An HTML extension changes how markdown tokens are serialized.
*
* @typedef _CompileDataFields
* @property {boolean} lastWasTag
* @property {boolean} expectFirstItem
* @property {boolean} slurpOneLineEnding
* @property {boolean} slurpAllLineEndings
* @property {boolean} fencedCodeInside
* @property {number} fencesCount
* @property {boolean} flowCodeSeenData
* @property {boolean} ignoreEncode
* @property {number} headingRank
* @property {boolean} inCodeText
* @property {string} characterReferenceType
* @property {boolean[]} tightStack
*
* @typedef {Record<string, unknown> & Partial<_CompileDataFields>} CompileData
*
* @typedef CompileOptions
* Compile options
* @property {'\r'|'\n'|'\r\n'} [defaultLineEnding]
* Value to use for line endings not in `doc` (`string`, default: first line
* ending or `'\n'`).
*
* Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
* markdown document over to the compiled HTML.
* In some cases, such as `> a`, CommonMark requires that extra line endings
* are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
* @property {boolean} [allowDangerousHtml=false]
* Whether to allow embedded HTML (`boolean`, default: `false`).
* @property {boolean} [allowDangerousProtocol=false]
* Whether to allow potentially dangerous protocols in links and images
* (`boolean`, default: `false`).
* URLs relative to the current protocol are always allowed (such as,
* `image.jpg`).
* For links, the allowed protocols are `http`, `https`, `irc`, `ircs`,
* `mailto`, and `xmpp`.
* For images, the allowed protocols are `http` and `https`.
* @property {HtmlExtension[]} [htmlExtensions=[]]
* Array of HTML extensions
*/
/**
* @typedef {ParseOptions & CompileOptions} Options
*/
// Note: types exported from `index.d.ts`.
export {}
{
"name": "micromark-util-types",
"version": "1.0.2",
"version": "1.1.0",
"description": "micromark utility with a couple of typescript types",

@@ -38,6 +38,4 @@ "license": "MIT",

"exports": "./index.js",
"scripts": {
"build": "rimraf \"*.d.ts\" && tsc"
},
"scripts": {},
"xo": false
}

@@ -11,9 +11,13 @@ # micromark-util-types

micromark utility with a couple of typescript types.
[micromark][] utility package with TypeScript types.
## Contents
* [What is this?](#what-is-this)
* [When should I use this?](#when-should-i-use-this)
* [Install](#install)
* [Use](#use)
* [API](#api)
* [Types](#types)
* [Compatibility](#compatibility)
* [Security](#security)

@@ -23,5 +27,15 @@ * [Contribute](#contribute)

## What is this?
This package exposes TypeScript types shared throughout the micromark
ecosystem.
## When should I use this?
This package is useful when you are making your own, typed, micromark
extensions.
## Install
[npm][]:
In Node.js (version 16+), install with [npm][]:

@@ -32,2 +46,8 @@ ```sh

In Deno with [`esm.sh`][esmsh]:
```js
import type {Point, /* … */} from 'https://esm.sh/micromark-util-types@1'
```
## Use

@@ -46,8 +66,21 @@

See
[the code](https://github.com/micromark/micromark/blob/main/packages/micromark-util-types/index.js)
for all about the exposed types.
See [the code][code] for all about the exposed types.
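For example, a typed extension might import what it needs (illustrative):

```ts
import type {Extension, Tokenizer} from 'micromark-util-types'
```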
## Types
This package is just [TypeScript][] types.
It exports lots of different types.
## Compatibility
Projects maintained by the unified collective are compatible with all maintained
versions of Node.js.
As of now, that is Node.js 16+.
Our projects sometimes work with older versions, but this is not guaranteed.
This package works with `micromark` version 3+.
## Security
This package is safe.
See [`security.md`][securitymd] in [`micromark/.github`][health] for how to

@@ -84,5 +117,5 @@ submit a security report.

[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-encode.svg
[bundle-size-badge]: https://img.shields.io/badge/dynamic/json?label=minzipped%20size&query=$.size.compressedSize&url=https://deno.bundlejs.com/?q=micromark-util-encode
[bundle-size]: https://bundlephobia.com/result?p=micromark-util-encode
[bundle-size]: https://bundlejs.com/?q=micromark-util-encode

@@ -97,2 +130,4 @@ [sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg

[esmsh]: https://esm.sh
[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg

@@ -108,8 +143,14 @@

[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md
[securitymd]: https://github.com/micromark/.github/blob/main/security.md
[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md
[contributing]: https://github.com/micromark/.github/blob/main/contributing.md
[support]: https://github.com/micromark/.github/blob/HEAD/support.md
[support]: https://github.com/micromark/.github/blob/main/support.md
[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md
[coc]: https://github.com/micromark/.github/blob/main/code-of-conduct.md
[typescript]: https://www.typescriptlang.org
[micromark]: https://github.com/micromark/micromark
[code]: https://github.com/micromark/micromark/blob/main/packages/micromark-util-types/index.d.ts