Socket
Socket
Sign inDemoInstall

remark-parse

Package Overview
Dependencies
Maintainers
5
Versions
31
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

remark-parse - npm Package Compare versions

Comparing version 6.0.3 to 7.0.0

lib/util/is-markdown-whitespace-character.js

18

lib/tokenize/code-fenced.js
'use strict'
var trim = require('trim-trailing-lines')
module.exports = fencedCode

@@ -103,4 +101,3 @@

character === lineFeed ||
character === tilde ||
character === graveAccent
(marker === graveAccent && character === marker)
) {

@@ -146,2 +143,3 @@ break

exdentedContent = ''
var skip = true

@@ -163,9 +161,9 @@ // Eat content.

// Add the newline to `subvalue` if it's the first character. Otherwise,
// add it to the `closing` queue.
if (content) {
// The first line feed is ignored. Others aren’t.
if (skip) {
subvalue += character
skip = false
} else {
closing += character
exdentedClosing += character
} else {
subvalue += character
}

@@ -256,4 +254,4 @@

meta: meta || null,
value: trim(exdentedContent)
value: exdentedContent
})
}
'use strict'
var whitespace = require('is-whitespace-character')
var locate = require('../locate/code-inline')

@@ -9,3 +8,5 @@

var graveAccent = '`'
var lineFeed = 10 // '\n'
var space = 32 // ' '
var graveAccent = 96 // '`'

@@ -15,53 +16,45 @@ function inlineCode(eat, value, silent) {

var index = 0
var queue = ''
var tickQueue = ''
var contentQueue
var subqueue
var count
var openingCount
var subvalue
var character
var openingFenceEnd
var closingFenceStart
var closingFenceEnd
var code
var next
var found
var next
while (index < length) {
if (value.charAt(index) !== graveAccent) {
if (value.charCodeAt(index) !== graveAccent) {
break
}
queue += graveAccent
index++
}
if (!queue) {
if (index === 0 || index === length) {
return
}
subvalue = queue
openingCount = index
queue = ''
next = value.charAt(index)
count = 0
openingFenceEnd = index
next = value.charCodeAt(index)
while (index < length) {
character = next
next = value.charAt(index + 1)
code = next
next = value.charCodeAt(index + 1)
if (character === graveAccent) {
count++
tickQueue += character
} else {
count = 0
queue += character
}
if (code === graveAccent) {
if (closingFenceStart === undefined) {
closingFenceStart = index
}
if (count && next !== graveAccent) {
if (count === openingCount) {
subvalue += queue + tickQueue
closingFenceEnd = index + 1
if (
next !== graveAccent &&
closingFenceEnd - closingFenceStart === openingFenceEnd
) {
found = true
break
}
queue += tickQueue
tickQueue = ''
} else if (closingFenceStart !== undefined) {
closingFenceStart = undefined
closingFenceEnd = undefined
}

@@ -73,7 +66,3 @@

if (!found) {
if (openingCount % 2 !== 0) {
return
}
queue = ''
return
}

@@ -86,27 +75,39 @@

contentQueue = ''
subqueue = ''
length = queue.length
index = -1
// Remove the initial and final space (or line feed), iff they exist and there
// are non-space characters in the content.
index = openingFenceEnd
length = closingFenceStart
code = value.charCodeAt(index)
next = value.charCodeAt(length - 1)
found = false
while (++index < length) {
character = queue.charAt(index)
if (
length - index > 2 &&
(code === space || code === lineFeed) &&
(next === space || next === lineFeed)
) {
index++
length--
if (whitespace(character)) {
subqueue += character
continue
}
while (index < length) {
code = value.charCodeAt(index)
if (subqueue) {
if (contentQueue) {
contentQueue += subqueue
if (code !== space && code !== lineFeed) {
found = true
break
}
subqueue = ''
index++
}
contentQueue += character
if (found === true) {
openingFenceEnd++
closingFenceStart--
}
}
return eat(subvalue)({type: 'inlineCode', value: contentQueue})
return eat(value.slice(0, closingFenceEnd))({
type: 'inlineCode',
value: value.slice(openingFenceEnd, closingFenceStart)
})
}

@@ -7,4 +7,2 @@ 'use strict'

module.exports = definition
definition.notInList = true
definition.notInBlock = true

@@ -11,0 +9,0 @@ var quotationMark = '"'

'use strict'
/* eslint-disable max-params */
var trim = require('trim')

@@ -437,2 +435,3 @@ var repeat = require('repeat-string')

/* eslint-disable-next-line max-params */
function replacer($0, $1, $2, $3, $4) {

@@ -439,0 +438,0 @@ bullet = $1 + $2 + $3

@@ -26,2 +26,3 @@ 'use strict'

var commonmark = self.options.commonmark
var footnotes = self.options.footnotes
var character = value.charAt(0)

@@ -59,3 +60,3 @@ var index = 0

// Check whether we’re eating a footnote.
if (self.options.footnotes && value.charAt(index) === caret) {
if (footnotes && value.charAt(index) === caret) {
// Exit if `![^` is found, so the `!` will be seen as text after this,

@@ -128,4 +129,9 @@ // and we’ll enter this function again when `[^` is found.

// Inline footnotes cannot have an identifier.
if (type !== footnote && character === leftSquareBracket) {
// Inline footnotes cannot have a label.
// If footnotes are enabled, link labels cannot start with a caret.
if (
type !== footnote &&
character === leftSquareBracket &&
(!footnotes || value.charAt(index + 1) !== caret)
) {
identifier = ''

@@ -132,0 +138,0 @@ queue += character

@@ -13,3 +13,2 @@ 'use strict'

var backslash = '\\'
var graveAccent = '`'
var verticalBar = '|'

@@ -39,4 +38,2 @@

var preamble
var count
var opening
var now

@@ -167,4 +164,2 @@ var position

preamble = true
count = null
opening = null

@@ -189,8 +184,2 @@ while (index < length) {

} else {
if (character && opening) {
queue += character
index++
continue
}
if ((cell || character) && !preamble) {

@@ -234,18 +223,2 @@ subvalue = cell

}
if (character === graveAccent) {
count = 1
while (line.charAt(index + 1) === character) {
cell += character
index++
count++
}
if (!opening) {
opening = count
} else if (count >= opening) {
opening = 0
}
}
}

@@ -252,0 +225,0 @@

@@ -305,5 +305,5 @@ 'use strict'

// Merge two blockquotes: `node` into `prev`, unless in CommonMark mode.
// Merge two blockquotes: `node` into `prev`, unless in CommonMark or gfm modes.
function mergeBlockquote(prev, node) {
if (this.options.commonmark) {
if (this.options.commonmark || this.options.gfm) {
return node

@@ -310,0 +310,0 @@ }

{
"name": "remark-parse",
"version": "6.0.3",
"description": "Markdown parser for remark",
"version": "7.0.0",
"description": "remark plugin to parse Markdown",
"license": "MIT",
"keywords": [
"unified",
"remark",
"plugin",
"markdown",
"mdast",
"abstract",

@@ -14,2 +18,3 @@ "syntax",

],
"types": "types/index.d.ts",
"homepage": "https://remark.js.org",

@@ -28,3 +33,4 @@ "repository": "https://github.com/remarkjs/remark/tree/master/packages/remark-parse",

"index.js",
"lib"
"lib",
"types/index.d.ts"
],

@@ -48,7 +54,2 @@ "dependencies": {

},
"devDependencies": {
"tape": "^4.9.1",
"unified": "^7.0.0",
"vfile": "^3.0.0"
},
"scripts": {

@@ -55,0 +56,0 @@ "test": "tape test.js"

@@ -1,14 +0,16 @@

# remark-parse [![Travis][build-badge]][build-status] [![Coverage][coverage-badge]][coverage-status] [![Downloads][dl-badge]][dl] [![Size][size-badge]][size] [![Chat][chat-badge]][chat]
# remark-parse
[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][size-badge]][size]
[![Sponsors][sponsors-badge]][collective]
[![Backers][backers-badge]][collective]
[![Chat][chat-badge]][chat]
[Parser][] for [**unified**][unified].
Parses markdown to [**mdast**][mdast] syntax trees.
Used in the [**remark** processor][processor] but can be used on its own as
well.
Parses Markdown to [**mdast**][mdast] syntax trees.
Used in the [**remark** processor][remark] but can be used on its own as well.
Can be [extended][extend] to change how markdown is parsed.
* * *
**Announcing the unified collective! 🎉
[Read more about it on Medium »][announcement]**
## Sponsors

@@ -46,4 +48,6 @@

## Installation
[**Read more about the unified collective on Medium »**][announcement]
## Install
[npm][]:

@@ -55,3 +59,3 @@

## Usage
## Use

@@ -62,6 +66,8 @@ ```js

var markdown = require('remark-parse')
var html = require('remark-html')
var remark2rehype = require('remark-rehype')
var html = require('rehype-stringify')
var processor = unified()
.use(markdown, {commonmark: true})
.use(remark2rehype)
.use(html)

@@ -72,6 +78,8 @@

[See **unified** for more examples »][unified]
## Table of Contents
* [API](#api)
* [processor.use(parse\[, options\])](#processoruseparse-options)
* [processor().use(parse\[, options\])](#processoruseparse-options)
* [parse.Parser](#parseparser)

@@ -90,2 +98,3 @@ * [Extending the Parser](#extending-the-parser)

* [Turning off a tokenizer](#turning-off-a-tokenizer)
* [Contribute](#contribute)
* [License](#license)

@@ -95,5 +104,7 @@

### `processor.use(parse[, options])`
[See **unified** for API docs »][unified]
Configure the `processor` to read markdown as input and process
### `processor().use(parse[, options])`
Configure the `processor` to read Markdown as input and process
[**mdast**][mdast] syntax trees.

@@ -103,11 +114,14 @@

Options are passed directly, or passed later through [`processor.data()`][data].
Options can be passed directly, or passed later through
[`processor.data()`][data].
##### `options.gfm`
###### `options.gfm`
```md
GFM mode (`boolean`, default: `true`).
```markdown
hello ~~hi~~ world
```
GFM mode (`boolean`, default: `true`) turns on:
Turns on:

@@ -120,5 +134,7 @@ * [Fenced code blocks](https://help.github.com/articles/github-flavored-markdown/#fenced-code-blocks)

##### `options.commonmark`
###### `options.commonmark`
```md
CommonMark mode (`boolean`, default: `false`).
```markdown
This is a paragraph

@@ -128,25 +144,27 @@ and this is also part of the preceding paragraph.

CommonMark mode (`boolean`, default: `false`) allows:
Allows:
* Empty lines to split blockquotes
* Parentheses (`(` and `)`) around for link and image titles
* Any escaped [ASCII-punctuation][escapes] character
* Parentheses (`(` and `)`) around link and image titles
* Any escaped [ASCII punctuation][escapes] character
* Closing parenthesis (`)`) as an ordered list marker
* URL definitions (and footnotes, when enabled) in blockquotes
CommonMark mode disallows:
Disallows:
* Code directly following a paragraph
* ATX-headings (`# Hash headings`) without spacing after opening hashes
or and before closing hashes
* Indented code blocks directly following a paragraph
* ATX headings (`# Hash headings`) without spacing after opening hashes or
  before closing hashes
* Setext headings (`Underline headings\n---`) when following a paragraph
* Newlines in link and image titles
* White space in link and image URLs in auto-links (links in brackets,
`<` and `>`)
* Lazy blockquote continuation, lines not preceded by a closing angle
bracket (`>`), for lists, code, and thematicBreak
* White space in link and image URLs in auto-links (links in brackets, `<` and
`>`)
* Lazy blockquote continuation, lines not preceded by a greater than character
(`>`), for lists, code, and thematic breaks
##### `options.footnotes`
###### `options.footnotes`
```md
Footnotes mode (`boolean`, default: `false`).
```markdown
Something something[^or something?].

@@ -161,32 +179,34 @@

Footnotes mode (`boolean`, default: `false`) enables reference footnotes and
inline footnotes. Both are wrapped in square brackets and preceded by a caret
(`^`), and can be referenced from inside other footnotes.
Enables reference footnotes and inline footnotes.
Both are wrapped in square brackets and preceded by a caret (`^`), and can be
referenced from inside other footnotes.
##### `options.blocks`
###### `options.pedantic`
```md
<block>foo
</block>
Pedantic mode (`boolean`, default: `false`).
```markdown
Check out some_file_name.txt
```
Blocks (`Array.<string>`, default: list of [block HTML elements][blocks])
lets users define block-level HTML elements.
Turns on:
##### `options.pedantic`
* Emphasis (`_alpha_`) and importance (`__bravo__`) with underscores in words
* Unordered lists with different markers (`*`, `-`, `+`)
* If `commonmark` is also turned on, ordered lists with different markers
(`.`, `)`)
* And removes less spaces in list items (at most four, instead of the whole
indent)
```md
Check out some_file_name.txt
###### `options.blocks`
Blocks (`Array.<string>`, default: list of [block HTML elements][blocks]).
```markdown
<block>foo
</block>
```
Pedantic mode (`boolean`, default: `false`) turns on:
Defines which HTML elements are seen as block level.
* Emphasis (`_alpha_`) and importance (`__bravo__`) with underscores
in words
* Unordered lists with different markers (`*`, `-`, `+`)
* If `commonmark` is also turned on, ordered lists with different
markers (`.`, `)`)
* And pedantic mode removes fewer spaces in list-items (at most four,
instead of the whole indent)
### `parse.Parser`

@@ -198,10 +218,11 @@

Most often, using transformers to manipulate a syntax tree produces
the desired output. Sometimes, mainly when introducing new syntactic
entities with a certain level of precedence, interfacing with the parser
is necessary.
Typically, using [*transformers*][transformer] to manipulate a syntax tree
produces the desired output.
Sometimes, such as when introducing new syntactic entities with a certain
precedence, interfacing with the parser is necessary.
If the `remark-parse` plugin is used, it adds a [`Parser`][parser] constructor
to the `processor`. Other plugins can add tokenizers to the parser’s prototype
to change how markdown is parsed.
function to the `processor`.
Other plugins can add tokenizers to its prototype to change how Markdown is
parsed.

@@ -228,14 +249,15 @@ The below plugin adds a [tokenizer][] for at-mentions.

An object mapping tokenizer names to [tokenizer][]s. These
tokenizers (for example: `fencedCode`, `table`, and `paragraph`) eat
from the start of a value to a line ending.
Map of names to [tokenizer][]s (`Object.<Function>`).
These tokenizers (such as `fencedCode`, `table`, and `paragraph`) eat from the
start of a value to a line ending.
See `#blockMethods` below for a list of methods that are included by
default.
See `#blockMethods` below for a list of methods that are included by default.
### `Parser#blockMethods`
Array of `blockTokenizers` names (`string`) specifying the order in
which they run.
List of `blockTokenizers` names (`Array.<string>`).
Specifies the order in which tokenizers run.
Precedence of default block methods is as follows:
<!--methods-block start-->

@@ -261,14 +283,16 @@

An object mapping tokenizer names to [tokenizer][]s. These tokenizers
(for example: `url`, `reference`, and `emphasis`) eat from the start
of a value. To increase performance, they depend on [locator][]s.
Map of names to [tokenizer][]s (`Object.<Function>`).
These tokenizers (such as `url`, `reference`, and `emphasis`) eat from the start
of a value.
To increase performance, they depend on [locator][]s.
See `#inlineMethods` below for a list of methods that are included by
default.
See `#inlineMethods` below for a list of methods that are included by default.
### `Parser#inlineMethods`
Array of `inlineTokenizers` names (`string`) specifying the order in
which they run.
List of `inlineTokenizers` names (`Array.<string>`).
Specifies the order in which tokenizers run.
Precedence of default inline methods is as follows:
<!--methods-inline start-->

@@ -293,2 +317,9 @@

There are two types of tokenizers: block level and inline level.
Both are functions, and work the same, but inline tokenizers must have a
[locator][].
The following example shows an inline tokenizer that is added by the mentions
plugin above.
```js

@@ -315,11 +346,8 @@ tokenizeMention.notInLink = true

The parser knows two types of tokenizers: block level and inline level.
Block level tokenizers are the same as inline level tokenizers, with
the exception that the latter must have a [locator][].
Tokenizers *test* whether a document starts with a certain syntactic entity.
In *silent* mode, they return whether that test passes.
In *normal* mode, they consume that token, a process which is called “eating”.
Tokenizers _test_ whether a document starts with a certain syntactic
entity. In _silent_ mode, they return whether that test passes.
In _normal_ mode, they consume that token, a process which is called
“eating”. Locators enable tokenizers to function faster by providing
information on where the next entity may occur.
Locators enable inline tokenizers to function faster by providing where the next
entity may occur.

@@ -339,21 +367,25 @@ ###### Signatures

* `locator` ([`Function`][locator])
— Required for inline tokenizers
* `onlyAtStart` (`boolean`)
— Whether nodes can only be found at the beginning of the document
* `notInBlock` (`boolean`)
— Whether nodes cannot be in blockquotes, lists, or footnote
definitions
* `notInList` (`boolean`)
— Whether nodes cannot be in lists
* `notInLink` (`boolean`)
— Whether nodes cannot be in links
* `locator` ([`Function`][locator]) — Required for inline tokenizers
* `onlyAtStart` (`boolean`) — Whether nodes can only be found at the beginning
of the document
* `notInBlock` (`boolean`) — Whether nodes cannot be in blockquotes, lists, or
footnote definitions
* `notInList` (`boolean`) — Whether nodes cannot be in lists
* `notInLink` (`boolean`) — Whether nodes cannot be in links
###### Returns
* In _silent_ mode, whether a node can be found at the start of `value`
* In _normal_ mode, a node if it can be found at the start of `value`
* `boolean?`, in *silent* mode — whether a node can be found at the start of
`value`
* [`Node?`][node], in *normal* mode — If it can be found at the start of
`value`
### `tokenizer.locator(value, fromIndex)`
Locators are required for inline tokenizers.
Their role is to keep parsing performant.
The following example shows a locator that is added by the mentions tokenizer
above.
```js

@@ -365,7 +397,6 @@ function locateMention(value, fromIndex) {

Locators are required for inline tokenization to keep the process
performant. Locators enable inline tokenizers to function faster by
providing information on where the next entity occurs. Locators
may be wrong, it’s OK if there actually isn’t a node to be found at
the index they return, but they must skip any nodes.
Locators enable inline tokenizers to function faster by providing information on
where the next entity *may* occur.
Locators may be wrong, it’s OK if there actually isn’t a node to be found at the
index they return.

@@ -379,3 +410,3 @@ ###### Parameters

Index at which an entity may start, and `-1` otherwise.
`number` — Index at which an entity may start, and `-1` otherwise.

@@ -388,9 +419,8 @@ ### `eat(subvalue)`

Eat `subvalue`, which is a string at the start of the
[tokenize][tokenizer]d `value` (it’s tracked to ensure the correct
value is eaten).
Eat `subvalue`, which is a string at the start of the [tokenized][tokenizer]
`value`.
###### Parameters
* `subvalue` (`string`) - Value to eat.
* `subvalue` (`string`) - Value to eat

@@ -409,28 +439,29 @@ ###### Returns

Add [positional information][location] to `node` and add it to `parent`.
Add [positional information][position] to `node` and add `node` to `parent`.
###### Parameters
* `node` ([`Node`][node]) - Node to patch position on and insert
* `parent` ([`Node`][node], optional) - Place to add `node` to in
the syntax tree. Defaults to the currently processed node
* `node` ([`Node`][node]) - Node to patch position on and to add
* `parent` ([`Parent`][parent], optional) - Place to add `node` to in the
syntax tree.
Defaults to the currently processed node
###### Returns
The given `node`.
[`Node`][node] — The given `node`.
### `add.test()`
Get the [positional information][location] which would be patched on
`node` by `add`.
Get the [positional information][position] that would be patched on `node` by
`add`.
###### Returns
[`Location`][location].
[`Position`][position].
### `add.reset(node[, parent])`
`add`, but resets the internal location. Useful for example in
lists, where the same content is first eaten for a list, and later
for list items
`add`, but resets the internal position.
Useful for example in lists, where the same content is first eaten for a list,
and later for list items.

@@ -441,15 +472,21 @@ ###### Parameters

* `parent` ([`Node`][node], optional) - Place to add `node` to in
the syntax tree. Defaults to the currently processed node
the syntax tree.
Defaults to the currently processed node
###### Returns
The given `node`.
[`Node`][node] — The given node.
### Turning off a tokenizer
In rare situations, you may want to turn off a tokenizer to avoid parsing
that syntactic feature. This can be done by replacing the tokenizer from
your Parser’s `blockTokenizers` (or `blockMethods`) or `inlineTokenizers`
(or `inlineMethods`).
In some situations, you may want to turn off a tokenizer to avoid parsing that
syntactic feature.
Preferably, use the [`remark-disable-tokenizers`][remark-disable-tokenizers]
plugin to turn off tokenizers.
Alternatively, this can be done by replacing the tokenizer from
`blockTokenizers` (or `blockMethods`) or `inlineTokenizers` (or
`inlineMethods`).
The following example turns off indented code blocks:

@@ -465,4 +502,16 @@

Preferably, just use [this plugin](https://github.com/zestedesavoir/zmarkdown/tree/master/packages/remark-disable-tokenizers).
## Contribute
See [`contributing.md`][contributing] in [`remarkjs/.github`][health] for ways
to get started.
See [`support.md`][support] for ways to get help.
Ideas for new plugins and tools can be posted in [`remarkjs/ideas`][ideas].
A curated list of awesome remark resources can be found in [**awesome
remark**][awesome].
This project has a [Code of Conduct][coc].
By interacting with this repository, organisation, or community you agree to
abide by its terms.
## License

@@ -474,13 +523,13 @@

[build-badge]: https://img.shields.io/travis/remarkjs/remark/master.svg
[build-badge]: https://img.shields.io/travis/remarkjs/remark.svg
[build-status]: https://travis-ci.org/remarkjs/remark
[build]: https://travis-ci.org/remarkjs/remark
[coverage-badge]: https://img.shields.io/codecov/c/github/remarkjs/remark.svg
[coverage-status]: https://codecov.io/github/remarkjs/remark
[coverage]: https://codecov.io/github/remarkjs/remark
[dl-badge]: https://img.shields.io/npm/dm/remark-parse.svg
[downloads-badge]: https://img.shields.io/npm/dm/remark-parse.svg
[dl]: https://www.npmjs.com/package/remark-parse
[downloads]: https://www.npmjs.com/package/remark-parse

@@ -491,2 +540,8 @@ [size-badge]: https://img.shields.io/bundlephobia/minzip/remark-parse.svg

[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg
[backers-badge]: https://opencollective.com/unified/backers/badge.svg
[collective]: https://opencollective.com/unified
[chat-badge]: https://img.shields.io/badge/join%20the%20community-on%20spectrum-7b16ff.svg

@@ -496,2 +551,14 @@

[health]: https://github.com/remarkjs/.github
[contributing]: https://github.com/remarkjs/.github/blob/master/contributing.md
[support]: https://github.com/remarkjs/.github/blob/master/support.md
[coc]: https://github.com/remarkjs/.github/blob/master/code-of-conduct.md
[ideas]: https://github.com/remarkjs/ideas
[awesome]: https://github.com/remarkjs/awesome-remark
[license]: https://github.com/remarkjs/remark/blob/master/license

@@ -507,14 +574,20 @@

[processor]: https://github.com/remarkjs/remark/blob/master/packages/remark
[remark]: https://github.com/remarkjs/remark/tree/master/packages/remark
[blocks]: https://github.com/remarkjs/remark/blob/master/packages/remark-parse/lib/block-elements.js
[mdast]: https://github.com/syntax-tree/mdast
[escapes]: https://spec.commonmark.org/0.28/#backslash-escapes
[escapes]: https://spec.commonmark.org/0.29/#backslash-escapes
[node]: https://github.com/syntax-tree/unist#node
[location]: https://github.com/syntax-tree/unist#location
[parent]: https://github.com/syntax-tree/unist#parent
[position]: https://github.com/syntax-tree/unist#position
[parser]: https://github.com/unifiedjs/unified#processorparser
[transformer]: https://github.com/unifiedjs/unified#function-transformernode-file-next
[extend]: #extending-the-parser

@@ -530,4 +603,4 @@

[blocks]: https://github.com/remarkjs/remark/blob/master/packages/remark-parse/lib/block-elements.js
[announcement]: https://medium.com/unifiedjs/collectively-evolving-through-crowdsourcing-22c359ea95cc
[announcement]: https://medium.com/unifiedjs/collectively-evolving-through-crowdsourcing-22c359ea95cc
[remark-disable-tokenizers]: https://github.com/zestedesavoir/zmarkdown/tree/master/packages/remark-disable-tokenizers
SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc