micromark npm package: version comparison

Comparing version 2.9.0 to 2.9.1

dist/character/ascii-control.js

@@ -6,5 +6,6 @@ module.exports = asciiControl

return (
// Special whitespace codes (which have negative codes) or `nul` through `del`…
// Special whitespace codes (which have negative values), C0 and Control
// character DEL
code < 32 || code === 127
)
}
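The new comment spells out what the range check actually covers. As a quick reference, here is a minimal standalone sketch of the same predicate (a hypothetical helper, not micromark's exported module): the C0 control characters are U+0000 through U+001F, which sit below the space at code 32, and DEL is U+007F (127); micromark's negative "virtual" whitespace codes also fall below 32 and so pass the same test.

```js
// Minimal sketch of the check above (hypothetical helper, not micromark's module).
function isAsciiControl(code) {
  return code < 32 || code === 127
}

console.log(isAsciiControl(0)) // true (NUL)
console.log(isAsciiControl(127)) // true (DEL)
console.log(isAsciiControl(65)) // false (`A`)
```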

@@ -150,2 +150,4 @@ // This module is compiled away!

// Unicode Specials block.
exports.byteOrderMarker = 65279
// Unicode Specials block.
exports.replacementCharacter = 65533 // `�`

@@ -5,2 +5,4 @@ exports.tokenize = initializeContent

var createSpace = require('../tokenize/factory-space')
function initializeContent(effects) {

@@ -26,3 +28,3 @@ var contentStart = effects.attempt(

effects.exit('lineEnding')
return contentStart
return createSpace(effects, contentStart, 'linePrefix')
}

@@ -29,0 +31,0 @@

@@ -6,2 +6,3 @@ module.exports = preprocessor

function preprocessor() {
var start = true
var column = 1

@@ -25,2 +26,10 @@ var buffer = ''

if (start) {
if (value.charCodeAt(0) === 65279) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {
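The added branch skips a leading byte order marker (U+FEFF, char code 65279) the first time the preprocessor sees input. A rough illustration of the same idea on a plain string, outside micromark's streaming preprocessor, with a hypothetical `stripBom` helper:

```js
// Drop a leading byte order marker (U+FEFF, char code 65279) if present.
function stripBom(value) {
  return value.charCodeAt(0) === 65279 ? value.slice(1) : value
}

console.log(stripBom('\uFEFF# hi').length) // 4, the BOM is gone
console.log(stripBom('# hi').length) // 4, unchanged
```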

@@ -37,3 +46,3 @@ search.lastIndex = startPosition

if (startPosition === endPosition && atCarriageReturn && code === 10) {
if (code === 10 && startPosition === endPosition && atCarriageReturn) {
chunks.push(-3)

@@ -40,0 +49,0 @@ atCarriageReturn = undefined

@@ -56,6 +56,3 @@ exports.tokenize = tokenizeCodeFenced

if (code === 96 && code === marker) {
return nok(code)
}
if (code === 96 && code === marker) return nok(code)
effects.consume(code)

@@ -82,6 +79,3 @@ return info

if (code === 96 && code === marker) {
return nok(code)
}
if (code === 96 && code === marker) return nok(code)
effects.consume(code)
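Both hunks above only collapse the `if` onto one line; the guard itself reflects a CommonMark rule rather than new behavior: the info string of a backtick-fenced code block may not contain a backtick (tilde fences are exempt, hence the `code === marker` half of the check). A hedged sketch using micromark's public API, without asserting the exact HTML produced:

```js
var micromark = require('micromark')

// The backtick inside `js`x` means this line cannot open a backtick fence,
// so the input is not parsed as a fenced code block.
console.log(micromark('``` js`x\nalert(1)\n```'))
```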

@@ -151,6 +145,3 @@ return meta

if (size < sizeOpen) {
return nok(code)
}
if (size < sizeOpen) return nok(code)
effects.exit('codeFencedFenceSequence')

@@ -157,0 +148,0 @@ return createSpace(effects, closingSequenceEnd, 'whitespace')(code)

@@ -113,6 +113,3 @@ module.exports = createDestination

if (asciiControl(code)) {
return nok(code)
}
if (asciiControl(code)) return nok(code)
effects.consume(code)

@@ -119,0 +116,0 @@ return code === 92 ? destinationRawEscape : destinationRaw

@@ -1,2 +0,2 @@

module.exports = createSpaceOrLineEndingTokenizer
module.exports = createWhitespace

@@ -8,3 +8,4 @@ var markdownLineEnding = require('../character/markdown-line-ending')

function createSpaceOrLineEndingTokenizer(effects, ok) {
function createWhitespace(effects, ok) {
var seen
return start

@@ -17,2 +18,3 @@

effects.exit('lineEnding')
seen = true
return start

@@ -22,3 +24,7 @@ }

if (markdownSpace(code)) {
return createSpace(effects, start, 'whitespace')(code)
return createSpace(
effects,
start,
seen ? 'linePrefix' : 'lineSuffix'
)(code)
}

@@ -25,0 +31,0 @@
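The renamed factory now distinguishes whitespace that trails a line (`lineSuffix`) from whitespace that starts the next one (`linePrefix`), switching once a line ending has been seen. A standalone sketch of that decision (hypothetical helper, not micromark's internal API; real micromark also handles CR and CRLF):

```js
// Label each character of a whitespace run: before a line ending it trails the
// current line, after one it prefixes the next line.
function labelWhitespace(value) {
  var seen = false
  return value.split('').map(function (char) {
    if (char === '\n') {
      seen = true
      return 'lineEnding'
    }
    return seen ? 'linePrefix' : 'lineSuffix'
  })
}

console.log(labelWhitespace('  \n  '))
// ['lineSuffix', 'lineSuffix', 'lineEnding', 'linePrefix', 'linePrefix']
```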

@@ -150,3 +150,3 @@ exports.tokenize = tokenizeHtml

) {
if (startTag && code !== 47 && raws.indexOf(buffer.toLowerCase()) > -1) {
if (code !== 47 && startTag && raws.indexOf(buffer.toLowerCase()) > -1) {
kind = 1

@@ -167,4 +167,4 @@ return self.interrupt ? ok(code) : continuation(code)

kind = 7
// Do not support complete HTML when interrupting.
kind = 7
return self.interrupt

@@ -171,0 +171,0 @@ ? nok(code)

@@ -305,2 +305,12 @@ exports.tokenize = tokenizeHtml

function tagOpenAttributeValueBefore(code) {
if (
code === null ||
code === 60 ||
code === 61 ||
code === 62 ||
code === 96
) {
return nok(code)
}
if (code === 34 || code === 39) {

@@ -312,6 +322,2 @@ effects.consume(code)

if (code === 60 || code === 61 || code === 62 || code === 96) {
return nok(code)
}
if (markdownLineEnding(code)) {

@@ -318,0 +324,0 @@ returnState = tagOpenAttributeValueBefore
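The reordered state now rejects code points that can never begin an attribute value (EOF, `<`, `=`, `>`, and the backtick) before handling quoted values. A standalone sketch of the resulting decision order (hypothetical helper; micromark's real state also handles whitespace and line endings):

```js
// Decide how an attribute value may start in complete HTML flow.
function startsAttributeValue(code) {
  // `null` stands in for micromark's EOF code.
  if (
    code === null ||
    code === 60 /* `<` */ ||
    code === 61 /* `=` */ ||
    code === 62 /* `>` */ ||
    code === 96 /* backtick */
  ) {
    return 'nok'
  }
  if (code === 34 /* `"` */ || code === 39 /* `'` */) {
    return 'quoted'
  }
  return 'unquoted'
}

console.log(startsAttributeValue(96)) // 'nok'
console.log(startsAttributeValue(34)) // 'quoted'
console.log(startsAttributeValue(120)) // 'unquoted' (`x`)
```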

@@ -181,6 +181,3 @@ exports.tokenize = tokenizeLabelEnd

// It’s a balanced bracket, but contains a link.
if (labelStart._inactive) {
return balanced(code)
}
if (labelStart._inactive) return balanced(code)
defined =

@@ -187,0 +184,0 @@ self.parser.defined.indexOf(

@@ -45,3 +45,3 @@ module.exports = normalizeUri

else {
replace = fromCharCode(65533)
replace = '\uFFFD'
}

@@ -48,0 +48,0 @@ }
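The two spellings produce the same string; the change only drops the indirection through `String.fromCharCode`:

```js
console.log(String.fromCharCode(65533) === '\uFFFD') // true
console.log('\uFFFD') // the Unicode replacement character, `�`
```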

@@ -17,4 +17,2 @@ module.exports = serializeChunks

value = chunk
} else if (chunk === -3) {
value = '\r' + '\n'
} else if (chunk === -5) {

@@ -24,2 +22,4 @@ value = '\r'

value = '\n'
} else if (chunk === -3) {
value = '\r' + '\n'
} else if (chunk === -2) {

@@ -26,0 +26,0 @@ value = '\t'
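The reorder of these `else if` branches does not change the mapping of micromark's negative chunk codes to serialized strings. Roughly, based on the branches shown above (the code for the branch producing `'\n'` is not visible in the hunk and is an assumption here):

```js
// Mapping implied by the serializeChunks branches above; -4 for line feed is
// an assumption, the other codes appear in the hunks.
var serialization = {
  '-5': '\r', // carriage return
  '-4': '\n', // line feed (assumed code)
  '-3': '\r\n', // carriage return + line feed
  '-2': '\t' // horizontal tab
}

console.log(JSON.stringify(serialization['-3'])) // "\r\n"
```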

@@ -8,5 +8,6 @@ module.exports = asciiControl

return (
// Special whitespace codes (which have negative codes) or `nul` through `del`…
// Special whitespace codes (which have negative values), C0 and Control
// character DEL
code < codes.space || code === codes.del
)
}

@@ -150,2 +150,4 @@ // This module is compiled away!

// Unicode Specials block.
exports.byteOrderMarker = 65279
// Unicode Specials block.
exports.replacementCharacter = 65533 // `�`

@@ -8,2 +8,3 @@ exports.tokenize = initializeContent

var types = require('../constant/types')
var createSpace = require('../tokenize/factory-space')

@@ -34,3 +35,3 @@ function initializeContent(effects) {

effects.exit(types.lineEnding)
return contentStart
return createSpace(effects, contentStart, types.linePrefix)
}

@@ -37,0 +38,0 @@

@@ -9,2 +9,3 @@ module.exports = preprocessor

function preprocessor() {
var start = true
var column = 1

@@ -28,2 +29,10 @@ var buffer = ''

if (start) {
if (value.charCodeAt(0) === codes.byteOrderMarker) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {

@@ -41,5 +50,5 @@ search.lastIndex = startPosition

if (
code === codes.lf &&
startPosition === endPosition &&
atCarriageReturn &&
code === codes.lf
atCarriageReturn
) {

@@ -46,0 +55,0 @@ chunks.push(codes.carriageReturnLineFeed)

@@ -63,6 +63,3 @@ exports.tokenize = tokenizeCodeFenced

if (code === codes.graveAccent && code === marker) {
return nok(code)
}
if (code === codes.graveAccent && code === marker) return nok(code)
effects.consume(code)

@@ -89,6 +86,3 @@ return info

if (code === codes.graveAccent && code === marker) {
return nok(code)
}
if (code === codes.graveAccent && code === marker) return nok(code)
effects.consume(code)

@@ -163,6 +157,3 @@ return meta

if (size < sizeOpen) {
return nok(code)
}
if (size < sizeOpen) return nok(code)
effects.exit(types.codeFencedFenceSequence)

@@ -169,0 +160,0 @@ return createSpace(effects, closingSequenceEnd, types.whitespace)(code)

@@ -126,6 +126,3 @@ module.exports = createDestination

if (asciiControl(code)) {
return nok(code)
}
if (asciiControl(code)) return nok(code)
effects.consume(code)

@@ -132,0 +129,0 @@ return code === codes.backslash ? destinationRawEscape : destinationRaw

@@ -1,2 +0,2 @@

module.exports = createSpaceOrLineEndingTokenizer
module.exports = createWhitespace

@@ -8,3 +8,4 @@ var markdownLineEnding = require('../character/markdown-line-ending')

function createSpaceOrLineEndingTokenizer(effects, ok) {
function createWhitespace(effects, ok) {
var seen
return start

@@ -17,2 +18,3 @@

effects.exit(types.lineEnding)
seen = true
return start

@@ -22,3 +24,7 @@ }

if (markdownSpace(code)) {
return createSpace(effects, start, types.whitespace)(code)
return createSpace(
effects,
start,
seen ? types.linePrefix : types.lineSuffix
)(code)
}

@@ -25,0 +31,0 @@

@@ -156,4 +156,4 @@ exports.tokenize = tokenizeHtml

if (
code !== codes.slash &&
startTag &&
code !== codes.slash &&
raws.indexOf(buffer.toLowerCase()) > -1

@@ -176,4 +176,4 @@ ) {

kind = constants.htmlComplete
// Do not support complete HTML when interrupting.
kind = constants.htmlComplete
return self.interrupt

@@ -180,0 +180,0 @@ ? nok(code)

@@ -313,9 +313,4 @@ exports.tokenize = tokenizeHtml

function tagOpenAttributeValueBefore(code) {
if (code === codes.quotationMark || code === codes.apostrophe) {
effects.consume(code)
marker = code
return tagOpenAttributeValueQuoted
}
if (
code === codes.eof ||
code === codes.lessThan ||

@@ -329,2 +324,8 @@ code === codes.equalsTo ||

if (code === codes.quotationMark || code === codes.apostrophe) {
effects.consume(code)
marker = code
return tagOpenAttributeValueQuoted
}
if (markdownLineEnding(code)) {

@@ -331,0 +332,0 @@ returnState = tagOpenAttributeValueBefore

@@ -186,6 +186,3 @@ exports.tokenize = tokenizeLabelEnd

// It’s a balanced bracket, but contains a link.
if (labelStart._inactive) {
return balanced(code)
}
if (labelStart._inactive) return balanced(code)
defined =

@@ -192,0 +189,0 @@ self.parser.defined.indexOf(

module.exports = normalizeUri
var codes = require('../character/codes')
var values = require('../character/values')
var asciiAlphanumeric = require('../character/ascii-alphanumeric')

@@ -46,3 +47,3 @@ var fromCharCode = require('../constant/from-char-code')

else {
replace = fromCharCode(codes.replacementCharacter)
replace = values.replacementCharacter
}

@@ -49,0 +50,0 @@ }

@@ -20,4 +20,2 @@ module.exports = serializeChunks

value = chunk
} else if (chunk === codes.carriageReturnLineFeed) {
value = values.cr + values.lf
} else if (chunk === codes.carriageReturn) {

@@ -27,2 +25,4 @@ value = values.cr

value = values.lf
} else if (chunk === codes.carriageReturnLineFeed) {
value = values.cr + values.lf
} else if (chunk === codes.horizontalTab) {

@@ -29,0 +29,0 @@ value = values.ht

{
"name": "micromark",
"version": "2.9.0",
"version": "2.9.1",
"description": "small commonmark compliant markdown parser with positional info and concrete tokens",

@@ -65,2 +65,3 @@ "license": "MIT",

"browserify": "^16.0.0",
"character-entities": "^1.0.0",
"commonmark.json": "^0.29.0",

@@ -89,3 +90,3 @@ "concat-stream": "^2.0.0",

"generate": "npm run generate-expressions && npm run generate-dist && npm run generate-size",
"format": "remark . -qfo && prettier . --write && xo --fix",
"format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix",
"test-api": "node test",

@@ -92,0 +93,0 @@ "test-coverage": "nyc --reporter lcov tape test/index.js",

@@ -13,3 +13,4 @@ <h1 align="center">

smol markdown parser that’s different (open beta)
Small CommonMark compliant markdown parser with positional info and concrete
tokens.

@@ -56,10 +57,10 @@ ## Intro

* [x] Streaming interface
* [x] 1500+ tests and 100% coverage
* [x] 1750+ tests and 100% coverage
* [x] Abstract syntax tree ([`mdast-util-from-markdown`][from-markdown],
[`mdast-util-to-markdown`][to-markdown])
* [x] [Extensions][]: [GFM][], [footnotes][], [frontmatter][]
* [x] Performance (good enough for now)
* [ ] Integrate into remark
* [ ] Complementary docs on state machine ([CMSM][]) for parsers in other
languages
* [ ] Performance
* [ ] Concrete syntax tree

@@ -241,3 +242,3 @@

micromark will adhere to semver at `3.0.0`.
Use tilde ranges for now: `"micromark": "~2.8.0"`.
Use tilde ranges for now: `"micromark": "~2.9.0"`.

@@ -404,3 +405,3 @@ ## Security

[footnotes]: https://github.com/micromark/micromark-extension-footnotes
[footnotes]: https://github.com/micromark/micromark-extension-footnote

@@ -407,0 +408,0 @@ [frontmatter]: https://github.com/micromark/micromark-extension-frontmatter
