tidy-markdown
Advanced tools
Comparing version 0.3.2 to 0.3.3
@@ -1,2 +0,2 @@ | ||
// Generated by CoffeeScript 1.8.0 | ||
// Generated by CoffeeScript 1.10.0 | ||
(function() { | ||
@@ -3,0 +3,0 @@ var ArgumentParser, argparser, argv, packageInfo, tidyMarkdown; |
234
lib/index.js
@@ -1,12 +0,11 @@ | ||
// Generated by CoffeeScript 1.8.0 | ||
// Generated by CoffeeScript 1.10.0 | ||
(function() { | ||
var CODE_REGEX, Entities, IMG_REGEX, LINK_REGEX, delimitCode, fixHeaders, fm, formatTable, htmlEntities, indent, longestStringInArray, marked, nestContainingTokens, nestingEndTokens, nestingStartTokens, pad, preprocessAST, prettyInlineMarkdown, yaml, | ||
__indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }; | ||
var delimitCode, fixHeaders, fm, formatTable, indent, longestStringInArray, marked, pad, preprocessAST, ref, stringRepeat, tidyInlineMarkdown, yaml; | ||
marked = require('marked'); | ||
fm = require('front-matter'); | ||
Entities = require('html-entities').AllHtmlEntities; | ||
indent = require('indent'); | ||
marked = require('marked'); | ||
pad = require('pad'); | ||
@@ -16,177 +15,10 @@ | ||
fm = require('front-matter'); | ||
ref = require('./utils'), stringRepeat = ref.stringRepeat, longestStringInArray = ref.longestStringInArray, delimitCode = ref.delimitCode; | ||
htmlEntities = new Entities(); | ||
preprocessAST = require('./preprocess'); | ||
// Repeat string `x` `n` times, using the binary decomposition of `n` so the
// number of concatenations is O(log n) instead of O(n).
// Assumes n >= 0 (matches the original: negative n is unsupported).
function stringRepeat(x, n) {
  var result = '';
  var chunk = x;
  var remaining = n;
  while (true) {
    if (remaining & 1) {
      result += chunk;
    }
    remaining >>= 1;
    if (!remaining) {
      break;
    }
    chunk += chunk;
  }
  return result;
}
tidyInlineMarkdown = require('./tidy-inline-markdown'); | ||
/**
 * Find the length of the longest string in an array.
 * @param {String[]} array Array of strings
 * @return {Number} Length of the longest string, or 0 for an empty array
 */
var longestStringInArray = function(array) {
  return array.reduce(function(longest, str) {
    return str.length > longest ? str.length : longest;
  }, 0);
};
/**
 * Wrap code with backtick delimiters.
 * @param {String} code The code to wrap
 * @param {String} delimiter The delimiter to start with; additional backticks
 *   are appended if the code contains a backtick run that would end the code
 *   span prematurely.
 * @return {String} The delimited code
 */
var delimitCode = function(code, delimiter) {
  // A delimiter-length backtick run inside `code` (not part of a longer run)
  // would terminate the span early, so grow the delimiter until none matches.
  var terminator = new RegExp('([^`]|^)' + delimiter + '([^`]|$)');
  while (terminator.test(code)) {
    delimiter += '`';
    terminator = new RegExp('([^`]|^)' + delimiter + '([^`]|$)');
  }
  // Pad with a space when the code starts/ends with a backtick, so it cannot
  // fuse with the delimiter.
  if (code.charAt(0) === '`') {
    code = ' ' + code;
  }
  if (code.charAt(code.length - 1) === '`') {
    code = code + ' ';
  }
  return delimiter + code + delimiter;
};
// Inline HTML patterns produced by marked's inline lexer; each is matched so
// that HTML can be converted back into markdown syntax.
// IMG_REGEX captures: 1 = src, 2 = alt (optional), 3 = title (optional).
IMG_REGEX = /<img src="([^"]*)"(?: alt="([^"]*)")?(?: title="([^"]*)")?>/g;

// LINK_REGEX captures: 1 = href, 2 = title (optional), 3 = link text.
LINK_REGEX = /<a href="([^"]*)"(?: title="([^"]*)")?>([^<]*)<\/a>/g;

// CODE_REGEX captures: 1 = code-span contents.
CODE_REGEX = /<code>([^<]+)<\/code>/g;

/**
 * Normalize the inline markdown of a token: the text is run through marked's
 * inline lexer (producing inline HTML), smart punctuation is folded back to
 * ASCII, and the HTML is rewritten into canonical markdown syntax. Mutates
 * `token.text` in place and returns the same token.
 */
prettyInlineMarkdown = function(token) {
  // Replace-chain order matters: punctuation first, then strong/em/del tags,
  // then code spans (re-delimited via delimitCode), then images and links.
  token.text = marked.inlineLexer(token.text, token.links || {}).replace(/\u2014/g, '--').replace(/\u2018|\u2019/g, '\'').replace(/\u201c|\u201d/g, '"').replace(/\u2026/g, '...').replace(/<\/?strong>/g, '**').replace(/<\/?em>/g, '_').replace(/<\/?del>/g, '~~').replace(CODE_REGEX, function(m, code) {
    return delimitCode(code, '`');
  }).replace(IMG_REGEX, function(m, url, alt, title) {
    if (url == null) {
      url = '';
    }
    if (alt == null) {
      alt = '';
    }
    // A title becomes `url "title"` with backslashes and quotes escaped.
    if (title != null) {
      url += " \"" + (title.replace(/\\|"/g, function(m) {
        return "\\" + m;
      })) + "\"";
    }
    return "![" + alt + "](" + url + ")";
  }).replace(LINK_REGEX, function(m, url, title, text) {
    if (url == null) {
      url = '';
    }
    if (text == null) {
      text = '';
    }
    // Same title-escaping convention as for images above.
    if (title != null) {
      url += " \"" + (title.replace(/\\|"/g, function(m) {
        return "\\" + m;
      })) + "\"";
    }
    return "[" + text + "](" + url + ")";
  });
  // Decode HTML entities last, after all tag-based rewrites are done.
  token.text = htmlEntities.decode(token.text);
  return token;
};
// Token types that open/close a nested region in marked's flat token stream,
// and (position-matched) the container type each pair collapses into.
var nestingStartTokens = ['list_item_start', 'blockquote_start', 'loose_item_start'];

var nestingEndTokens = ['list_item_end', 'blockquote_end', 'loose_item_end'];

var nestContainingTokens = ['list_item', 'blockquote', 'loose_item'];

/**
 * Collapse marked's flat start/end token pairs into per-token metadata:
 * each inner token gains a `nesting` array (innermost container first) and
 * an `indent` prefix string ('- ', '1. ', '> ', or plain indentation).
 * Recurses into nested lists/blockquotes. Mutates the tokens in place.
 *
 * Fixes over the previous revision (behavior unchanged):
 * - dropped the `token.nesting !== []` guard: comparing against a fresh
 *   array literal is always true (reference comparison), so it was dead;
 * - replaced the CoffeeScript `__indexOf.call(...)` polyfill with native
 *   Array.prototype.indexOf.
 *
 * @param {Object[]} ast Flat token array from marked's lexer
 * @return {Object[]} Flattened tokens with nesting/indent metadata
 */
var preprocessAST = function(ast) {
  var currentToken, i, innerTokens, itemIndex, j, nestingLevel, orderedList, orderedListItemNumber, out, subAST, token, tokenIndex;
  i = 0;
  out = [];
  orderedList = false;
  while (i < ast.length) {
    currentToken = ast[i];
    if (currentToken.type === 'list_start') {
      // Remember whether the upcoming items belong to an ordered list; the
      // item counter restarts for each list.
      orderedListItemNumber = 0;
      orderedList = currentToken.ordered;
    } else if (nestingStartTokens.indexOf(currentToken.type) >= 0) {
      tokenIndex = nestingStartTokens.indexOf(currentToken.type);
      currentToken.type = nestContainingTokens[tokenIndex];
      i++;
      // Collect everything up to the matching end token, tracking depth so a
      // nested container of the same kind doesn't end the region early.
      nestingLevel = 1;
      subAST = [];
      while (true) {
        if (nestingEndTokens.indexOf(ast[i].type) >= 0) {
          nestingLevel--;
        } else if (nestingStartTokens.indexOf(ast[i].type) >= 0) {
          nestingLevel++;
        }
        if (nestingLevel === 0) {
          break;
        }
        subAST.push(ast[i]);
        i++;
      }
      itemIndex = 0;
      innerTokens = preprocessAST(subAST);
      for (j = 0; j < innerTokens.length; j++) {
        token = innerTokens[j];
        if (token.nesting == null) {
          token.nesting = [];
        }
        if (token.indent == null) {
          token.indent = '';
        }
        token.nesting.push(currentToken.type);
        if (token.nesting.length > 1) {
          // Already inside another container: only add indentation.
          // NOTE(review): indent unit shown as a single space in the diff
          // rendering, which may have collapsed whitespace — confirm against
          // the original repository.
          token.indent = ' ' + token.indent;
        } else if (currentToken.type === 'blockquote') {
          token.indent += '> ';
        } else if (currentToken.type === 'list_item') {
          token.type = 'list_item';
          if (orderedList) {
            orderedListItemNumber++;
            token.indent += "" + orderedListItemNumber + ". ";
          } else {
            token.indent += '- ';
          }
        } else if (itemIndex === 0 && token.type === 'text' && currentToken.type === 'loose_item') {
          // First text token of a loose item carries the bullet.
          token.type = 'list_item';
          token.indent += '- ';
        } else {
          token.indent = ' ' + token.indent;
        }
        // Remaining text inside a loose item renders as paragraphs.
        if (token.type === 'text' && currentToken.type === 'loose_item') {
          token.type = 'paragraph';
        }
        itemIndex++;
        out.push(token);
      }
    } else {
      out.push(currentToken);
    }
    i++;
  }
  return out;
};
/** | ||
* Some people accidentally skip levels in their headers (like jumping from h1 to | ||
@@ -202,3 +34,3 @@ * h3), which screws up things like tables of contents. This function fixes | ||
fixHeaders = function(ast, ensureFirstHeaderIsH1) { | ||
var e, gap, i, lastHeaderDepth, parentDepth, rootDepth, _ref; | ||
var e, gap, i, lastHeaderDepth, parentDepth, ref1, rootDepth; | ||
i = 0; | ||
@@ -221,3 +53,3 @@ lastHeaderDepth = 0; | ||
} else if ((rootDepth <= (_ref = ast[i].depth) && _ref <= lastHeaderDepth + 1)) { | ||
} else if ((rootDepth <= (ref1 = ast[i].depth) && ref1 <= lastHeaderDepth + 1)) { | ||
lastHeaderDepth = ast[i].depth; | ||
@@ -250,7 +82,7 @@ } else { | ||
formatTable = function(token) { | ||
var alignment, col, colWidth, i, j, out, row, _i, _j, _k, _l, _len, _len1, _m, _ref, _ref1, _ref2, _ref3, _ref4; | ||
var alignment, col, colWidth, i, j, k, l, len, len1, m, n, o, out, ref1, ref2, ref3, ref4, ref5, row; | ||
out = []; | ||
for (i = _i = 0, _ref = token.header.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) { | ||
for (i = k = 0, ref1 = token.header.length; 0 <= ref1 ? k < ref1 : k > ref1; i = 0 <= ref1 ? ++k : --k) { | ||
col = [token.header[i]]; | ||
for (j = _j = 0, _ref1 = token.cells.length; 0 <= _ref1 ? _j < _ref1 : _j > _ref1; j = 0 <= _ref1 ? ++_j : --_j) { | ||
for (j = l = 0, ref2 = token.cells.length; 0 <= ref2 ? l < ref2 : l > ref2; j = 0 <= ref2 ? ++l : --l) { | ||
token.cells[j][i] = (token.cells[j][i] != null ? token.cells[j][i].trim() : ''); | ||
@@ -274,3 +106,3 @@ col.push(token.cells[j][i]); | ||
})()); | ||
for (j = _k = 0, _ref2 = token.cells.length; 0 <= _ref2 ? _k < _ref2 : _k > _ref2; j = 0 <= _ref2 ? ++_k : --_k) { | ||
for (j = m = 0, ref3 = token.cells.length; 0 <= ref3 ? m < ref3 : m > ref3; j = 0 <= ref3 ? ++m : --m) { | ||
token.cells[j][i] = (alignment === 'right' ? pad(colWidth, token.cells[j][i]) : pad(token.cells[j][i], colWidth)); | ||
@@ -282,5 +114,5 @@ } | ||
out.push(token.align.join(' | ')); | ||
_ref3 = token.cells; | ||
for (_l = 0, _len = _ref3.length; _l < _len; _l++) { | ||
row = _ref3[_l]; | ||
ref4 = token.cells; | ||
for (n = 0, len = ref4.length; n < len; n++) { | ||
row = ref4[n]; | ||
out.push(row.join(' | ').trimRight()); | ||
@@ -291,5 +123,5 @@ } | ||
out.push('| ' + token.align[0]); | ||
_ref4 = token.cells; | ||
for (_m = 0, _len1 = _ref4.length; _m < _len1; _m++) { | ||
row = _ref4[_m]; | ||
ref5 = token.cells; | ||
for (o = 0, len1 = ref5.length; o < len1; o++) { | ||
row = ref5[o]; | ||
out.push('| ' + row[0].trimRight()); | ||
@@ -303,3 +135,3 @@ } | ||
module.exports = function(dirtyMarkdown, options) { | ||
var ast, content, id, line, link, links, optionalTitle, out, previousToken, token, _i, _j, _len, _len1, _ref, _ref1, _ref2; | ||
var ast, content, id, k, l, len, len1, line, link, links, optionalTitle, out, previousToken, ref1, ref2, ref3, token; | ||
if (options == null) { | ||
@@ -320,9 +152,9 @@ options = {}; | ||
ast = ast.filter(function(token) { | ||
var _ref; | ||
return (_ref = token.type) !== 'space' && _ref !== 'list_end'; | ||
var ref1; | ||
return (ref1 = token.type) !== 'space' && ref1 !== 'list_end'; | ||
}); | ||
ast = preprocessAST(ast); | ||
ast = fixHeaders(ast, options.ensureFirstHeaderIsH1); | ||
for (_i = 0, _len = ast.length; _i < _len; _i++) { | ||
token = ast[_i]; | ||
for (k = 0, len = ast.length; k < len; k++) { | ||
token = ast[k]; | ||
if (token.indent == null) { | ||
@@ -342,13 +174,13 @@ token.indent = ''; | ||
case 'paragraph': | ||
if ((_ref = previousToken != null ? previousToken.type : void 0) === 'paragraph' || _ref === 'list_item' || _ref === 'text') { | ||
if ((ref1 = previousToken != null ? previousToken.type : void 0) === 'paragraph' || ref1 === 'list_item' || ref1 === 'text') { | ||
out.push(''); | ||
} | ||
out.push(token.indent + prettyInlineMarkdown(token).text.replace(/\n/g, ' ')); | ||
out.push(token.indent + tidyInlineMarkdown(token).text.replace(/\n/g, ' ')); | ||
break; | ||
case 'text': | ||
case 'list_item': | ||
if ((previousToken != null) && token.type === 'list_item' && (previousToken.nesting.length > token.nesting.length || (previousToken.type === 'paragraph' && ((_ref1 = previousToken.nesting) != null ? _ref1.length : void 0) >= token.nesting.length))) { | ||
if ((previousToken != null) && token.type === 'list_item' && (previousToken.nesting.length > token.nesting.length || (previousToken.type === 'paragraph' && ((ref2 = previousToken.nesting) != null ? ref2.length : void 0) >= token.nesting.length))) { | ||
out.push(''); | ||
} | ||
out.push(token.indent + prettyInlineMarkdown(token).text); | ||
out.push(token.indent + tidyInlineMarkdown(token).text); | ||
break; | ||
@@ -359,3 +191,3 @@ case 'code': | ||
} | ||
token.text = delimitCode("" + token.lang + "\n" + token.text + "\n", '```'); | ||
token.text = delimitCode(token.lang + "\n" + token.text + "\n", '```'); | ||
out.push('', indent(token.text, token.indent), ''); | ||
@@ -376,5 +208,5 @@ break; | ||
case 'html': | ||
_ref2 = token.text.split('\n'); | ||
for (_j = 0, _len1 = _ref2.length; _j < _len1; _j++) { | ||
line = _ref2[_j]; | ||
ref3 = token.text.split('\n'); | ||
for (l = 0, len1 = ref3.length; l < len1; l++) { | ||
line = ref3[l]; | ||
out.push(line); | ||
@@ -381,0 +213,0 @@ } |
{ | ||
"name": "tidy-markdown", | ||
"description": "fix ugly markdown.", | ||
"version": "0.3.2", | ||
"version": "0.3.3", | ||
"author": "Sean Lang", | ||
@@ -15,3 +15,3 @@ "bin": { | ||
"js-yaml": "^3.2.7", | ||
"marked": "^0.3.2", | ||
"marked": "0.3.2", | ||
"pad": "0.0.5" | ||
@@ -25,9 +25,9 @@ }, | ||
"keywords": [ | ||
"markdown", | ||
"parse", | ||
"pretty", | ||
"markdown", | ||
"reformat", | ||
"parse", | ||
"styleguide" | ||
], | ||
"license": "GPLv3", | ||
"license": "GPL-3.0", | ||
"main": "lib", | ||
@@ -34,0 +34,0 @@ "repository": "git://github.com/slang800/tidy-markdown.git", |
Sorry, the diff of this file is not supported yet
Major refactor
Supply chain risk: This package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
New author
Supply chain risk: A new npm collaborator published a version of the package for the first time. New collaborators are usually benign additions to a project, but they do indicate a change to the security surface area of a package.
Found 1 instance in 1 package
54348
11
431
2
+ Added marked@0.3.2 (transitive)
- Removed marked@0.3.19 (transitive)
Updated marked@0.3.2