tidy-markdown
Comparing version 0.4.0 to 1.0.0
#!/usr/bin/env node
try {
  require('coffee-script/register');
  // in production, this will fail if coffeescript isn't installed, but the
  // coffee is compiled anyway, so it doesn't matter
} catch(e){}
require('../lib/cli');
lib/cli.js

// Generated by CoffeeScript 1.10.0
(function() {
  var ArgumentParser, argparser, argv, packageInfo, tidyMarkdown;

  packageInfo = require('../package');

  ArgumentParser = require('argparse').ArgumentParser;

  tidyMarkdown = require('./');

  argparser = new ArgumentParser({
    version: packageInfo.version,
    addHelp: true,
    description: packageInfo.description
  });

  argparser.addArgument(['--no-ensure-first-header-is-h1'], {
    action: 'storeFalse',
    help: 'Disable fixing the first header when it isn\'t an H1. This is useful if the markdown you\'re processing isn\'t a full document, but rather a piece of a larger document.',
    defaultValue: true,
    dest: 'ensureFirstHeaderIsH1'
  });

  argv = argparser.parseArgs();

  process.stdin.setEncoding('utf8');

  process.stdin.on('readable', function() {
    var buffer, chunk;
    buffer = '';
    while (null !== (chunk = process.stdin.read())) {
      buffer += chunk;
    }
    process.stdout.write(tidyMarkdown(buffer, argv));
  });

}).call(this);
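The CLI is a thin wrapper around the library: argparse turns `--no-ensure-first-header-is-h1` (action `storeFalse`, default `true`) into `argv.ensureFirstHeaderIsH1 = false`, and the whole `argv` object is handed to `tidyMarkdown` as its options argument. A minimal sketch of the equivalent programmatic call, with an invented input string:

```js
// Sketch only: calling the library the same way the CLI does after flag parsing.
var tidyMarkdown = require('tidy-markdown');

// Hypothetical input: a fragment whose first header is deliberately not an H1.
var uglyFragment = '### A section of a larger document\nsome text';

// Equivalent of piping the fragment through the CLI with
// --no-ensure-first-header-is-h1.
var cleaned = tidyMarkdown(uglyFragment, {ensureFirstHeaderIsH1: false});

process.stdout.write(cleaned);
```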
lib/index.js
// Generated by CoffeeScript 1.10.0
(function() {
  var delimitCode, fixHeaders, fm, formatTable, indent, longestStringInArray, marked, pad, preprocessAST, ref, stringRepeat, tidyInlineMarkdown, yaml;

  fm = require('front-matter');

  indent = require('indent');

  marked = require('marked');

  pad = require('pad');

  yaml = require('js-yaml');

  ref = require('./utils'), stringRepeat = ref.stringRepeat, longestStringInArray = ref.longestStringInArray, delimitCode = ref.delimitCode;

  preprocessAST = require('./preprocess');

  tidyInlineMarkdown = require('./tidy-inline-markdown');

  /**
   * Some people accidentally skip levels in their headers (like jumping from h1
   * to h3), which screws up things like tables of contents. This function fixes
   * that.
   * The algorithm assumes that relations between nearby headers are correct and
   * will try to preserve them. For example, "h1, h3, h3" becomes "h1, h2, h2"
   * rather than "h1, h2, h3".
   */
  fixHeaders = function(ast, ensureFirstHeaderIsH1) {
    var e, gap, i, lastHeaderDepth, parentDepth, ref1, rootDepth;
    i = 0;
    lastHeaderDepth = 0;
    if (!ensureFirstHeaderIsH1) {
      e = 0;
      while (e < ast.length) {
        if (ast[e].type !== 'heading') {
          e++;
        } else {
          lastHeaderDepth = ast[e].depth - 1;
          break;
        }
      }
    }
    rootDepth = lastHeaderDepth + 1;
    while (i < ast.length) {
      if (ast[i].type !== 'heading') {

      } else if ((rootDepth <= (ref1 = ast[i].depth) && ref1 <= lastHeaderDepth + 1)) {
        lastHeaderDepth = ast[i].depth;
      } else {
        e = i;
        if (ast[i].depth <= rootDepth) {
          gap = ast[i].depth - rootDepth;
        } else {
          gap = ast[i].depth - (lastHeaderDepth + 1);
        }
        parentDepth = ast[i].depth;
        while (e < ast.length) {
          if (ast[e].type !== 'heading') {

          } else if (ast[e].depth >= parentDepth) {
            ast[e].depth -= gap;
          } else {
            break;
          }
          e++;
        }
        continue;
      }
      i++;
    }
    return ast;
  };
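A quick illustration of the behavior the docstring describes. `fixHeaders` is private to this file (not exported), so this is a sketch for reading purposes only; the token objects are trimmed down to the fields the function actually looks at:

```js
// Sketch: heading depths "h1, h3, h3" are normalized to "h1, h2, h2".
var ast = [
  {type: 'heading', depth: 1, text: 'Title'},
  {type: 'paragraph', text: 'intro'},
  {type: 'heading', depth: 3, text: 'Skipped a level'},
  {type: 'heading', depth: 3, text: 'Sibling at the same level'}
];

fixHeaders(ast, true);
// Depths are now [1, 2, 2]: the one-level gap is subtracted from the first h3
// and from every following heading nested at or below it, preserving the
// relationship between the two h3 siblings.
```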
  formatTable = function(token) {
    var alignment, col, colWidth, i, j, k, l, len, len1, m, n, o, out, ref1, ref2, ref3, ref4, ref5, row;
    out = [];
    for (i = k = 0, ref1 = token.header.length; 0 <= ref1 ? k < ref1 : k > ref1; i = 0 <= ref1 ? ++k : --k) {
      col = [token.header[i]];
      for (j = l = 0, ref2 = token.cells.length; 0 <= ref2 ? l < ref2 : l > ref2; j = 0 <= ref2 ? ++l : --l) {
        token.cells[j][i] = (token.cells[j][i] != null ? token.cells[j][i].trim() : '');
        col.push(token.cells[j][i]);
      }
      colWidth = longestStringInArray(col);
      token.header[i] = pad(token.header[i], colWidth);
      alignment = token.align[i];
      token.align[i] = ((function() {
        switch (alignment) {
          case null:
            return pad('', colWidth, '-');
          case 'left':
            return ':' + pad('', colWidth - 1, '-');
          case 'center':
            return ':' + pad('', colWidth - 2, '-') + ':';
          case 'right':
            return pad('', colWidth - 1, '-') + ':';
        }
      })());
      for (j = m = 0, ref3 = token.cells.length; 0 <= ref3 ? m < ref3 : m > ref3; j = 0 <= ref3 ? ++m : --m) {
        token.cells[j][i] = (alignment === 'right' ? pad(colWidth, token.cells[j][i]) : pad(token.cells[j][i], colWidth));
      }
    }
    if (token.header.length > 1) {
      out.push(token.header.join(' | ').trimRight());
      out.push(token.align.join(' | '));
      ref4 = token.cells;
      for (n = 0, len = ref4.length; n < len; n++) {
        row = ref4[n];
        out.push(row.join(' | ').trimRight());
      }
    } else {
      out.push('| ' + token.header[0].trimRight());
      out.push('| ' + token.align[0]);
      ref5 = token.cells;
      for (o = 0, len1 = ref5.length; o < len1; o++) {
        row = ref5[o];
        out.push('| ' + row[0].trimRight());
      }
    }
    out.push('');
    return out;
  };
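To make the padding and alignment logic concrete, here is a sketch of the kind of table token `formatTable` receives from marked's lexer and roughly what it returns. The function is private to this file, and the values below are invented for illustration:

```js
// Sketch: a two-column table token, second column right-aligned.
var tableToken = {
  header: ['Name', 'Qty'],
  align: [null, 'right'],
  cells: [['apple', '1'], ['banana', '10']]
};

formatTable(tableToken);
// Returns lines roughly like:
// [ 'Name   | Qty',
//   '------ | --:',
//   'apple  |   1',
//   'banana |  10',
//   '' ]
```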
  module.exports = function(dirtyMarkdown, options) {
    var ast, content, id, k, l, len, len1, line, link, links, optionalTitle, out, previousToken, ref1, ref2, ref3, token;
    if (options == null) {
      options = {};
    }
    if (options.ensureFirstHeaderIsH1 == null) {
      options.ensureFirstHeaderIsH1 = true;
    }
    out = [];
    content = fm(dirtyMarkdown);
    if (Object.keys(content.attributes).length !== 0) {
      out.push('---', yaml.safeDump(content.attributes).trim(), '---\n');
    }
    ast = marked.lexer(content.body);
    links = ast.links;
    previousToken = void 0;
    ast = ast.filter(function(token) {
      var ref1;
      return (ref1 = token.type) !== 'space' && ref1 !== 'list_end';
    });
    ast = preprocessAST(ast);
    ast = fixHeaders(ast, options.ensureFirstHeaderIsH1);
    for (k = 0, len = ast.length; k < len; k++) {
      token = ast[k];
      if (token.indent == null) {
        token.indent = '';
      }
      if (token.nesting == null) {
        token.nesting = [];
      }
      switch (token.type) {
        case 'heading':
          if (previousToken != null) {
            out.push('');
          }
          out.push(stringRepeat('#', token.depth) + ' ' + token.text);
          out.push('');
          break;
        case 'paragraph':
          if ((ref1 = previousToken != null ? previousToken.type : void 0) === 'paragraph' || ref1 === 'list_item' || ref1 === 'text') {
            out.push('');
          }
          out.push(token.indent + tidyInlineMarkdown(token).text.replace(/\n/g, ' '));
          break;
        case 'text':
        case 'list_item':
          if ((previousToken != null) && token.type === 'list_item' && (previousToken.nesting.length !== token.nesting.length || (previousToken.type === 'paragraph' && ((ref2 = previousToken.nesting) != null ? ref2.length : void 0) >= token.nesting.length))) {
            out.push('');
          }
          out.push(token.indent + tidyInlineMarkdown(token).text);
          break;
        case 'code':
          if (token.lang == null) {
            token.lang = '';
          }
          token.text = delimitCode(token.lang + "\n" + token.text + "\n", '```');
          out.push('', indent(token.text, token.indent), '');
          break;
        case 'table':
          if (previousToken != null) {
            out.push('');
          }
          out.push.apply(out, formatTable(token));
          break;
        case 'hr':
          if (previousToken != null) {
            out.push('');
          }
          out.push(token.indent + stringRepeat('-', 80), '');
          break;
        case 'html':
          ref3 = token.text.split('\n');
          for (l = 0, len1 = ref3.length; l < len1; l++) {
            line = ref3[l];
            out.push(line);
          }
          break;
        default:
          throw new Error("Unknown Token: " + token.type);
      }
      previousToken = token;
    }
    if (Object.keys(links).length > 0) {
      out.push('');
    }
    for (id in links) {
      link = links[id];
      optionalTitle = link.title ? " \"" + link.title + "\"" : '';
      out.push("[" + id + "]: " + link.href + optionalTitle);
    }
    out.push('');
    out = out.filter(function(val, i, arr) {
      return !(val === '' && arr[i - 1] === '');
    });
    return out.join('\n');
  };

}).call(this);
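The exported function is the whole pipeline in one call: front-matter is split off and re-emitted as YAML, marked's lexer produces block tokens, `preprocessAST` and `fixHeaders` normalize them, and each token is rendered back to text, with reference links appended at the end. A minimal sketch of calling it directly, using an invented input string:

```js
// Sketch: running the exported function on a small, deliberately messy document.
var tidyMarkdown = require('./lib'); // inside the repo; the published name is 'tidy-markdown'

var dirty = [
  '---',
  'title: example',
  '---',
  '## not an h1',
  'some *emphasis* and “smart quotes”'
].join('\n');

console.log(tidyMarkdown(dirty, {ensureFirstHeaderIsH1: true}));
// The front matter is kept, the first header is promoted to an H1, and the
// inline markdown is normalized (underscore emphasis, straight quotes).
```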
lib/preprocess.js

// Generated by CoffeeScript 1.10.0
(function() {
  var nestContainingTokens, nestingEndTokens, nestingStartTokens, preprocessAST,
    indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; };

  nestingStartTokens = ['list_item_start', 'blockquote_start', 'loose_item_start'];

  nestingEndTokens = ['list_item_end', 'blockquote_end', 'loose_item_end'];

  nestContainingTokens = ['list_item', 'blockquote', 'loose_item'];

  preprocessAST = function(ast) {
    var currentToken, e, i, j, len, nestingLevel, orderedList, orderedListItemNumber, out, ref, ref1, ref2, ref3, subAST, token, tokenIndex;
    i = 0;
    out = [];
    orderedList = false;
    while (i < ast.length) {
      currentToken = ast[i];
      if (currentToken.type === 'list_start') {
        orderedListItemNumber = 0;
        orderedList = currentToken.ordered;
      } else if (ref = currentToken.type, indexOf.call(nestingStartTokens, ref) >= 0) {
        tokenIndex = nestingStartTokens.indexOf(currentToken.type);
        currentToken.type = nestContainingTokens[tokenIndex];
        i++;
        nestingLevel = 1;
        subAST = [];
        while (true) {
          if (ref1 = ast[i].type, indexOf.call(nestingEndTokens, ref1) >= 0) {
            nestingLevel--;
          } else if (ref2 = ast[i].type, indexOf.call(nestingStartTokens, ref2) >= 0) {
            nestingLevel++;
          }
          if (nestingLevel === 0) {
            break;
          }
          subAST.push(ast[i]);
          i++;
        }
        e = 0;
        ref3 = preprocessAST(subAST);
        for (j = 0, len = ref3.length; j < len; j++) {
          token = ref3[j];
          if (token.nesting == null) {
            token.nesting = [];
          }
          if (token.indent == null) {
            token.indent = '';
          }
          token.nesting.push(currentToken.type);
          if (token.nesting !== [] && token.nesting.length > 1) {
            token.indent = ' ' + token.indent;
          } else if (currentToken.type === 'blockquote') {
            token.indent += '> ';
          } else if (currentToken.type === 'list_item') {
            token.type = 'list_item';
            if (orderedList) {
              orderedListItemNumber++;
              token.indent += orderedListItemNumber + ". ";
            } else {
              token.indent += '- ';
            }
          } else if (e === 0 && token.type === 'text' && currentToken.type === 'loose_item') {
            token.type = 'list_item';
            token.indent += '- ';
          } else {
            token.indent = ' ' + token.indent;
          }
          if (token.type === 'text' && currentToken.type === 'loose_item') {
            token.type = 'paragraph';
          }
          e++;
          out.push(token);
        }
      } else {
        out.push(currentToken);
      }
      i++;
    }
    return out;
  };

  module.exports = preprocessAST;

}).call(this);
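`preprocessAST` flattens marked's `*_start`/`*_end` token pairs into single `list_item`, `blockquote`, or `loose_item` tokens and attaches the `indent` and `nesting` fields that the renderer in lib/index.js relies on. A rough sketch of the transformation, with token objects trimmed to the relevant fields (the exact shapes come from marked's lexer, and the values are invented):

```js
// Sketch: an unordered two-item list before and after preprocessing
// (space/list_end tokens are already filtered out by lib/index.js).
var preprocessAST = require('./lib/preprocess');

var before = [
  {type: 'list_start', ordered: false},
  {type: 'list_item_start'},
  {type: 'text', text: 'one'},
  {type: 'list_item_end'},
  {type: 'list_item_start'},
  {type: 'text', text: 'two'},
  {type: 'list_item_end'}
];

var after = preprocessAST(before);
// after is roughly (the list_start token is consumed, not re-emitted):
// [ {type: 'list_item', text: 'one', indent: '- ', nesting: ['list_item']},
//   {type: 'list_item', text: 'two', indent: '- ', nesting: ['list_item']} ]
```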
lib/tidy-inline-markdown.js

// Generated by CoffeeScript 1.10.0
(function() {
  var CODE_REGEX, Entities, IMG_REGEX, LINK_REGEX, delimitCode, htmlEntities, marked, tidyInlineMarkdown;

  Entities = require('html-entities').AllHtmlEntities;

  marked = require('marked');

  delimitCode = require('./utils').delimitCode;

  htmlEntities = new Entities();

  IMG_REGEX = /<img src="([^"]*)"(?: alt="([^"]*)")?(?: title="([^"]*)")?>/g;

  LINK_REGEX = /<a href="([^"]*)"(?: title="([^"]*)")?>([^<]*)<\/a>/g;

  CODE_REGEX = /<code>([^<]+)<\/code>/g;

  tidyInlineMarkdown = function(token) {
    token.text = marked.inlineLexer(token.text, token.links || {}).replace(/\u2014/g, '--').replace(/\u2018|\u2019/g, '\'').replace(/\u201c|\u201d/g, '"').replace(/\u2026/g, '...').replace(/<\/?strong>/g, '**').replace(/<\/?em>/g, '_').replace(/<\/?del>/g, '~~').replace(CODE_REGEX, function(m, code) {
      return delimitCode(code, '`');
    }).replace(IMG_REGEX, function(m, url, alt, title) {
      if (url == null) {
        url = '';
      }
      if (alt == null) {
        alt = '';
      }
      if (title != null) {
        title = title.replace(/\\|"/g, function(m) {
          return "\\" + m;
        });
        url += " \"" + title + "\"";
      }
      return "![" + alt + "](" + url + ")";
    }).replace(LINK_REGEX, function(m, url, title, text) {
      if (url == null) {
        url = '';
      }
      if (text == null) {
        text = '';
      }
      if (title != null) {
        title = title.replace(/\\|"/g, function(m) {
          return "\\" + m;
        });
        url += " \"" + title + "\"";
      }
      if (url === text && url !== '') {
        return "<" + url + ">";
      } else {
        return "[" + text + "](" + url + ")";
      }
    });
    token.text = htmlEntities.decode(token.text);
    return token;
  };

  module.exports = tidyInlineMarkdown;

}).call(this);
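The trick here is to let marked's inline lexer render the text to HTML and then translate that HTML back into one canonical markdown spelling: `**strong**` and `__strong__` both come out as `**strong**`, `*emphasis*` and `_emphasis_` both come out as `_emphasis_`, and typographic dashes, quotes, and ellipses are flattened to their ASCII forms. A small sketch of what that means in practice, with an invented input string:

```js
// Sketch: normalizing one inline string (the token is trimmed to the fields
// the function reads).
var tidyInlineMarkdown = require('./lib/tidy-inline-markdown');

var token = {text: 'some *emphasis*, __strong__ text, and “smart” quotes', links: {}};
tidyInlineMarkdown(token);
console.log(token.text);
// -> roughly: some _emphasis_, **strong** text, and "smart" quotes
```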
lib/utils.js
// Generated by CoffeeScript 1.10.0
(function() {
  var delimitCode, longestStringInArray, stringRepeat;

  stringRepeat = function(x, n) {
    var s;
    s = '';
    while (true) {
      if (n & 1) {
        s += x;
      }
      n >>= 1;
      if (n) {
        x += x;
      } else {
        break;
      }
    }
    return s;
  };

  /**
   * Find the length of the longest string in an array
   * @param {String[]} array Array of strings
   */

  longestStringInArray = function(array) {
    var i, len, len1, longest, str;
    longest = 0;
    for (i = 0, len1 = array.length; i < len1; i++) {
      str = array[i];
      len = str.length;
      if (len > longest) {
        longest = len;
      }
    }
    return longest;
  };

  /**
   * Wrap code with delimiters
   * @param {String} code
   * @param {String} delimiter The delimiter to start with; additional backticks
   *   will be added if needed, like if the code contains a sequence of backticks
   *   that would end the code block prematurely.
   */

  delimitCode = function(code, delimiter) {
    while (RegExp("([^`]|^)" + delimiter + "([^`]|$)").test(code)) {
      delimiter += '`';
    }
    if (code[0] === '`') {
      code = ' ' + code;
    }
    if (code.slice(-1) === '`') {
      code += ' ';
    }
    return delimiter + code + delimiter;
  };

  module.exports = {
    stringRepeat: stringRepeat,
    longestStringInArray: longestStringInArray,
    delimitCode: delimitCode
  };

}).call(this);
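Both helpers are tiny but worth seeing in action, especially `delimitCode`, which keeps growing the backtick fence until it can safely contain the code and pads the ends when the code itself starts or ends with a backtick. A quick sketch with invented values:

```js
// Sketch: exercising the exported helpers.
var utils = require('./lib/utils');

utils.stringRepeat('#', 3);                     // '###'
utils.longestStringInArray(['a', 'abc', 'ab']); // 3
utils.delimitCode('plain code', '`');           // '`plain code`'
utils.delimitCode('uses a ` backtick', '`');    // '``uses a ` backtick``'
```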
package.json

   {
     "name": "tidy-markdown",
     "description": "fix ugly markdown.",
-    "version": "0.4.0",
+    "version": "1.0.0",
     "author": "Sean Lang",
@@ -19,4 +19,4 @@ "bin": {
     "devDependencies": {
-      "coffee-script": "^1.7.1",
-      "mocha": "^1.21.4",
+      "coffee-script": "^1.10.0",
+      "mocha": "^2.4.5",
       "should": "^4.0.4"
@@ -23,0 +23,0 @@ },
README.md

# Tidy Markdown
[![Build Status](http://img.shields.io/travis/slang800/tidy-markdown.svg?style=flat-square)](https://travis-ci.org/slang800/tidy-markdown) [![NPM version](http://img.shields.io/npm/v/tidy-markdown.svg?style=flat-square)](https://www.npmjs.org/package/tidy-markdown) [![NPM license](http://img.shields.io/npm/l/tidy-markdown.svg?style=flat-square)](https://www.npmjs.org/package/tidy-markdown)
@@ -9,2 +10,3 @@
## Install
Tidy Markdown is an [npm](http://npmjs.org/package/tidy-markdown) package, so it can be installed like this:
@@ -17,2 +19,3 @@
## CLI
Tidy Markdown includes a simple CLI. It operates entirely over STDIN/STDOUT. For example:
@@ -30,3 +33,5 @@
# Some markdown
Lorem ipsum dolor adipiscing
- one
@@ -61,2 +66,3 @@ - two
## API
Tidy Markdown exports a single function, which takes one argument (the string of ugly markdown). Here's an example of how it can be used (see the sketch after this excerpt):
@@ -86,3 +92,5 @@
# Some markdown
Lorem ipsum dolor adipiscing
- one
@@ -94,2 +102,3 @@ - two
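The README's actual code example is elided in this diff view, but a minimal sketch of the call it describes would look something like the following; the ugly input is invented, and the tidied output resembles the sample lines shown above:

```js
// Sketch: basic API usage, as described in the README's API section.
var tidyMarkdown = require('tidy-markdown');

var uglyMarkdown = [
  'Some markdown',
  '=============',
  'Lorem ipsum dolor adipiscing',
  '',
  '* one',
  '* two'
].join('\n');

console.log(tidyMarkdown(uglyMarkdown));
// Prints something close to the sample above: an ATX-style "# Some markdown"
// heading, the paragraph, and a hyphen-bulleted list.
```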
## Features
- standardize syntactical elements to use a single way of being written (for example, all unordered lists are formatted to start with hyphens, rather than allowing asterisks and/or plus signs to be mixed in).
@@ -96,0 +105,0 @@ - fix numbering - making ordered lists count naturally from 1 to _n_ and reference links do the same (based on first occurrence).
No v1 (Quality)
Package is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package.