@stacksjs/logsmith
Advanced tools
Sorry, the diff of this file is too big to display
Sorry, the diff of this file is too big to display
| import { | ||
| __commonJS | ||
| } from "./chunk-xne0pxt1.js"; | ||
// ../../node_modules/markdownlint/helpers/shared.cjs
// Symbols and regular expressions shared across the markdownlint helper modules.
var require_shared = __commonJS((exports, module) => {
  // Marks a token array as pre-flattened by the micromark parse step.
  exports.flatTokensSymbol = Symbol("flat-tokens");
  // Marks a token as having been parsed inside an HTML-flow block.
  exports.htmlFlowSymbol = Symbol("html-flow");
  // Matches one line ending: CRLF, lone CR, or LF.
  exports.newLineRe = /\r\n?|\n/g;
  // Matches everything from the first line ending through end of string.
  exports.nextLinesRe = /[\r\n][\s\S]*$/;
});
| // ../../node_modules/markdownlint/helpers/micromark-helpers.cjs | ||
| var require_micromark_helpers = __commonJS((exports, module) => { | ||
| var { flatTokensSymbol, htmlFlowSymbol } = require_shared(); | ||
| function inHtmlFlow(token) { | ||
| return Boolean(token[htmlFlowSymbol]); | ||
| } | ||
| function isHtmlFlowComment(token) { | ||
| const { text, type } = token; | ||
| if (type === "htmlFlow" && text.startsWith("<!--") && text.endsWith("-->")) { | ||
| const comment = text.slice(4, -3); | ||
| return !comment.startsWith(">") && !comment.startsWith("->") && !comment.endsWith("-"); | ||
| } | ||
| return false; | ||
| } | ||
| function addRangeToSet(set, start, end) { | ||
| for (let i = start;i <= end; i++) { | ||
| set.add(i); | ||
| } | ||
| } | ||
| function filterByPredicate(tokens, allowed, transformChildren) { | ||
| const result = []; | ||
| const queue = [ | ||
| { | ||
| array: tokens, | ||
| index: 0 | ||
| } | ||
| ]; | ||
| while (queue.length > 0) { | ||
| const current = queue[queue.length - 1]; | ||
| const { array, index } = current; | ||
| if (index < array.length) { | ||
| const token = array[current.index++]; | ||
| if (allowed(token)) { | ||
| result.push(token); | ||
| } | ||
| const { children } = token; | ||
| if (children.length > 0) { | ||
| const transformed = transformChildren ? transformChildren(token) : children; | ||
| queue.push({ | ||
| array: transformed, | ||
| index: 0 | ||
| }); | ||
| } | ||
| } else { | ||
| queue.pop(); | ||
| } | ||
| } | ||
| return result; | ||
| } | ||
| function filterByTypes(tokens, types, htmlFlow) { | ||
| const predicate = (token) => types.includes(token.type) && (htmlFlow || !inHtmlFlow(token)); | ||
| const flatTokens = tokens[flatTokensSymbol]; | ||
| if (flatTokens) { | ||
| return flatTokens.filter(predicate); | ||
| } | ||
| return filterByPredicate(tokens, predicate); | ||
| } | ||
| function getBlockQuotePrefixText(tokens, lineNumber, count = 1) { | ||
| return filterByTypes(tokens, ["blockQuotePrefix", "linePrefix"]).filter((prefix) => prefix.startLine === lineNumber).map((prefix) => prefix.text).join("").trimEnd().concat(` | ||
| `).repeat(count); | ||
| } | ||
| function getDescendantsByType(parent, typePath) { | ||
| let tokens = Array.isArray(parent) ? parent : [parent]; | ||
| for (const type of typePath) { | ||
| const predicate = (token) => Array.isArray(type) ? type.includes(token.type) : type === token.type; | ||
| tokens = tokens.flatMap((t) => t.children.filter(predicate)); | ||
| } | ||
| return tokens; | ||
| } | ||
| function getHeadingLevel(heading) { | ||
| let level = 1; | ||
| const headingSequence = heading.children.find((child) => ["atxHeadingSequence", "setextHeadingLine"].includes(child.type)); | ||
| const { text } = headingSequence; | ||
| if (text[0] === "#") { | ||
| level = Math.min(text.length, 6); | ||
| } else if (text[0] === "-") { | ||
| level = 2; | ||
| } | ||
| return level; | ||
| } | ||
| function getHeadingStyle(heading) { | ||
| if (heading.type === "setextHeading") { | ||
| return "setext"; | ||
| } | ||
| const atxHeadingSequenceLength = heading.children.filter((child) => child.type === "atxHeadingSequence").length; | ||
| if (atxHeadingSequenceLength === 1) { | ||
| return "atx"; | ||
| } | ||
| return "atx_closed"; | ||
| } | ||
| function getHeadingText(heading) { | ||
| const headingText = getDescendantsByType(heading, [["atxHeadingText", "setextHeadingText"]]).flatMap((descendant) => descendant.children.filter((child) => child.type !== "htmlText")).map((data) => data.text).join("").replace(/[\r\n]+/g, " "); | ||
| return headingText || ""; | ||
| } | ||
| function getHtmlTagInfo(token) { | ||
| const htmlTagNameRe = /^<([^!>][^/\s>]*)/; | ||
| if (token.type === "htmlText") { | ||
| const match = htmlTagNameRe.exec(token.text); | ||
| if (match) { | ||
| const name = match[1]; | ||
| const close = name.startsWith("/"); | ||
| return { | ||
| close, | ||
| name: close ? name.slice(1) : name | ||
| }; | ||
| } | ||
| } | ||
| return null; | ||
| } | ||
| function getParentOfType(token, types) { | ||
| let current = token; | ||
| while ((current = current.parent) && !types.includes(current.type)) {} | ||
| return current; | ||
| } | ||
| var docfxTabSyntaxRe = /^#tab\//; | ||
| function isDocfxTab(heading) { | ||
| if (heading?.type === "atxHeading") { | ||
| const headingTexts = getDescendantsByType(heading, ["atxHeadingText"]); | ||
| if (headingTexts.length === 1 && headingTexts[0].children.length === 1 && headingTexts[0].children[0].type === "link") { | ||
| const resourceDestinationStrings = filterByTypes(headingTexts[0].children[0].children, ["resourceDestinationString"]); | ||
| return resourceDestinationStrings.length === 1 && docfxTabSyntaxRe.test(resourceDestinationStrings[0].text); | ||
| } | ||
| } | ||
| return false; | ||
| } | ||
| var nonContentTokens = new Set([ | ||
| "blockQuoteMarker", | ||
| "blockQuotePrefix", | ||
| "blockQuotePrefixWhitespace", | ||
| "lineEnding", | ||
| "lineEndingBlank", | ||
| "linePrefix", | ||
| "listItemIndent", | ||
| "undefinedReference", | ||
| "undefinedReferenceCollapsed", | ||
| "undefinedReferenceFull", | ||
| "undefinedReferenceShortcut" | ||
| ]); | ||
| module.exports = { | ||
| addRangeToSet, | ||
| filterByPredicate, | ||
| filterByTypes, | ||
| getBlockQuotePrefixText, | ||
| getDescendantsByType, | ||
| getHeadingLevel, | ||
| getHeadingStyle, | ||
| getHeadingText, | ||
| getHtmlTagInfo, | ||
| getParentOfType, | ||
| inHtmlFlow, | ||
| isDocfxTab, | ||
| isHtmlFlowComment, | ||
| nonContentTokens | ||
| }; | ||
| }); | ||
| // ../../node_modules/markdownlint/helpers/helpers.cjs | ||
| var require_helpers = __commonJS((exports, module) => { | ||
| var micromark = require_micromark_helpers(); | ||
| var { newLineRe, nextLinesRe } = require_shared(); | ||
| exports.newLineRe = newLineRe; | ||
| exports.nextLinesRe = nextLinesRe; | ||
| exports.frontMatterRe = /((^---[^\S\r\n\u2028\u2029]*$[\s\S]+?^---\s*)|(^\+\+\+[^\S\r\n\u2028\u2029]*$[\s\S]+?^(\+\+\+|\.\.\.)\s*)|(^\{[^\S\r\n\u2028\u2029]*$[\s\S]+?^\}\s*))(\r\n|\r|\n|$)/m; | ||
| var inlineCommentStartRe = /(<!--\s*markdownlint-(disable|enable|capture|restore|disable-file|enable-file|disable-line|disable-next-line|configure-file))(?:\s|-->)/gi; | ||
| exports.inlineCommentStartRe = inlineCommentStartRe; | ||
| exports.endOfLineHtmlEntityRe = /&(?:#\d+|#[xX][\da-fA-F]+|[a-zA-Z]{2,31}|blk\d{2}|emsp1[34]|frac\d{2}|sup\d|there4);$/; | ||
| exports.endOfLineGemojiCodeRe = /:(?:[abmovx]|[-+]1|100|1234|(?:1st|2nd|3rd)_place_medal|8ball|clock\d{1,4}|e-mail|non-potable_water|o2|t-rex|u5272|u5408|u55b6|u6307|u6708|u6709|u6e80|u7121|u7533|u7981|u7a7a|[a-z]{2,15}2?|[a-z]{1,14}(?:_[a-z\d]{1,16})+):$/; | ||
| var allPunctuation = ".,;:!?。,;:!?"; | ||
| exports.allPunctuation = allPunctuation; | ||
| exports.allPunctuationNoQuestion = allPunctuation.replace(/[??]/gu, ""); | ||
| function isNumber(obj) { | ||
| return typeof obj === "number"; | ||
| } | ||
| exports.isNumber = isNumber; | ||
| function isString(obj) { | ||
| return typeof obj === "string"; | ||
| } | ||
| exports.isString = isString; | ||
| function isEmptyString(str) { | ||
| return str.length === 0; | ||
| } | ||
| exports.isEmptyString = isEmptyString; | ||
| function isObject(obj) { | ||
| return !!obj && typeof obj === "object" && !Array.isArray(obj); | ||
| } | ||
| exports.isObject = isObject; | ||
| function isUrl(obj) { | ||
| return !!obj && Object.getPrototypeOf(obj) === URL.prototype; | ||
| } | ||
| exports.isUrl = isUrl; | ||
| function cloneIfArray(arr) { | ||
| return Array.isArray(arr) ? [...arr] : arr; | ||
| } | ||
| exports.cloneIfArray = cloneIfArray; | ||
| function cloneIfUrl(url) { | ||
| return isUrl(url) ? new URL(url) : url; | ||
| } | ||
| exports.cloneIfUrl = cloneIfUrl; | ||
| exports.getHtmlAttributeRe = function getHtmlAttributeRe(name) { | ||
| return new RegExp(`\\s${name}\\s*=\\s*['"]?([^'"\\s>]*)`, "iu"); | ||
| }; | ||
| function isBlankLine(line) { | ||
| const startComment = "<!--"; | ||
| const endComment = "-->"; | ||
| const removeComments = (s) => { | ||
| while (true) { | ||
| const start = s.indexOf(startComment); | ||
| const end = s.indexOf(endComment); | ||
| if (end !== -1 && (start === -1 || end < start)) { | ||
| s = s.slice(end + endComment.length); | ||
| } else if (start !== -1 && end !== -1) { | ||
| s = s.slice(0, start) + s.slice(end + endComment.length); | ||
| } else if (start !== -1 && end === -1) { | ||
| s = s.slice(0, start); | ||
| } else { | ||
| return s; | ||
| } | ||
| } | ||
| }; | ||
| return !line || !line.trim() || !removeComments(line).replace(/>/g, "").trim(); | ||
| } | ||
| exports.isBlankLine = isBlankLine; | ||
| var htmlCommentBegin = "<!--"; | ||
| var htmlCommentEnd = "-->"; | ||
| var safeCommentCharacter = "."; | ||
| var startsWithPipeRe = /^ *\|/; | ||
| var notCrLfRe = /[^\r\n]/g; | ||
| var notSpaceCrLfRe = /[^ \r\n]/g; | ||
| var trailingSpaceRe = / +[\r\n]/g; | ||
| var replaceTrailingSpace = (s) => s.replace(notCrLfRe, safeCommentCharacter); | ||
| exports.clearHtmlCommentText = function clearHtmlCommentText(text) { | ||
| let i = 0; | ||
| while ((i = text.indexOf(htmlCommentBegin, i)) !== -1) { | ||
| const j = text.indexOf(htmlCommentEnd, i + 2); | ||
| if (j === -1) { | ||
| break; | ||
| } | ||
| if (j > i + htmlCommentBegin.length) { | ||
| const content = text.slice(i + htmlCommentBegin.length, j); | ||
| const lastLf = text.lastIndexOf(` | ||
| `, i) + 1; | ||
| const preText = text.slice(lastLf, i); | ||
| const isBlock = preText.trim().length === 0; | ||
| const couldBeTable = startsWithPipeRe.test(preText); | ||
| const spansTableCells = couldBeTable && content.includes(` | ||
| `); | ||
| const isValid = isBlock || !(spansTableCells || content.startsWith(">") || content.startsWith("->") || content.endsWith("-") || content.includes("--")); | ||
| if (isValid) { | ||
| const clearedContent = content.replace(notSpaceCrLfRe, safeCommentCharacter).replace(trailingSpaceRe, replaceTrailingSpace); | ||
| text = text.slice(0, i + htmlCommentBegin.length) + clearedContent + text.slice(j); | ||
| } | ||
| } | ||
| i = j + htmlCommentEnd.length; | ||
| } | ||
| return text; | ||
| }; | ||
| exports.escapeForRegExp = function escapeForRegExp(str) { | ||
| return str.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&"); | ||
| }; | ||
| function ellipsify(text, start, end) { | ||
| if (text.length <= 30) {} else if (start && end) { | ||
| text = text.slice(0, 15) + "..." + text.slice(-15); | ||
| } else if (end) { | ||
| text = "..." + text.slice(-30); | ||
| } else { | ||
| text = text.slice(0, 30) + "..."; | ||
| } | ||
| return text; | ||
| } | ||
| exports.ellipsify = ellipsify; | ||
| function addError(onError, lineNumber, detail, context, range, fixInfo) { | ||
| onError({ | ||
| lineNumber, | ||
| detail, | ||
| context, | ||
| range, | ||
| fixInfo | ||
| }); | ||
| } | ||
| exports.addError = addError; | ||
| function addErrorDetailIf(onError, lineNumber, expected, actual, detail, context, range, fixInfo) { | ||
| if (expected !== actual) { | ||
| addError(onError, lineNumber, "Expected: " + expected + "; Actual: " + actual + (detail ? "; " + detail : ""), context, range, fixInfo); | ||
| } | ||
| } | ||
| exports.addErrorDetailIf = addErrorDetailIf; | ||
| function addErrorContext(onError, lineNumber, context, start, end, range, fixInfo) { | ||
| context = ellipsify(context.replace(newLineRe, ` | ||
| `), start, end); | ||
| addError(onError, lineNumber, undefined, context, range, fixInfo); | ||
| } | ||
| exports.addErrorContext = addErrorContext; | ||
| var positionLessThanOrEqual = (lineA, columnA, lineB, columnB) => lineA < lineB || lineA === lineB && columnA <= columnB; | ||
| exports.hasOverlap = function hasOverlap(rangeA, rangeB) { | ||
| const lte = positionLessThanOrEqual(rangeA.startLine, rangeA.startColumn, rangeB.startLine, rangeB.startColumn); | ||
| const first = lte ? rangeA : rangeB; | ||
| const second = lte ? rangeB : rangeA; | ||
| return positionLessThanOrEqual(second.startLine, second.startColumn, first.endLine, first.endColumn); | ||
| }; | ||
| exports.frontMatterHasTitle = function frontMatterHasTitle(frontMatterLines, frontMatterTitlePattern) { | ||
| const ignoreFrontMatter = frontMatterTitlePattern !== undefined && !frontMatterTitlePattern; | ||
| const frontMatterTitleRe = new RegExp(String(frontMatterTitlePattern || '^\\s*"?title"?\\s*[:=]'), "i"); | ||
| return !ignoreFrontMatter && frontMatterLines.some((line) => frontMatterTitleRe.test(line)); | ||
| }; | ||
| function getReferenceLinkImageData(tokens) { | ||
| const normalizeReference = (s) => s.toLowerCase().trim().replace(/\s+/g, " "); | ||
| const getText = (t) => t?.children.filter((c) => c.type !== "blockQuotePrefix").map((c) => c.text).join(""); | ||
| const references = new Map; | ||
| const shortcuts = new Map; | ||
| const addReferenceToDictionary = (token, label, isShortcut) => { | ||
| const referenceDatum = [ | ||
| token.startLine - 1, | ||
| token.startColumn - 1, | ||
| token.text.length | ||
| ]; | ||
| const reference = normalizeReference(label); | ||
| const dictionary = isShortcut ? shortcuts : references; | ||
| const referenceData = dictionary.get(reference) || []; | ||
| referenceData.push(referenceDatum); | ||
| dictionary.set(reference, referenceData); | ||
| }; | ||
| const definitions = new Map; | ||
| const definitionLineIndices = []; | ||
| const duplicateDefinitions = []; | ||
| const filteredTokens = micromark.filterByTypes(tokens, [ | ||
| "definition", | ||
| "gfmFootnoteDefinition", | ||
| "definitionLabelString", | ||
| "gfmFootnoteDefinitionLabelString", | ||
| "gfmFootnoteCall", | ||
| "image", | ||
| "link", | ||
| "undefinedReferenceCollapsed", | ||
| "undefinedReferenceFull", | ||
| "undefinedReferenceShortcut" | ||
| ]); | ||
| for (const token of filteredTokens) { | ||
| let labelPrefix = ""; | ||
| switch (token.type) { | ||
| case "definition": | ||
| case "gfmFootnoteDefinition": | ||
| for (let i = token.startLine;i <= token.endLine; i++) { | ||
| definitionLineIndices.push(i - 1); | ||
| } | ||
| break; | ||
| case "gfmFootnoteDefinitionLabelString": | ||
| labelPrefix = "^"; | ||
| case "definitionLabelString": | ||
| { | ||
| const reference = normalizeReference(`${labelPrefix}${token.text}`); | ||
| if (definitions.has(reference)) { | ||
| duplicateDefinitions.push([reference, token.startLine - 1]); | ||
| } else { | ||
| const parent = micromark.getParentOfType(token, ["definition"]); | ||
| const destinationString = parent && micromark.getDescendantsByType(parent, ["definitionDestination", "definitionDestinationRaw", "definitionDestinationString"])[0]?.text; | ||
| definitions.set(reference, [token.startLine - 1, destinationString]); | ||
| } | ||
| } | ||
| break; | ||
| case "gfmFootnoteCall": | ||
| case "image": | ||
| case "link": | ||
| { | ||
| let isShortcut = token.children.length === 1; | ||
| const isFullOrCollapsed = token.children.length === 2 && !token.children.some((t) => t.type === "resource"); | ||
| const [labelText] = micromark.getDescendantsByType(token, ["label", "labelText"]); | ||
| const [referenceString] = micromark.getDescendantsByType(token, ["reference", "referenceString"]); | ||
| let label = getText(labelText); | ||
| if (!isShortcut && !isFullOrCollapsed) { | ||
| const [footnoteCallMarker, footnoteCallString] = token.children.filter((t) => ["gfmFootnoteCallMarker", "gfmFootnoteCallString"].includes(t.type)); | ||
| if (footnoteCallMarker && footnoteCallString) { | ||
| label = `${footnoteCallMarker.text}${footnoteCallString.text}`; | ||
| isShortcut = true; | ||
| } | ||
| } | ||
| if (isShortcut || isFullOrCollapsed) { | ||
| addReferenceToDictionary(token, getText(referenceString) || label, isShortcut); | ||
| } | ||
| } | ||
| break; | ||
| case "undefinedReferenceCollapsed": | ||
| case "undefinedReferenceFull": | ||
| case "undefinedReferenceShortcut": | ||
| { | ||
| const undefinedReference = micromark.getDescendantsByType(token, ["undefinedReference"])[0]; | ||
| const label = undefinedReference.children.map((t) => t.text).join(""); | ||
| const isShortcut = token.type === "undefinedReferenceShortcut"; | ||
| addReferenceToDictionary(token, label, isShortcut); | ||
| } | ||
| break; | ||
| } | ||
| } | ||
| return { | ||
| references, | ||
| shortcuts, | ||
| definitions, | ||
| duplicateDefinitions, | ||
| definitionLineIndices | ||
| }; | ||
| } | ||
| exports.getReferenceLinkImageData = getReferenceLinkImageData; | ||
| function getPreferredLineEnding(input, os) { | ||
| let cr = 0; | ||
| let lf = 0; | ||
| let crlf = 0; | ||
| const endings = input.match(newLineRe) || []; | ||
| for (const ending of endings) { | ||
| switch (ending) { | ||
| case "\r": | ||
| cr++; | ||
| break; | ||
| case ` | ||
| `: | ||
| lf++; | ||
| break; | ||
| case `\r | ||
| `: | ||
| crlf++; | ||
| break; | ||
| } | ||
| } | ||
| let preferredLineEnding = null; | ||
| if (!cr && !lf && !crlf) { | ||
| preferredLineEnding = os && os.EOL || ` | ||
| `; | ||
| } else if (lf >= crlf && lf >= cr) { | ||
| preferredLineEnding = ` | ||
| `; | ||
| } else if (crlf >= cr) { | ||
| preferredLineEnding = `\r | ||
| `; | ||
| } else { | ||
| preferredLineEnding = "\r"; | ||
| } | ||
| return preferredLineEnding; | ||
| } | ||
| exports.getPreferredLineEnding = getPreferredLineEnding; | ||
| function expandTildePath(file, os) { | ||
| const homedir = os && os.homedir && os.homedir(); | ||
| return homedir ? file.replace(/^~($|\/|\\)/, `${homedir}$1`) : file; | ||
| } | ||
| exports.expandTildePath = expandTildePath; | ||
| }); | ||
| // ../../node_modules/markdownlint/lib/markdownit.cjs | ||
| var require_markdownit = __commonJS((exports, module) => { | ||
| var { newLineRe } = require_helpers(); | ||
| function forEachInlineCodeSpan(input, handler) { | ||
| const backtickRe = /`+/g; | ||
| let match = null; | ||
| const backticksLengthAndIndex = []; | ||
| while ((match = backtickRe.exec(input)) !== null) { | ||
| backticksLengthAndIndex.push([match[0].length, match.index]); | ||
| } | ||
| const newLinesIndex = []; | ||
| while ((match = newLineRe.exec(input)) !== null) { | ||
| newLinesIndex.push(match.index); | ||
| } | ||
| let lineIndex = 0; | ||
| let lineStartIndex = 0; | ||
| let k = 0; | ||
| for (let i = 0;i < backticksLengthAndIndex.length - 1; i++) { | ||
| const [startLength, startIndex] = backticksLengthAndIndex[i]; | ||
| if (startIndex === 0 || input[startIndex - 1] !== "\\") { | ||
| for (let j = i + 1;j < backticksLengthAndIndex.length; j++) { | ||
| const [endLength, endIndex] = backticksLengthAndIndex[j]; | ||
| if (startLength === endLength) { | ||
| for (;k < newLinesIndex.length; k++) { | ||
| const newLineIndex = newLinesIndex[k]; | ||
| if (startIndex < newLineIndex) { | ||
| break; | ||
| } | ||
| lineIndex++; | ||
| lineStartIndex = newLineIndex + 1; | ||
| } | ||
| const columnIndex = startIndex - lineStartIndex + startLength; | ||
| handler(input.slice(startIndex + startLength, endIndex), lineIndex, columnIndex, startLength); | ||
| i = j; | ||
| break; | ||
| } | ||
| } | ||
| } | ||
| } | ||
| } | ||
| function freezeToken(token) { | ||
| if (token.attrs) { | ||
| for (const attr of token.attrs) { | ||
| Object.freeze(attr); | ||
| } | ||
| Object.freeze(token.attrs); | ||
| } | ||
| if (token.children) { | ||
| for (const child of token.children) { | ||
| freezeToken(child); | ||
| } | ||
| Object.freeze(token.children); | ||
| } | ||
| if (token.map) { | ||
| Object.freeze(token.map); | ||
| } | ||
| Object.freeze(token); | ||
| } | ||
| function annotateAndFreezeTokens(tokens, lines) { | ||
| let trMap = null; | ||
| const markdownItTokens = tokens; | ||
| for (const token of markdownItTokens) { | ||
| if (token.type === "tr_open") { | ||
| trMap = token.map; | ||
| } else if (token.type === "tr_close") { | ||
| trMap = null; | ||
| } | ||
| if (!token.map && trMap) { | ||
| token.map = [...trMap]; | ||
| } | ||
| if (token.map) { | ||
| token.line = lines[token.map[0]]; | ||
| token.lineNumber = token.map[0] + 1; | ||
| while (token.map[1] && !(lines[token.map[1] - 1] || "").trim()) { | ||
| token.map[1]--; | ||
| } | ||
| } | ||
| if (token.children) { | ||
| const codeSpanExtraLines = []; | ||
| if (token.children.some((child) => child.type === "code_inline")) { | ||
| forEachInlineCodeSpan(token.content, (code) => { | ||
| codeSpanExtraLines.push(code.split(newLineRe).length - 1); | ||
| }); | ||
| } | ||
| let lineNumber = token.lineNumber; | ||
| for (const child of token.children) { | ||
| child.lineNumber = lineNumber; | ||
| child.line = lines[lineNumber - 1]; | ||
| if (child.type === "softbreak" || child.type === "hardbreak") { | ||
| lineNumber++; | ||
| } else if (child.type === "code_inline") { | ||
| lineNumber += codeSpanExtraLines.shift(); | ||
| } | ||
| } | ||
| } | ||
| freezeToken(token); | ||
| } | ||
| Object.freeze(tokens); | ||
| } | ||
| function getMarkdownItTokens(markdownIt, content, lines) { | ||
| const tokens = markdownIt.parse(content, {}); | ||
| annotateAndFreezeTokens(tokens, lines); | ||
| return tokens; | ||
| } | ||
| module.exports = { | ||
| forEachInlineCodeSpan, | ||
| getMarkdownItTokens | ||
| }; | ||
| }); | ||
| export default require_markdownit(); | ||
| export { require_shared, require_micromark_helpers, require_helpers, require_markdownit }; |
import { createRequire } from "node:module";

// Bundler runtime chunk: CommonJS/ESM interop helpers shared by generated chunks.
var __create = Object.create;
var __getProtoOf = Object.getPrototypeOf;
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;

// Wraps a CommonJS module as an ES-module-shaped object: a "default" property
// holding the module itself plus live getters for each own key.
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  const to = isNodeMode || !mod || !mod.__esModule
    ? __defProp(target, "default", { value: mod, enumerable: true })
    : target;
  for (let key of __getOwnPropNames(mod))
    if (!__hasOwnProp.call(to, key))
      __defProp(to, key, {
        get: () => mod[key],
        enumerable: true
      });
  return to;
};

// Wraps a CommonJS factory so it runs at most once; later calls return the
// cached module.exports.
var __commonJS = (cb, mod) => () => (mod || cb((mod = { exports: {} }).exports, mod), mod.exports);

// Defines enumerable getter properties on `target` for each entry of `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, {
      get: all[name],
      enumerable: true,
      configurable: true,
      set: (newValue) => all[name] = () => newValue
    });
};

var __require = /* @__PURE__ */ createRequire(import.meta.url);
export { __toESM, __commonJS, __export, __require };
+4
-4
@@ -11,4 +11,4 @@ #!/usr/bin/env node | ||
| logInfo | ||
| } from "../chunk-5ga34he1.js"; | ||
| import"../chunk-6sxyen7j.js"; | ||
| } from "../chunk-p59gvrv4.js"; | ||
| import"../chunk-xne0pxt1.js"; | ||
@@ -618,3 +618,3 @@ // bin/cli.ts | ||
| // package.json | ||
| var version = "0.1.8"; | ||
| var version = "0.1.9"; | ||
@@ -657,3 +657,3 @@ // bin/cli.ts | ||
| clean: options.clean || false, | ||
| excludeAuthors: options.excludeAuthors ? options.excludeAuthors.split(",").map((s) => s.trim()) : [], | ||
| excludeAuthors: options.excludeAuthors ? options.excludeAuthors.split(",").map((s) => s.trim()) : undefined, | ||
| includeAuthors: options.includeAuthors ? options.includeAuthors.split(",").map((s) => s.trim()) : [], | ||
@@ -660,0 +660,0 @@ hideAuthorEmail: options.hideAuthorEmail || false, |
@@ -46,4 +46,4 @@ import { | ||
| themes | ||
| } from "../chunk-5ga34he1.js"; | ||
| import"../chunk-6sxyen7j.js"; | ||
| } from "../chunk-p59gvrv4.js"; | ||
| import"../chunk-xne0pxt1.js"; | ||
| export { | ||
@@ -50,0 +50,0 @@ themes, |
+2
-2
| { | ||
| "name": "@stacksjs/logsmith", | ||
| "type": "module", | ||
| "version": "0.1.8", | ||
| "version": "0.1.9", | ||
| "description": "Forge beautiful changelog automatically.", | ||
@@ -64,3 +64,3 @@ "author": "Chris Breuer <chris@stacksjs.org>", | ||
| "bunfig": "^0.10.1", | ||
| "markdownlint": "^0.34.0" | ||
| "markdownlint": "^0.38.0" | ||
| }, | ||
@@ -67,0 +67,0 @@ "devDependencies": { |
Sorry, the diff of this file is too big to display
import { createRequire } from "node:module";

// Bundler runtime chunk (reduced variant without __export).
var __create = Object.create;
var __getProtoOf = Object.getPrototypeOf;
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;

// Wraps a CommonJS module as an ES-module-shaped object: a "default" property
// holding the module itself plus live getters for each own key.
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  const to = isNodeMode || !mod || !mod.__esModule
    ? __defProp(target, "default", { value: mod, enumerable: true })
    : target;
  for (let key of __getOwnPropNames(mod))
    if (!__hasOwnProp.call(to, key))
      __defProp(to, key, {
        get: () => mod[key],
        enumerable: true
      });
  return to;
};

// Wraps a CommonJS factory so it runs at most once; later calls return the
// cached module.exports.
var __commonJS = (cb, mod) => () => (mod || cb((mod = { exports: {} }).exports, mod), mod.exports);

var __require = /* @__PURE__ */ createRequire(import.meta.url);
export { __toESM, __commonJS, __require };
Sorry, the diff of this file is too big to display
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 6 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 2 instances in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain risk: Package contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
Obfuscated code
Supply chain risk: Obfuscated files are intentionally packed to hide their behavior. This could be a sign of malware.
Found 1 instance in 1 package
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 4 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 2 instances in 1 package
High entropy strings
Supply chain risk: Contains high entropy strings. This could be a sign of encrypted data, leaked secrets or obfuscated code.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain risk: Package contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
15
7.14%17606
2.34%0
-100%583346
-18.44%123
57.69%+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
Updated