@lexical/markdown
Advanced tools
Comparing version 0.2.5 to 0.2.6
@@ -9,12 +9,89 @@ /** | ||
import type {DecoratorNode, LexicalEditor} from 'lexical'; | ||
import type { | ||
LexicalEditor, | ||
ElementNode, | ||
LexicalNode, | ||
TextFormatType, | ||
} from 'lexical'; | ||
export function registerMarkdownShortcuts<T>( | ||
export type Transformer = | ||
| ElementTransformer | ||
| TextFormatTransformer | ||
| TextMatchTransformer; | ||
export type ElementTransformer = { | ||
export: ( | ||
node: LexicalNode, | ||
traverseChildren: (node: ElementNode) => string, | ||
) => string | null; | ||
regExp: RegExp; | ||
replace: ( | ||
parentNode: ElementNode, | ||
children: Array<LexicalNode>, | ||
match: Array<string>, | ||
isImport: boolean, | ||
) => void; | ||
type: 'element'; | ||
}; | ||
export type TextFormatTransformer = $ReadOnly<{ | ||
format: $ReadOnlyArray<TextFormatType>; | ||
tag: string; | ||
type: 'text-format'; | ||
}>; | ||
export type TextMatchTransformer = $ReadOnly<{ | ||
export: ( | ||
node: LexicalNode, | ||
exportChildren: (node: ElementNode) => string, | ||
exportFormat: (node: TextNode, textContent: string) => string, | ||
) => string | null; | ||
importRegExp: RegExp; | ||
regExp: RegExp; | ||
replace: (node: TextNode, match: RegExp$matchResult) => void; | ||
trigger: string; | ||
type: 'text-match'; | ||
}>; | ||
// TODO: | ||
// transformers should be required argument, breaking change | ||
export function registerMarkdownShortcuts( | ||
editor: LexicalEditor, | ||
createHorizontalRuleNode: () => DecoratorNode<T>, | ||
transformers?: Array<Transformer>, | ||
): () => void; | ||
// TODO: | ||
// transformers should be required argument, breaking change | ||
export function $convertFromMarkdownString( | ||
markdownString: string, | ||
editor: LexicalEditor, | ||
markdown: string, | ||
transformers?: Array<Transformer>, | ||
): void; | ||
export function $convertToMarkdownString(): string; | ||
// TODO: | ||
// transformers should be required argument, breaking change | ||
export function $convertToMarkdownString( | ||
transformers?: Array<Transformer>, | ||
): string; | ||
export const BOLD_ITALIC_STAR: TextFormatTransformer; | ||
export const BOLD_ITALIC_UNDERSCORE: TextFormatTransformer; | ||
export const BOLD_STAR: TextFormatTransformer; | ||
export const BOLD_UNDERSCORE: TextFormatTransformer; | ||
export const INLINE_CODE: TextFormatTransformer; | ||
export const ITALIC_STAR: TextFormatTransformer; | ||
export const ITALIC_UNDERSCORE: TextFormatTransformer; | ||
export const STRIKETHROUGH: TextFormatTransformer; | ||
export const UNORDERED_LIST: ElementTransformer; | ||
export const CODE: ElementTransformer; | ||
export const HEADING: ElementTransformer; | ||
export const ORDERED_LIST: ElementTransformer; | ||
export const QUOTE: ElementTransformer; | ||
export const CHECK_LIST: ElementTransformer; | ||
export const LINK: TextMatchTransformer; | ||
export const TRANSFORMERS: Array<Transformer>; | ||
export const ELEMENT_TRANSFORMERS: Array<ElementTransformer>; | ||
export const TEXT_FORMAT_TRANSFORMERS: Array<TextFormatTransformer>; | ||
export const TEXT_MATCH_TRANSFORMERS: Array<TextFormatTransformer>; |
@@ -9,8 +9,7 @@ /** | ||
var lexical = require('lexical'); | ||
var code = require('@lexical/code'); | ||
var link = require('@lexical/link'); | ||
var list = require('@lexical/list'); | ||
var lexical = require('lexical'); | ||
var link = require('@lexical/link'); | ||
var richText = require('@lexical/rich-text'); | ||
var text = require('@lexical/text'); | ||
@@ -25,907 +24,386 @@ /** | ||
*/ | ||
/* | ||
How to add a new syntax to capture and transform. | ||
1. Create a new enumeration by adding to MarkdownFormatKind. | ||
2. Add a new criteria with a regEx pattern. See markdownStrikethrough as an example. | ||
3. Add your block criteria (e.g. '# ') to allMarkdownCriteria or | ||
your text criteria (e.g. *MyItalic*) to allMarkdownCriteriaForTextNodes. | ||
4. Add your Lexical block specific transforming code here: transformTextNodeForText. | ||
Add your Lexical text specific transforming code here: transformTextNodeForText. | ||
*/ | ||
// The trigger state helps to capture EditorState information | ||
// from the prior and current EditorState. | ||
// This is then used to determined if an auto format has been triggered. | ||
function indexBy(list, callback) { | ||
const index = {}; | ||
// Eventually we need to support multiple trigger string's including newlines. | ||
const SEPARATOR_BETWEEN_TEXT_AND_NON_TEXT_NODES = '\u0004'; // Select an unused unicode character to separate text and non-text nodes. | ||
for (const item of list) { | ||
const key = callback(item); | ||
const SEPARATOR_LENGTH = SEPARATOR_BETWEEN_TEXT_AND_NON_TEXT_NODES.length; | ||
const spaceTrigger = { | ||
triggerKind: 'space_trigger', | ||
triggerString: '\u0020' | ||
}; // Future todo: add support for ``` + carriage return either inside or not inside code block. Should toggle between. | ||
// const codeBlockTrigger : AutoFormatTrigger = { | ||
// triggerKind: 'codeBlock_trigger', | ||
// triggerString: '```', // + new paragraph element or new code block element. | ||
// }; | ||
if (index[key]) { | ||
index[key].push(item); | ||
} else { | ||
index[key] = [item]; | ||
} | ||
} | ||
const triggers = [spaceTrigger | ||
/*, codeBlockTrigger*/ | ||
]; // Future Todo: speed up performance by having non-capture group variations of the regex. | ||
const autoFormatBase = { | ||
markdownFormatKind: null, | ||
regEx: /(?:)/, | ||
regExForAutoFormatting: /(?:)/, | ||
requiresParagraphStart: false | ||
}; | ||
const paragraphStartBase = { ...autoFormatBase, | ||
requiresParagraphStart: true | ||
}; | ||
const markdownHeader1 = { ...paragraphStartBase, | ||
export: createHeadingExport(1), | ||
markdownFormatKind: 'paragraphH1', | ||
regEx: /^(?:# )/, | ||
regExForAutoFormatting: /^(?:# )/ | ||
}; | ||
const markdownHeader2 = { ...paragraphStartBase, | ||
export: createHeadingExport(2), | ||
markdownFormatKind: 'paragraphH2', | ||
regEx: /^(?:## )/, | ||
regExForAutoFormatting: /^(?:## )/ | ||
}; | ||
const markdownHeader3 = { ...paragraphStartBase, | ||
export: createHeadingExport(3), | ||
markdownFormatKind: 'paragraphH3', | ||
regEx: /^(?:### )/, | ||
regExForAutoFormatting: /^(?:### )/ | ||
}; | ||
const markdownHeader4 = { ...paragraphStartBase, | ||
export: createHeadingExport(4), | ||
markdownFormatKind: 'paragraphH4', | ||
regEx: /^(?:#### )/, | ||
regExForAutoFormatting: /^(?:#### )/ | ||
}; | ||
const markdownHeader5 = { ...paragraphStartBase, | ||
export: createHeadingExport(5), | ||
markdownFormatKind: 'paragraphH5', | ||
regEx: /^(?:##### )/, | ||
regExForAutoFormatting: /^(?:##### )/ | ||
}; | ||
const markdownBlockQuote = { ...paragraphStartBase, | ||
export: blockQuoteExport, | ||
markdownFormatKind: 'paragraphBlockQuote', | ||
regEx: /^(?:> )/, | ||
regExForAutoFormatting: /^(?:> )/ | ||
}; | ||
const markdownUnorderedListDash = { ...paragraphStartBase, | ||
export: listExport, | ||
markdownFormatKind: 'paragraphUnorderedList', | ||
regEx: /^(\s{0,10})(?:- )/, | ||
regExForAutoFormatting: /^(\s{0,10})(?:- )/ | ||
}; | ||
const markdownUnorderedListAsterisk = { ...paragraphStartBase, | ||
export: listExport, | ||
markdownFormatKind: 'paragraphUnorderedList', | ||
regEx: /^(\s{0,10})(?:\* )/, | ||
regExForAutoFormatting: /^(\s{0,10})(?:\* )/ | ||
}; | ||
const markdownCodeBlock = { ...paragraphStartBase, | ||
export: codeBlockExport, | ||
markdownFormatKind: 'paragraphCodeBlock', | ||
regEx: /^(```)$/, | ||
regExForAutoFormatting: /^(```)([a-z]*)( )/ | ||
}; | ||
const markdownOrderedList = { ...paragraphStartBase, | ||
export: listExport, | ||
markdownFormatKind: 'paragraphOrderedList', | ||
regEx: /^(\s{0,10})(\d+)\.\s/, | ||
regExForAutoFormatting: /^(\s{0,10})(\d+)\.\s/ | ||
}; | ||
const markdownHorizontalRule = { ...paragraphStartBase, | ||
markdownFormatKind: 'horizontalRule', | ||
regEx: /^(?:\*\*\*)$/, | ||
regExForAutoFormatting: /^(?:\*\*\* )/ | ||
}; | ||
const markdownHorizontalRuleUsingDashes = { ...paragraphStartBase, | ||
markdownFormatKind: 'horizontalRule', | ||
regEx: /^(?:---)$/, | ||
regExForAutoFormatting: /^(?:--- )/ | ||
}; | ||
const markdownInlineCode = { ...autoFormatBase, | ||
exportFormat: 'code', | ||
exportTag: '`', | ||
markdownFormatKind: 'code', | ||
regEx: /(`)(\s*)([^`]*)(\s*)(`)()/, | ||
regExForAutoFormatting: /(`)(\s*\b)([^`]*)(\b\s*)(`)(\s)$/ | ||
}; | ||
const markdownBold = { ...autoFormatBase, | ||
exportFormat: 'bold', | ||
exportTag: '**', | ||
markdownFormatKind: 'bold', | ||
regEx: /(\*\*)(\s*)([^\*\*]*)(\s*)(\*\*)()/, | ||
regExForAutoFormatting: /(\*\*)(\s*\b)([^\*\*]*)(\b\s*)(\*\*)(\s)$/ | ||
}; | ||
const markdownItalic = { ...autoFormatBase, | ||
exportFormat: 'italic', | ||
exportTag: '*', | ||
markdownFormatKind: 'italic', | ||
regEx: /(\*)(\s*)([^\*]*)(\s*)(\*)()/, | ||
regExForAutoFormatting: /(\*)(\s*\b)([^\*]*)(\b\s*)(\*)(\s)$/ | ||
}; | ||
const markdownBold2 = { ...autoFormatBase, | ||
exportFormat: 'bold', | ||
exportTag: '_', | ||
markdownFormatKind: 'bold', | ||
regEx: /(__)(\s*)([^__]*)(\s*)(__)()/, | ||
regExForAutoFormatting: /(__)(\s*)([^__]*)(\s*)(__)(\s)$/ | ||
}; | ||
const markdownItalic2 = { ...autoFormatBase, | ||
exportFormat: 'italic', | ||
exportTag: '_', | ||
markdownFormatKind: 'italic', | ||
regEx: /(_)()([^_]*)()(_)()/, | ||
regExForAutoFormatting: /(_)()([^_]*)()(_)(\s)$/ // Maintain 7 groups. | ||
}; // Markdown does not support underline, but we can allow folks to use | ||
// the HTML tags for underline. | ||
const fakeMarkdownUnderline = { ...autoFormatBase, | ||
exportFormat: 'underline', | ||
exportTag: '<u>', | ||
exportTagClose: '</u>', | ||
markdownFormatKind: 'underline', | ||
regEx: /(\<u\>)(\s*)([^\<]*)(\s*)(\<\/u\>)()/, | ||
regExForAutoFormatting: /(\<u\>)(\s*\b)([^\<]*)(\b\s*)(\<\/u\>)(\s)$/ | ||
}; | ||
const markdownStrikethrough = { ...autoFormatBase, | ||
exportFormat: 'strikethrough', | ||
exportTag: '~~', | ||
markdownFormatKind: 'strikethrough', | ||
regEx: /(~~)(\s*)([^~~]*)(\s*)(~~)()/, | ||
regExForAutoFormatting: /(~~)(\s*\b)([^~~]*)(\b\s*)(~~)(\s)$/ | ||
}; | ||
const markdownStrikethroughItalicBold = { ...autoFormatBase, | ||
markdownFormatKind: 'strikethrough_italic_bold', | ||
regEx: /(~~_\*\*)(\s*\b)([^~~_\*\*][^\*\*_~~]*)(\b\s*)(\*\*_~~)()/, | ||
regExForAutoFormatting: /(~~_\*\*)(\s*\b)([^~~_\*\*][^\*\*_~~]*)(\b\s*)(\*\*_~~)(\s)$/ | ||
}; | ||
const markdownItalicbold = { ...autoFormatBase, | ||
markdownFormatKind: 'italic_bold', | ||
regEx: /(_\*\*)(\s*\b)([^_\*\*][^\*\*_]*)(\b\s*)(\*\*_)/, | ||
regExForAutoFormatting: /(_\*\*)(\s*\b)([^_\*\*][^\*\*_]*)(\b\s*)(\*\*_)(\s)$/ | ||
}; | ||
const markdownStrikethroughItalic = { ...autoFormatBase, | ||
markdownFormatKind: 'strikethrough_italic', | ||
regEx: /(~~_)(\s*)([^~~_][^_~~]*)(\s*)(_~~)/, | ||
regExForAutoFormatting: /(~~_)(\s*)([^~~_][^_~~]*)(\s*)(_~~)(\s)$/ | ||
}; | ||
const markdownStrikethroughBold = { ...autoFormatBase, | ||
markdownFormatKind: 'strikethrough_bold', | ||
regEx: /(~~\*\*)(\s*\b)([^~~\*\*][^\*\*~~]*)(\b\s*)(\*\*~~)/, | ||
regExForAutoFormatting: /(~~\*\*)(\s*\b)([^~~\*\*][^\*\*~~]*)(\b\s*)(\*\*~~)(\s)$/ | ||
}; | ||
const markdownLink = { ...autoFormatBase, | ||
markdownFormatKind: 'link', | ||
regEx: /(\[)([^\]]*)(\]\()([^)]*)(\)*)()/, | ||
regExForAutoFormatting: /(\[)([^\]]*)(\]\()([^)]*)(\)*)(\s)$/ | ||
}; | ||
const allMarkdownCriteriaForTextNodes = [// Place the combination formats ahead of the individual formats. | ||
// Combos | ||
markdownStrikethroughItalicBold, markdownItalicbold, markdownStrikethroughItalic, markdownStrikethroughBold, // Individuals | ||
markdownInlineCode, markdownBold, markdownItalic, // Must appear after markdownBold | ||
markdownBold2, markdownItalic2, // Must appear after markdownBold2. | ||
fakeMarkdownUnderline, markdownStrikethrough, markdownLink]; | ||
const allMarkdownCriteriaForParagraphs = [markdownHeader1, markdownHeader2, markdownHeader3, markdownHeader4, markdownHeader5, markdownBlockQuote, markdownUnorderedListDash, markdownUnorderedListAsterisk, markdownOrderedList, markdownCodeBlock, markdownHorizontalRule, markdownHorizontalRuleUsingDashes]; | ||
const allMarkdownCriteria = [...allMarkdownCriteriaForParagraphs, ...allMarkdownCriteriaForTextNodes]; | ||
function getAllTriggers() { | ||
return triggers; | ||
return index; | ||
} | ||
function getAllMarkdownCriteriaForParagraphs() { | ||
return allMarkdownCriteriaForParagraphs; | ||
} | ||
function getAllMarkdownCriteriaForTextNodes() { | ||
return allMarkdownCriteriaForTextNodes; | ||
} | ||
function getAllMarkdownCriteria() { | ||
return allMarkdownCriteria; | ||
} | ||
function getInitialScanningContext(editor, isAutoFormatting, textNodeWithOffset, triggerState) { | ||
function transformersByType(transformers) { | ||
const byType = indexBy(transformers, t => t.type); | ||
return { | ||
currentElementNode: null, | ||
editor, | ||
isAutoFormatting, | ||
isWithinCodeBlock: false, | ||
joinedText: null, | ||
markdownCriteria: { | ||
markdownFormatKind: 'noTransformation', | ||
regEx: /(?:)/, | ||
// Empty reg ex. | ||
regExForAutoFormatting: /(?:)/, | ||
// Empty reg ex. | ||
requiresParagraphStart: null | ||
}, | ||
patternMatchResults: { | ||
regExCaptureGroups: [] | ||
}, | ||
textNodeWithOffset, | ||
triggerState | ||
// $FlowFixMe | ||
element: byType.element, | ||
// $FlowFixMe | ||
textFormat: byType['text-format'], | ||
// $FlowFixMe | ||
textMatch: byType['text-match'] | ||
}; | ||
} | ||
function resetScanningContext(scanningContext) { | ||
scanningContext.joinedText = null; | ||
scanningContext.markdownCriteria = { | ||
markdownFormatKind: 'noTransformation', | ||
regEx: /(?:)/, | ||
// Empty reg ex. | ||
regExForAutoFormatting: /(?:)/, | ||
// Empty reg ex. | ||
requiresParagraphStart: null | ||
}; | ||
scanningContext.patternMatchResults = { | ||
regExCaptureGroups: [] | ||
}; | ||
scanningContext.triggerState = null; | ||
scanningContext.textNodeWithOffset = null; | ||
return scanningContext; | ||
} | ||
function getCodeBlockCriteria() { | ||
return markdownCodeBlock; | ||
} | ||
function getPatternMatchResultsForCriteria(markdownCriteria, scanningContext, parentElementNode) { | ||
if (markdownCriteria.requiresParagraphStart === true) { | ||
return getPatternMatchResultsForParagraphs(markdownCriteria, scanningContext); | ||
} | ||
return getPatternMatchResultsForText(markdownCriteria, scanningContext, parentElementNode); | ||
} | ||
function getPatternMatchResultsForCodeBlock(scanningContext, text) { | ||
const markdownCriteria = getCodeBlockCriteria(); | ||
return getPatternMatchResultsWithRegEx(text, true, false, scanningContext.isAutoFormatting ? markdownCriteria.regExForAutoFormatting : markdownCriteria.regEx); | ||
} | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
* | ||
* | ||
*/ | ||
function createMarkdownExport(transformers) { | ||
const byType = transformersByType(transformers); // Export only uses text formats that are responsible for single format | ||
// e.g. it will filter out *** (bold, italic) and instead use separate ** and * | ||
function getPatternMatchResultsWithRegEx(textToSearch, matchMustAppearAtStartOfString, matchMustAppearAtEndOfString, regEx) { | ||
const patternMatchResults = { | ||
regExCaptureGroups: [] | ||
}; | ||
const regExMatches = textToSearch.match(regEx); | ||
const textFormatTransformers = byType.textFormat.filter(transformer => transformer.format.length === 1); | ||
return () => { | ||
const output = []; | ||
const children = lexical.$getRoot().getChildren(); | ||
if (regExMatches !== null && regExMatches.length > 0 && (matchMustAppearAtStartOfString === false || regExMatches.index === 0) && (matchMustAppearAtEndOfString === false || regExMatches.index + regExMatches[0].length === textToSearch.length)) { | ||
const captureGroupsCount = regExMatches.length; | ||
let runningLength = regExMatches.index; | ||
for (const child of children) { | ||
const result = exportTopLevelElements(child, byType.element, textFormatTransformers, byType.textMatch); | ||
for (let captureGroupIndex = 0; captureGroupIndex < captureGroupsCount; captureGroupIndex++) { | ||
const textContent = regExMatches[captureGroupIndex]; | ||
patternMatchResults.regExCaptureGroups.push({ | ||
offsetInParent: runningLength, | ||
text: textContent | ||
}); // The 0th capture group is special in that it's text contents is | ||
// a join of all subsequent capture groups. So, skip this group | ||
// when calculating the runningLength. | ||
if (captureGroupIndex > 0) { | ||
runningLength += textContent.length; | ||
if (result != null) { | ||
output.push(result); | ||
} | ||
} | ||
return patternMatchResults; | ||
} | ||
return null; | ||
return output.join('\n'); | ||
}; | ||
} | ||
function hasPatternMatchResults(scanningContext) { | ||
return scanningContext.patternMatchResults.regExCaptureGroups.length > 0; | ||
} | ||
function getTextNodeWithOffsetOrThrow(scanningContext) { | ||
const textNodeWithOffset = scanningContext.textNodeWithOffset; | ||
function exportTopLevelElements(node, elementTransformers, textTransformersIndex, textMatchTransformers) { | ||
for (const transformer of elementTransformers) { | ||
const result = transformer.export(node, _node => exportChildren(_node, textTransformersIndex, textMatchTransformers)); | ||
if (textNodeWithOffset == null) { | ||
{ | ||
throw Error(`Expect to have a text node with offset.`); | ||
if (result != null) { | ||
return result; | ||
} | ||
} | ||
return textNodeWithOffset; | ||
return lexical.$isElementNode(node) ? exportChildren(node, textTransformersIndex, textMatchTransformers) : null; | ||
} | ||
function getPatternMatchResultsForParagraphs(markdownCriteria, scanningContext) { | ||
const textNodeWithOffset = getTextNodeWithOffsetOrThrow(scanningContext); // At start of paragraph. | ||
function exportChildren(node, textTransformersIndex, textMatchTransformers) { | ||
const output = []; | ||
const children = node.getChildren(); | ||
if (textNodeWithOffset.node.getPreviousSibling() === null) { | ||
const textToSearch = textNodeWithOffset.node.getTextContent(); | ||
return getPatternMatchResultsWithRegEx(textToSearch, true, false, scanningContext.isAutoFormatting ? markdownCriteria.regExForAutoFormatting : markdownCriteria.regEx); | ||
} | ||
mainLoop: for (const child of children) { | ||
if (lexical.$isLineBreakNode(child)) { | ||
output.push('\n'); | ||
} else if (lexical.$isTextNode(child)) { | ||
output.push(exportTextFormat(child, child.getTextContent(), textTransformersIndex)); | ||
} else { | ||
for (const transformer of textMatchTransformers) { | ||
const result = transformer.export(child, parentNode => exportChildren(parentNode, textTransformersIndex, textMatchTransformers), (textNode, textContent) => exportTextFormat(textNode, textContent, textTransformersIndex)); | ||
return null; | ||
} | ||
if (result != null) { | ||
output.push(result); | ||
continue mainLoop; | ||
} | ||
} | ||
function getPatternMatchResultsForText(markdownCriteria, scanningContext, parentElementNode) { | ||
if (scanningContext.joinedText == null) { | ||
if (lexical.$isElementNode(parentElementNode)) { | ||
if (scanningContext.joinedText == null) { | ||
// Lazy calculate the text to search. | ||
scanningContext.joinedText = text.$joinTextNodesInElementNode(parentElementNode, SEPARATOR_BETWEEN_TEXT_AND_NON_TEXT_NODES, getTextNodeWithOffsetOrThrow(scanningContext)); | ||
if (lexical.$isElementNode(child)) { | ||
output.push(exportChildren(child, textTransformersIndex, textMatchTransformers)); | ||
} | ||
} else { | ||
{ | ||
throw Error(`Expected node ${parentElementNode.__key} to to be a ElementNode.`); | ||
} | ||
} | ||
} | ||
const matchMustAppearAtEndOfString = markdownCriteria.regExForAutoFormatting === true; | ||
return getPatternMatchResultsWithRegEx(scanningContext.joinedText, false, matchMustAppearAtEndOfString, scanningContext.isAutoFormatting ? markdownCriteria.regExForAutoFormatting : markdownCriteria.regEx); | ||
return output.join(''); | ||
} | ||
function getNewNodeForCriteria(scanningContext, element, createHorizontalRuleNode) { | ||
let newNode = null; | ||
const shouldDelete = false; | ||
const children = element.getChildren(); | ||
const markdownCriteria = scanningContext.markdownCriteria; | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
function exportTextFormat(node, textContent, textTransformers) { | ||
let output = textContent; | ||
const applied = new Set(); | ||
if (markdownCriteria.markdownFormatKind != null) { | ||
switch (markdownCriteria.markdownFormatKind) { | ||
case 'paragraphH1': | ||
{ | ||
newNode = richText.$createHeadingNode('h1'); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
for (const transformer of textTransformers) { | ||
const format = transformer.format[0]; | ||
const tag = transformer.tag; | ||
case 'paragraphH2': | ||
{ | ||
newNode = richText.$createHeadingNode('h2'); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
if (hasFormat(node, format) && !applied.has(format)) { | ||
// Multiple tags might be used for the same format (*, _) | ||
applied.add(format); // Prevent adding opening tag is already opened by the previous sibling | ||
case 'paragraphH3': | ||
{ | ||
newNode = richText.$createHeadingNode('h3'); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
const previousNode = getTextSibling(node, true); | ||
case 'paragraphH4': | ||
{ | ||
newNode = richText.$createHeadingNode('h4'); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
if (!hasFormat(previousNode, format)) { | ||
output = tag + output; | ||
} // Prevent adding closing tag if next sibling will do it | ||
case 'paragraphH5': | ||
{ | ||
newNode = richText.$createHeadingNode('h5'); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
case 'paragraphBlockQuote': | ||
{ | ||
newNode = richText.$createQuoteNode(); | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
const nextNode = getTextSibling(node, false); | ||
case 'paragraphUnorderedList': | ||
{ | ||
createListOrMergeWithPrevious(element, children, patternMatchResults, 'ul'); | ||
return { | ||
newNode: null, | ||
shouldDelete: false | ||
}; | ||
} | ||
case 'paragraphOrderedList': | ||
{ | ||
const startAsString = patternMatchResults.regExCaptureGroups.length > 1 ? patternMatchResults.regExCaptureGroups[patternMatchResults.regExCaptureGroups.length - 1].text : '1'; // For conversion, don't use start number. | ||
// For short-cuts aka autoFormatting, use start number. | ||
// Later, this should be surface dependent and externalized. | ||
const start = scanningContext.isAutoFormatting ? parseInt(startAsString, 10) : undefined; | ||
createListOrMergeWithPrevious(element, children, patternMatchResults, 'ol', start); | ||
return { | ||
newNode: null, | ||
shouldDelete: false | ||
}; | ||
} | ||
case 'paragraphCodeBlock': | ||
{ | ||
// Toggle code and paragraph nodes. | ||
if (scanningContext.isAutoFormatting === false) { | ||
const shouldToggle = hasPatternMatchResults(scanningContext); | ||
if (shouldToggle) { | ||
scanningContext.isWithinCodeBlock = scanningContext.isWithinCodeBlock !== true; // When toggling, always clear the code block element node. | ||
scanningContext.currentElementNode = null; | ||
return { | ||
newNode: null, | ||
shouldDelete: true | ||
}; | ||
} | ||
if (scanningContext.isWithinCodeBlock) { | ||
// Create the code block and return it to the caller. | ||
if (scanningContext.currentElementNode == null) { | ||
const newCodeBlockNode = code.$createCodeNode(); | ||
newCodeBlockNode.append(...children); | ||
scanningContext.currentElementNode = newCodeBlockNode; | ||
return { | ||
newNode: newCodeBlockNode, | ||
shouldDelete: false | ||
}; | ||
} // Build up the code block with a line break and the children. | ||
if (scanningContext.currentElementNode != null) { | ||
const codeBlockNode = scanningContext.currentElementNode; | ||
const lineBreakNode = lexical.$createLineBreakNode(); | ||
codeBlockNode.append(lineBreakNode); | ||
if (children.length) { | ||
codeBlockNode.append(lineBreakNode); | ||
} | ||
codeBlockNode.append(...children); | ||
} | ||
} | ||
return { | ||
newNode: null, | ||
shouldDelete: true | ||
}; | ||
} | ||
if (scanningContext.triggerState != null && scanningContext.triggerState.isCodeBlock) { | ||
newNode = lexical.$createParagraphNode(); | ||
} else { | ||
newNode = code.$createCodeNode(); | ||
const codingLanguage = patternMatchResults.regExCaptureGroups.length >= 3 ? patternMatchResults.regExCaptureGroups[2].text : null; | ||
if (codingLanguage != null && codingLanguage.length > 0) { | ||
newNode.setLanguage(codingLanguage); | ||
} | ||
} | ||
newNode.append(...children); | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
case 'horizontalRule': | ||
{ | ||
if (createHorizontalRuleNode != null) { | ||
// return null for newNode. Insert the HR here. | ||
const horizontalRuleNode = createHorizontalRuleNode(); | ||
element.insertBefore(horizontalRuleNode); | ||
} | ||
break; | ||
} | ||
if (!hasFormat(nextNode, format)) { | ||
output += tag; | ||
} | ||
} | ||
} | ||
return { | ||
newNode, | ||
shouldDelete | ||
}; | ||
} | ||
return output; | ||
} // Get next or previous text sibling a text node, including cases | ||
// when it's a child of inline element (e.g. link) | ||
function createListOrMergeWithPrevious(element, children, patternMatchResults, tag, start) { | ||
const listItem = list.$createListItemNode(); | ||
const indentMatch = patternMatchResults.regExCaptureGroups[0].text.match(/^\s*/); | ||
const indent = indentMatch ? Math.floor(indentMatch[0].length / 4) : 0; | ||
listItem.append(...children); // Checking if previous element is a list, and if so append | ||
// new list item inside instead of creating new list | ||
const prevElement = element.getPreviousSibling(); | ||
function getTextSibling(node, backward) { | ||
let sibling = backward ? node.getPreviousSibling() : node.getNextSibling(); | ||
if (list.$isListNode(prevElement) && prevElement.getTag() === tag) { | ||
prevElement.append(listItem); | ||
element.remove(); | ||
} else { | ||
const list$1 = list.$createListNode(tag, start); | ||
list$1.append(listItem); | ||
element.replace(list$1); | ||
} | ||
if (!sibling) { | ||
const parent = node.getParentOrThrow(); | ||
if (indent) { | ||
listItem.setIndent(indent); | ||
if (parent.isInline()) { | ||
sibling = backward ? parent.getPreviousSibling() : parent.getNextSibling(); | ||
} | ||
} | ||
} | ||
function transformTextNodeForMarkdownCriteria(scanningContext, elementNode, createHorizontalRuleNode) { | ||
if (scanningContext.markdownCriteria.requiresParagraphStart === true) { | ||
transformTextNodeForElementNode(elementNode, scanningContext, createHorizontalRuleNode); | ||
} else { | ||
transformTextNodeForText(scanningContext, elementNode); | ||
} | ||
} | ||
while (sibling) { | ||
if (lexical.$isElementNode(sibling)) { | ||
if (!sibling.isInline()) { | ||
break; | ||
} | ||
function transformTextNodeForElementNode(elementNode, scanningContext, createHorizontalRuleNode) { | ||
if (scanningContext.textNodeWithOffset != null) { | ||
const textNodeWithOffset = getTextNodeWithOffsetOrThrow(scanningContext); | ||
const descendant = backward ? sibling.getLastDescendant() : sibling.getFirstDescendant(); | ||
if (hasPatternMatchResults(scanningContext)) { | ||
const text = scanningContext.patternMatchResults.regExCaptureGroups[0].text; // Remove the text which we matched. | ||
const textNode = textNodeWithOffset.node.spliceText(0, text.length, '', true); | ||
if (textNode.getTextContent() === '') { | ||
textNode.selectPrevious(); | ||
textNode.remove(); | ||
if (lexical.$isTextNode(descendant)) { | ||
return descendant; | ||
} else { | ||
sibling = backward ? sibling.getPreviousSibling() : sibling.getNextSibling(); | ||
} | ||
} | ||
} // Transform the current element kind to the new element kind. | ||
const { | ||
newNode, | ||
shouldDelete | ||
} = getNewNodeForCriteria(scanningContext, elementNode, createHorizontalRuleNode); | ||
if (shouldDelete) { | ||
elementNode.remove(); | ||
} else if (newNode !== null) { | ||
elementNode.replace(newNode); | ||
} | ||
} | ||
function transformTextNodeForText(scanningContext, parentElementNode) { | ||
const markdownCriteria = scanningContext.markdownCriteria; | ||
if (markdownCriteria.markdownFormatKind != null) { | ||
const formatting = getTextFormatType(markdownCriteria.markdownFormatKind); | ||
if (formatting != null) { | ||
transformTextNodeWithFormatting(formatting, scanningContext, parentElementNode); | ||
return; | ||
if (lexical.$isTextNode(sibling)) { | ||
return sibling; | ||
} | ||
if (markdownCriteria.markdownFormatKind === 'link') { | ||
transformTextNodeWithLink(scanningContext, parentElementNode); | ||
} | ||
} | ||
} | ||
function transformTextNodeWithFormatting(formatting, scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const groupCount = patternMatchResults.regExCaptureGroups.length; | ||
if (groupCount !== 7) { | ||
// For BIUS and similar formats which have a pattern + text + pattern: | ||
// given '*italic* ' below are the capture groups by index: | ||
// 0. '*italic* ' | ||
// 1. '*' | ||
// 2. whitespace // typically this is "". | ||
// 3. 'italic' | ||
// 4. whitespace // typicallly this is "". | ||
// 5. '*' | ||
// 6. ' ' | ||
return; | ||
} // Remove unwanted text in reg ex pattern. | ||
// Remove group 5. | ||
removeTextByCaptureGroups(5, 5, scanningContext, parentElementNode); // Remove group 1. | ||
removeTextByCaptureGroups(1, 1, scanningContext, parentElementNode); // Apply the formatting. | ||
formatTextInCaptureGroupIndex(formatting, 3, scanningContext, parentElementNode); // Place caret at end of final capture group. | ||
selectAfterFinalCaptureGroup(scanningContext, parentElementNode); | ||
return null; | ||
} | ||
function transformTextNodeWithLink(scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const regExCaptureGroups = patternMatchResults.regExCaptureGroups; | ||
const groupCount = regExCaptureGroups.length; | ||
if (groupCount !== 7) { | ||
// For links and similar formats which have: pattern + text + pattern + pattern2 text2 + pattern2: | ||
// Given '[title](url) ', below are the capture groups by index: | ||
// 0. '[title](url) ' | ||
// 1. '[' | ||
// 2. 'title' | ||
// 3. '](' | ||
// 4. 'url' | ||
// 5. ')' | ||
// 6. ' ' | ||
return; | ||
} | ||
const title = regExCaptureGroups[2].text; | ||
const url = regExCaptureGroups[4].text; | ||
if (title.length === 0 || url.length === 0) { | ||
return; | ||
} // Remove the initial pattern through to the final pattern. | ||
removeTextByCaptureGroups(1, 5, scanningContext, parentElementNode); | ||
insertTextPriorToCaptureGroup(1, // Insert at the beginning of the meaningful capture groups, namely index 1. Index 0 refers to the whole matched string. | ||
title, scanningContext, parentElementNode); | ||
const newSelectionForLink = createSelectionWithCaptureGroups(1, 1, false, true, scanningContext, parentElementNode); | ||
if (newSelectionForLink == null) { | ||
return; | ||
} | ||
lexical.$setSelection(newSelectionForLink); | ||
scanningContext.editor.dispatchCommand(link.TOGGLE_LINK_COMMAND, url); // Place caret at end of final capture group. | ||
selectAfterFinalCaptureGroup(scanningContext, parentElementNode); | ||
} // Below are lower level helper functions. | ||
function getParentElementNodeOrThrow(scanningContext) { | ||
return getTextNodeWithOffsetOrThrow(scanningContext).node.getParentOrThrow(); | ||
// Returns true when `node` is a TextNode that carries the given text format.
function hasFormat(node, format) {
  if (!lexical.$isTextNode(node)) {
    return false;
  }
  return node.hasFormat(format);
}
function getJoinedTextLength(patternMatchResults) { | ||
const groupCount = patternMatchResults.regExCaptureGroups.length; | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
* | ||
* | ||
*/ | ||
// Matches a fenced code-block delimiter line: ``` optionally followed by a
// language tag of 1-10 word characters and at most one trailing whitespace.
// Capture group 1 is the language tag (may be undefined).
const CODE_BLOCK_REG_EXP = /^```(\w{1,10})?\s?$/;
function createMarkdownImport(transformers) { | ||
const byType = transformersByType(transformers); | ||
const textFormatTransformersIndex = createTextFormatTransformersIndex(byType.textFormat); | ||
return markdownString => { | ||
const lines = markdownString.split('\n'); | ||
const linesLength = lines.length; | ||
const root = lexical.$getRoot(); | ||
root.clear(); | ||
if (groupCount < 2) { | ||
// Ignore capture group 0, as regEx defaults the 0th one to the entire matched string. | ||
return 0; | ||
} | ||
for (let i = 0; i < linesLength; i++) { | ||
const lineText = lines[i]; // Codeblocks are processed first as anything inside such block | ||
// is ignored for further processing | ||
// TODO: | ||
// Abstract it to be dynamic as other transformers (add multiline match option) | ||
const lastGroupIndex = groupCount - 1; | ||
return patternMatchResults.regExCaptureGroups[lastGroupIndex].offsetInParent + patternMatchResults.regExCaptureGroups[lastGroupIndex].text.length; | ||
} | ||
const [codeBlockNode, shiftedIndex] = importCodeBlock(lines, i, root); | ||
function getTextFormatType(markdownFormatKind) { | ||
switch (markdownFormatKind) { | ||
case 'italic': | ||
case 'bold': | ||
case 'underline': | ||
case 'strikethrough': | ||
case 'code': | ||
return [markdownFormatKind]; | ||
case 'strikethrough_italic_bold': | ||
{ | ||
return ['strikethrough', 'italic', 'bold']; | ||
if (codeBlockNode != null) { | ||
i = shiftedIndex; | ||
continue; | ||
} | ||
case 'italic_bold': | ||
{ | ||
return ['italic', 'bold']; | ||
} | ||
importBlocks(lineText, root, byType.element, textFormatTransformersIndex, byType.textMatch); | ||
} | ||
case 'strikethrough_italic': | ||
{ | ||
return ['strikethrough', 'italic']; | ||
} | ||
case 'strikethrough_bold': | ||
{ | ||
return ['strikethrough', 'bold']; | ||
} | ||
} | ||
return null; | ||
root.selectEnd(); | ||
}; | ||
} | ||
function createSelectionWithCaptureGroups(anchorCaptureGroupIndex, focusCaptureGroupIndex, startAtEndOfAnchor, finishAtEndOfFocus, scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const regExCaptureGroups = patternMatchResults.regExCaptureGroups; | ||
const regExCaptureGroupsCount = regExCaptureGroups.length; | ||
function importBlocks(lineText, rootNode, elementTransformers, textFormatTransformersIndex, textMatchTransformers) { | ||
const textNode = lexical.$createTextNode(lineText); | ||
const elementNode = lexical.$createParagraphNode(); | ||
elementNode.append(textNode); | ||
rootNode.append(elementNode); | ||
if (anchorCaptureGroupIndex >= regExCaptureGroupsCount || focusCaptureGroupIndex >= regExCaptureGroupsCount) { | ||
return null; | ||
} | ||
for (const { | ||
regExp, | ||
replace | ||
} of elementTransformers) { | ||
const match = lineText.match(regExp); | ||
const joinedTextLength = getJoinedTextLength(patternMatchResults); | ||
const anchorCaptureGroupDetail = regExCaptureGroups[anchorCaptureGroupIndex]; | ||
const focusCaptureGroupDetail = regExCaptureGroups[focusCaptureGroupIndex]; | ||
const anchorLocation = startAtEndOfAnchor ? anchorCaptureGroupDetail.offsetInParent + anchorCaptureGroupDetail.text.length : anchorCaptureGroupDetail.offsetInParent; | ||
const focusLocation = finishAtEndOfFocus ? focusCaptureGroupDetail.offsetInParent + focusCaptureGroupDetail.text.length : focusCaptureGroupDetail.offsetInParent; | ||
const anchorTextNodeWithOffset = text.$findNodeWithOffsetFromJoinedText(anchorLocation, joinedTextLength, SEPARATOR_LENGTH, parentElementNode); | ||
const focusTextNodeWithOffset = text.$findNodeWithOffsetFromJoinedText(focusLocation, joinedTextLength, SEPARATOR_LENGTH, parentElementNode); | ||
if (anchorTextNodeWithOffset == null && focusTextNodeWithOffset == null && parentElementNode.getChildren().length === 0) { | ||
const emptyElementSelection = lexical.$createRangeSelection(); | ||
emptyElementSelection.anchor.set(parentElementNode.getKey(), 0, 'element'); | ||
emptyElementSelection.focus.set(parentElementNode.getKey(), 0, 'element'); | ||
return emptyElementSelection; | ||
if (match) { | ||
textNode.setTextContent(lineText.slice(match[0].length)); | ||
replace(elementNode, [textNode], match, true); | ||
break; | ||
} | ||
} | ||
if (anchorTextNodeWithOffset == null || focusTextNodeWithOffset == null) { | ||
return null; | ||
} | ||
const selection = lexical.$createRangeSelection(); | ||
selection.anchor.set(anchorTextNodeWithOffset.node.getKey(), anchorTextNodeWithOffset.offset, 'text'); | ||
selection.focus.set(focusTextNodeWithOffset.node.getKey(), focusTextNodeWithOffset.offset, 'text'); | ||
return selection; | ||
importTextFormatTransformers(textNode, textFormatTransformersIndex, textMatchTransformers); | ||
} | ||
function removeTextByCaptureGroups(anchorCaptureGroupIndex, focusCaptureGroupIndex, scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const regExCaptureGroups = patternMatchResults.regExCaptureGroups; | ||
const newSelection = createSelectionWithCaptureGroups(anchorCaptureGroupIndex, focusCaptureGroupIndex, false, true, scanningContext, parentElementNode); | ||
function importCodeBlock(lines, startLineIndex, rootNode) { | ||
const openMatch = lines[startLineIndex].match(CODE_BLOCK_REG_EXP); | ||
if (newSelection != null) { | ||
lexical.$setSelection(newSelection); | ||
const currentSelection = lexical.$getSelection(); | ||
if (openMatch) { | ||
let endLineIndex = startLineIndex; | ||
const linesLength = lines.length; | ||
if (currentSelection != null && lexical.$isRangeSelection(currentSelection) && currentSelection.isCollapsed() === false) { | ||
currentSelection.removeText(); // Shift all group offsets and clear out group text. | ||
while (++endLineIndex < linesLength) { | ||
const closeMatch = lines[endLineIndex].match(CODE_BLOCK_REG_EXP); | ||
let runningLength = 0; | ||
const groupCount = regExCaptureGroups.length; | ||
for (let i = anchorCaptureGroupIndex; i < groupCount; i++) { | ||
const captureGroupDetail = regExCaptureGroups[i]; | ||
if (i > anchorCaptureGroupIndex) { | ||
captureGroupDetail.offsetInParent -= runningLength; | ||
} | ||
if (i <= focusCaptureGroupIndex) { | ||
runningLength += captureGroupDetail.text.length; | ||
captureGroupDetail.text = ''; | ||
} | ||
if (closeMatch) { | ||
const codeBlockNode = code.$createCodeNode(openMatch[1]); | ||
const textNode = lexical.$createTextNode(lines.slice(startLineIndex + 1, endLineIndex).join('\n')); | ||
codeBlockNode.append(textNode); | ||
rootNode.append(codeBlockNode); | ||
return [codeBlockNode, endLineIndex]; | ||
} | ||
} | ||
} | ||
} | ||
function insertTextPriorToCaptureGroup(captureGroupIndex, text, scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const regExCaptureGroups = patternMatchResults.regExCaptureGroups; | ||
const regExCaptureGroupsCount = regExCaptureGroups.length; | ||
return [null, startLineIndex]; | ||
} // Processing text content and replaces text format tags. | ||
// It takes outermost tag match and its content, creates text node with | ||
// format based on tag and then recursively executed over node's content | ||
// | ||
// E.g. for "*Hello **world**!*" string it will create text node with | ||
// "Hello **world**!" content and italic format and run recursively over | ||
// its content to transform "**world**" part | ||
if (captureGroupIndex >= regExCaptureGroupsCount) { | ||
function importTextFormatTransformers(textNode, textFormatTransformersIndex, textMatchTransformers) { | ||
const textContent = textNode.getTextContent(); | ||
const match = findOutermostMatch(textContent, textFormatTransformersIndex); | ||
if (!match) { | ||
// Once text format processing is done run text match transformers, as it | ||
// only can span within single text node (unline formats that can cover multiple nodes) | ||
importTextMatchTransformers(textNode, textMatchTransformers); | ||
return; | ||
} | ||
const captureGroupDetail = regExCaptureGroups[captureGroupIndex]; | ||
const newCaptureGroupDetail = { | ||
offsetInParent: captureGroupDetail.offsetInParent, | ||
text | ||
}; | ||
const newSelection = createSelectionWithCaptureGroups(captureGroupIndex, captureGroupIndex, false, false, scanningContext, parentElementNode); | ||
let currentNode, remainderNode; // If matching full content there's no need to run splitText and can reuse existing textNode | ||
// to update its content and apply format. E.g. for **_Hello_** string after applying bold | ||
// format (**) it will reuse the same text node to apply italic (_) | ||
if (newSelection != null) { | ||
lexical.$setSelection(newSelection); | ||
const currentSelection = lexical.$getSelection(); | ||
if (match[0] === textContent) { | ||
currentNode = textNode; | ||
} else { | ||
const startIndex = match.index; | ||
const endIndex = startIndex + match[0].length; | ||
if (currentSelection != null && lexical.$isRangeSelection(currentSelection) && currentSelection.isCollapsed()) { | ||
currentSelection.insertText(newCaptureGroupDetail.text); // Update the capture groups. | ||
if (startIndex === 0) { | ||
[currentNode, remainderNode] = textNode.splitText(endIndex); | ||
} else { | ||
[, currentNode, remainderNode] = textNode.splitText(startIndex, endIndex); | ||
} | ||
} | ||
regExCaptureGroups.splice(captureGroupIndex, 0, newCaptureGroupDetail); | ||
const textLength = newCaptureGroupDetail.text.length; | ||
const newGroupCount = regExCaptureGroups.length; | ||
currentNode.setTextContent(match[2]); | ||
const transformer = textFormatTransformersIndex.transformersByTag[match[1]]; | ||
for (let i = captureGroupIndex + 1; i < newGroupCount; i++) { | ||
const currentCaptureGroupDetail = regExCaptureGroups[i]; | ||
currentCaptureGroupDetail.offsetInParent += textLength; | ||
if (transformer) { | ||
for (const format of transformer.format) { | ||
if (!currentNode.hasFormat(format)) { | ||
currentNode.toggleFormat(format); | ||
} | ||
} | ||
} | ||
} | ||
} // Recursively run over inner text if it's not inline code | ||
function formatTextInCaptureGroupIndex(formatTypes, captureGroupIndex, scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const regExCaptureGroups = patternMatchResults.regExCaptureGroups; | ||
const regExCaptureGroupsCount = regExCaptureGroups.length; | ||
if (!(captureGroupIndex < regExCaptureGroupsCount)) { | ||
throw Error(`The capture group count in the RegEx does match the actual capture group count.`); | ||
} | ||
if (!currentNode.hasFormat('code')) { | ||
importTextFormatTransformers(currentNode, textFormatTransformersIndex, textMatchTransformers); | ||
} // Run over remaining text if any | ||
const captureGroupDetail = regExCaptureGroups[captureGroupIndex]; | ||
if (captureGroupDetail.text.length === 0) { | ||
return; | ||
if (remainderNode) { | ||
importTextFormatTransformers(remainderNode, textFormatTransformersIndex, textMatchTransformers); | ||
} | ||
} | ||
const newSelection = createSelectionWithCaptureGroups(captureGroupIndex, captureGroupIndex, false, true, scanningContext, parentElementNode); | ||
function importTextMatchTransformers(textNode_, textMatchTransformers) { | ||
let textNode = textNode_; | ||
if (newSelection != null) { | ||
lexical.$setSelection(newSelection); | ||
const currentSelection = lexical.$getSelection(); | ||
mainLoop: while (textNode) { | ||
for (const transformer of textMatchTransformers) { | ||
const match = textNode.getTextContent().match(transformer.importRegExp); | ||
if (lexical.$isRangeSelection(currentSelection)) { | ||
for (let i = 0; i < formatTypes.length; i++) { | ||
currentSelection.formatText(formatTypes[i]); | ||
if (!match) { | ||
continue; | ||
} | ||
} | ||
} | ||
} // Place caret at end of final capture group. | ||
const startIndex = match.index; | ||
const endIndex = startIndex + match[0].length; | ||
let replaceNode; | ||
function selectAfterFinalCaptureGroup(scanningContext, parentElementNode) { | ||
const patternMatchResults = scanningContext.patternMatchResults; | ||
const groupCount = patternMatchResults.regExCaptureGroups.length; | ||
if (startIndex === 0) { | ||
[replaceNode, textNode] = textNode.splitText(endIndex); | ||
} else { | ||
[, replaceNode, textNode] = textNode.splitText(startIndex, endIndex); | ||
} | ||
if (groupCount < 2) { | ||
// Ignore capture group 0, as regEx defaults the 0th one to the entire matched string. | ||
return; | ||
} | ||
transformer.replace(replaceNode, match); | ||
continue mainLoop; | ||
} | ||
const lastGroupIndex = groupCount - 1; | ||
const newSelection = createSelectionWithCaptureGroups(lastGroupIndex, lastGroupIndex, true, true, scanningContext, parentElementNode); | ||
if (newSelection != null) { | ||
lexical.$setSelection(newSelection); | ||
break; | ||
} | ||
} | ||
} // Finds first "<tag>content<tag>" match that is not nested into another tag | ||
// Builds an element-export callback for heading level `level`.
// The callback emits '#' repeated `level` times plus the children's
// markdown for matching HeadingNodes, or null so other exporters can try.
function createHeadingExport(level) {
  return (node, exportChildren) => {
    if (!richText.$isHeadingNode(node) || node.getTag() !== 'h' + level) {
      return null;
    }
    return '#'.repeat(level) + ' ' + exportChildren(node);
  };
}
// Exports a ListNode (and any nested lists) as markdown; returns null for
// non-list nodes so the next element transformer is consulted.
function listExport(node, exportChildren) {
  if (list.$isListNode(node)) {
    return processNestedLists(node, exportChildren, 0);
  }
  return null;
} // TODO: should be param
function findOutermostMatch(textContent, textTransformersIndex) { | ||
const openTagsMatch = textContent.match(textTransformersIndex.openTagsRegExp); | ||
if (openTagsMatch == null) { | ||
return null; | ||
} | ||
const LIST_INDENT_SIZE = 4; | ||
for (const match of openTagsMatch) { | ||
// Open tags reg exp might capture leading space so removing it | ||
// before using match to find transformer | ||
const fullMatchRegExp = textTransformersIndex.fullMatchRegExpByTag[match.replace(/^\s/, '')]; | ||
function processNestedLists(listNode, exportChildren, depth) { | ||
const output = []; | ||
const children = listNode.getChildren(); | ||
let index = 0; | ||
if (fullMatchRegExp == null) { | ||
continue; | ||
} | ||
for (const listItemNode of children) { | ||
if (list.$isListItemNode(listItemNode)) { | ||
if (listItemNode.getChildrenSize() === 1) { | ||
const firstChild = listItemNode.getFirstChild(); | ||
const fullMatch = textContent.match(fullMatchRegExp); | ||
if (list.$isListNode(firstChild)) { | ||
output.push(processNestedLists(firstChild, exportChildren, depth + 1)); | ||
continue; | ||
} | ||
} | ||
const indent = ' '.repeat(depth * LIST_INDENT_SIZE); | ||
const prefix = listNode.getTag() === 'ul' ? '- ' : `${listNode.getStart() + index}. `; | ||
output.push(indent + prefix + exportChildren(listItemNode)); | ||
index++; | ||
if (fullMatch != null) { | ||
return fullMatch; | ||
} | ||
} | ||
return output.join('\n'); | ||
return null; | ||
} | ||
// Exports a QuoteNode as a markdown blockquote ('> ' prefix); null otherwise.
function blockQuoteExport(node, exportChildren) {
  if (!richText.$isQuoteNode(node)) {
    return null;
  }
  return '> ' + exportChildren(node);
}
function createTextFormatTransformersIndex(textTransformers) { | ||
const transformersByTag = {}; | ||
const fullMatchRegExpByTag = {}; | ||
const openTagsRegExp = []; | ||
function codeBlockExport(node, exportChildren) { | ||
if (!code.$isCodeNode(node)) { | ||
return null; | ||
for (const transformer of textTransformers) { | ||
const { | ||
tag | ||
} = transformer; | ||
transformersByTag[tag] = transformer; | ||
const tagRegExp = tag.replace(/(\*|\^)/g, '\\$1'); | ||
openTagsRegExp.push(tagRegExp); | ||
fullMatchRegExpByTag[tag] = new RegExp(`(${tagRegExp})(?![${tagRegExp}\\s])(.*?[^${tagRegExp}\\s])${tagRegExp}(?!${tagRegExp})`); | ||
} | ||
const textContent = node.getTextContent(); | ||
return '```' + (node.getLanguage() || '') + (textContent ? '\n' + textContent : '') + '\n' + '```'; | ||
return { | ||
// Reg exp to find open tag + content + close tag | ||
fullMatchRegExpByTag, | ||
// Reg exp to find opening tags | ||
openTagsRegExp: new RegExp('(' + openTagsRegExp.join('|') + ')', 'g'), | ||
transformersByTag | ||
}; | ||
} | ||
@@ -942,380 +420,267 @@ | ||
function getTextNodeForAutoFormatting(selection) { | ||
if (!lexical.$isRangeSelection(selection)) { | ||
return null; | ||
function runElementTransformers(parentNode, anchorNode, anchorOffset, elementTransformers) { | ||
const grandParentNode = parentNode.getParent(); | ||
if (!lexical.$isRootNode(grandParentNode) || parentNode.getFirstChild() !== anchorNode) { | ||
return false; | ||
} | ||
const node = selection.anchor.getNode(); | ||
const textContent = anchorNode.getTextContent(); // Checking for anchorOffset position to prevent any checks for cases when caret is too far | ||
// from a line start to be a part of block-level markdown trigger. | ||
// | ||
// TODO: | ||
// Can have a quick check if caret is close enough to the beginning of the string (e.g. offset less than 10-20) | ||
// since otherwise it won't be a markdown shortcut, but tables are exception | ||
if (!lexical.$isTextNode(node)) { | ||
return null; | ||
if (textContent[anchorOffset - 1] !== ' ') { | ||
return false; | ||
} | ||
return { | ||
node, | ||
offset: selection.anchor.offset | ||
}; | ||
} | ||
for (const { | ||
regExp, | ||
replace | ||
} of elementTransformers) { | ||
const match = textContent.match(regExp); | ||
// Applies the matched markdown criteria to the element containing the
// trigger text node, inside editor.update tagged 'history-push' so the
// transformation lands as its own undo step.
function updateAutoFormatting(editor, scanningContext, createHorizontalRuleNode) {
  const applyTransformation = () => {
    const elementNode = getTextNodeWithOffsetOrThrow(scanningContext).node.getParentOrThrow();
    transformTextNodeForMarkdownCriteria(scanningContext, elementNode, createHorizontalRuleNode);
  };
  editor.update(applyTransformation, {
    tag: 'history-push'
  });
}
function getCriteriaWithPatternMatchResults(markdownCriteriaArray, scanningContext) { | ||
const currentTriggerState = scanningContext.triggerState; | ||
const count = markdownCriteriaArray.length; | ||
for (let i = 0; i < count; i++) { | ||
const markdownCriteria = markdownCriteriaArray[i]; // Skip code block nodes, unless the autoFormatKind calls for toggling the code block. | ||
if (currentTriggerState != null && currentTriggerState.isCodeBlock === false || markdownCriteria.markdownFormatKind === 'paragraphCodeBlock') { | ||
const patternMatchResults = getPatternMatchResultsForCriteria(markdownCriteria, scanningContext, getParentElementNodeOrThrow(scanningContext)); | ||
if (patternMatchResults != null) { | ||
return { | ||
markdownCriteria, | ||
patternMatchResults | ||
}; | ||
} | ||
if (match && match[0].length === anchorOffset) { | ||
const nextSiblings = anchorNode.getNextSiblings(); | ||
const [leadingNode, remainderNode] = anchorNode.splitText(anchorOffset); | ||
leadingNode.remove(); | ||
const siblings = remainderNode ? [remainderNode, ...nextSiblings] : nextSiblings; | ||
replace(parentNode, siblings, match, false); | ||
return true; | ||
} | ||
} | ||
return { | ||
markdownCriteria: null, | ||
patternMatchResults: null | ||
}; | ||
return false; | ||
} | ||
function findScanningContextWithValidMatch(editor, currentTriggerState) { | ||
let scanningContext = null; | ||
editor.getEditorState().read(() => { | ||
const textNodeWithOffset = getTextNodeForAutoFormatting(lexical.$getSelection()); | ||
function runTextMatchTransformers(anchorNode, anchorOffset, transformersByTrigger) { | ||
let textContent = anchorNode.getTextContent(); | ||
const lastChar = textContent[anchorOffset - 1]; | ||
const transformers = transformersByTrigger[lastChar]; | ||
if (textNodeWithOffset === null) { | ||
return; | ||
} // Please see the declaration of ScanningContext for a detailed explanation. | ||
if (transformers == null) { | ||
return false; | ||
} // If typing in the middle of content, remove the tail to do | ||
// reg exp match up to a string end (caret position) | ||
const initialScanningContext = getInitialScanningContext(editor, true, textNodeWithOffset, currentTriggerState); | ||
const criteriaWithPatternMatchResults = getCriteriaWithPatternMatchResults( // Do not apply paragraph node changes like blockQuote or H1 to listNodes. Also, do not attempt to transform a list into a list using * or -. | ||
currentTriggerState.isParentAListItemNode === false ? getAllMarkdownCriteria() : getAllMarkdownCriteriaForTextNodes(), initialScanningContext); | ||
if (anchorOffset < textContent.length) { | ||
textContent = textContent.slice(0, anchorOffset); | ||
} | ||
if (criteriaWithPatternMatchResults.markdownCriteria === null || criteriaWithPatternMatchResults.patternMatchResults === null) { | ||
return; | ||
for (const transformer of transformers) { | ||
const match = textContent.match(transformer.regExp); | ||
if (match === null) { | ||
continue; | ||
} | ||
scanningContext = initialScanningContext; // Lazy fill-in the particular format criteria and any matching result information. | ||
const startIndex = match.index; | ||
const endIndex = startIndex + match[0].length; | ||
let replaceNode; | ||
scanningContext.markdownCriteria = criteriaWithPatternMatchResults.markdownCriteria; | ||
scanningContext.patternMatchResults = criteriaWithPatternMatchResults.patternMatchResults; | ||
}); | ||
return scanningContext; | ||
} | ||
function getTriggerState(editorState) { | ||
let criteria = null; | ||
editorState.read(() => { | ||
const selection = lexical.$getSelection(); | ||
if (!lexical.$isRangeSelection(selection) || !selection.isCollapsed()) { | ||
return; | ||
if (startIndex === 0) { | ||
[replaceNode] = anchorNode.splitText(endIndex); | ||
} else { | ||
[, replaceNode] = anchorNode.splitText(startIndex, endIndex); | ||
} | ||
const node = selection.anchor.getNode(); | ||
const parentNode = node.getParent(); | ||
const isParentAListItemNode = list.$isListItemNode(parentNode); | ||
const hasParentNode = parentNode !== null; | ||
criteria = { | ||
anchorOffset: selection.anchor.offset, | ||
hasParentNode, | ||
isCodeBlock: code.$isCodeNode(node), | ||
isParentAListItemNode, | ||
isSelectionCollapsed: true, | ||
isSimpleText: lexical.$isTextNode(node) && node.isSimpleText(), | ||
nodeKey: node.getKey(), | ||
textContent: node.getTextContent() | ||
}; | ||
}); | ||
return criteria; | ||
} | ||
function findScanningContext(editor, currentTriggerState, priorTriggerState) { | ||
if (currentTriggerState == null || priorTriggerState == null) { | ||
return null; | ||
replaceNode.selectNext(); | ||
transformer.replace(replaceNode, match); | ||
return true; | ||
} | ||
const triggerArray = getAllTriggers(); | ||
const triggerCount = triggers.length; | ||
return false; | ||
} | ||
for (let ti = 0; ti < triggerCount; ti++) { | ||
const triggerString = triggerArray[ti].triggerString; // The below checks needs to execute relativey quickly, so perform the light-weight ones first. | ||
// The substr check is a quick way to avoid autoformat parsing in that it looks for the autoformat | ||
// trigger which is the trigger string (" "). | ||
function runTextFormatTransformers(editor, anchorNode, anchorOffset, textFormatTransformers) { | ||
const textContent = anchorNode.getTextContent(); | ||
const closeTagEndIndex = anchorOffset - 1; | ||
const closeChar = textContent[closeTagEndIndex]; // Quick check if we're possibly at the end of inline markdown style | ||
const triggerStringLength = triggerString.length; | ||
const currentTextContentLength = currentTriggerState.textContent.length; | ||
const triggerOffset = currentTriggerState.anchorOffset - triggerStringLength; // Todo: these checks help w/ performance, yet we can do more. | ||
// We might consider looking for ** + space or __ + space and so on to boost performance | ||
// even further. Make sure the patter is driven from the trigger state type. | ||
const matchers = textFormatTransformers[closeChar]; | ||
if ((currentTriggerState.hasParentNode === true && currentTriggerState.isSimpleText && currentTriggerState.isSelectionCollapsed && currentTriggerState.anchorOffset !== priorTriggerState.anchorOffset && triggerOffset >= 0 && triggerOffset + triggerStringLength <= currentTextContentLength && currentTriggerState.textContent.substr(triggerOffset, triggerStringLength) === triggerString && // Some code differentiation needed if trigger kind is not a simple space character. | ||
currentTriggerState.textContent !== priorTriggerState.textContent) === false) { | ||
return null; | ||
} | ||
if (!matchers) { | ||
return false; | ||
} | ||
return findScanningContextWithValidMatch(editor, currentTriggerState); | ||
} | ||
for (const matcher of matchers) { | ||
const { | ||
tag | ||
} = matcher; | ||
const tagLength = tag.length; | ||
const closeTagStartIndex = closeTagEndIndex - tagLength + 1; // If tag is not single char check if rest of it matches with text content | ||
/** | ||
* Copyright (c) Meta Platforms, Inc. and affiliates. | ||
* | ||
* This source code is licensed under the MIT license found in the | ||
* LICENSE file in the root directory of this source tree. | ||
* | ||
* | ||
*/ | ||
function convertStringToLexical(text, editor) { | ||
if (!text.length) { | ||
return null; | ||
} | ||
if (tagLength > 1) { | ||
if (!isEqualSubString(textContent, closeTagStartIndex, tag, 0, tagLength)) { | ||
continue; | ||
} | ||
} // Space before closing tag cancels inline markdown | ||
const nodes = []; | ||
const splitLines = text.split('\n'); | ||
const splitLinesCount = splitLines.length; | ||
for (let i = 0; i < splitLinesCount; i++) { | ||
if (splitLines[i].length > 0) { | ||
nodes.push(lexical.$createParagraphNode().append(lexical.$createTextNode(splitLines[i]))); | ||
} else { | ||
nodes.push(lexical.$createParagraphNode()); | ||
if (textContent[closeTagStartIndex - 1] === ' ') { | ||
continue; | ||
} | ||
} | ||
if (nodes.length) { | ||
const root = lexical.$getRoot(); | ||
root.clear(); | ||
root.append(...nodes); | ||
return root; | ||
} | ||
const closeNode = anchorNode; | ||
let openNode = closeNode; | ||
let openTagStartIndex = getOpenTagStartIndex(textContent, closeTagStartIndex, tag); // Go through text node siblings and search for opening tag | ||
// if haven't found it within the same text node as closing tag | ||
return null; | ||
} | ||
function convertMarkdownForElementNodes(editor, createHorizontalRuleNode) { | ||
// Please see the declaration of ScanningContext for a detailed explanation. | ||
const scanningContext = getInitialScanningContext(editor, false, null, null); | ||
const root = lexical.$getRoot(); | ||
let done = false; | ||
let startIndex = 0; // Handle the paragraph level markdown. | ||
let sibling = openNode; | ||
while (!done) { | ||
done = true; | ||
const elementNodes = root.getChildren(); | ||
const countOfElementNodes = elementNodes.length; | ||
for (let i = startIndex; i < countOfElementNodes; i++) { | ||
const elementNode = elementNodes[i]; | ||
if (lexical.$isElementNode(elementNode)) { | ||
convertParagraphLevelMarkdown(scanningContext, elementNode, createHorizontalRuleNode); | ||
} // Reset the scanning information that relates to the particular element node. | ||
resetScanningContext(scanningContext); | ||
if (root.getChildren().length !== countOfElementNodes) { | ||
// The conversion added or removed an from root's children. | ||
startIndex = i; | ||
done = false; | ||
while (openTagStartIndex < 0 && (sibling = sibling.getPreviousSibling())) { | ||
if (lexical.$isLineBreakNode(sibling)) { | ||
break; | ||
} | ||
} | ||
} // while | ||
if (lexical.$isTextNode(sibling)) { | ||
const siblingTextContent = sibling.getTextContent(); | ||
openNode = sibling; | ||
openTagStartIndex = getOpenTagStartIndex(siblingTextContent, siblingTextContent.length, tag); | ||
} | ||
} // Opening tag is not found | ||
done = false; | ||
startIndex = 0; // Handle the text level markdown. | ||
while (!done) { | ||
done = true; | ||
const elementNodes = root.getChildren(); | ||
const countOfElementNodes = elementNodes.length; | ||
if (openTagStartIndex < 0) { | ||
continue; | ||
} // No content between opening and closing tag | ||
for (let i = startIndex; i < countOfElementNodes; i++) { | ||
const elementNode = elementNodes[i]; | ||
if (lexical.$isElementNode(elementNode)) { | ||
convertTextLevelMarkdown(scanningContext, elementNode, createHorizontalRuleNode); | ||
} // Reset the scanning information that relates to the particular element node. | ||
if (openNode === closeNode && openTagStartIndex + tagLength === closeTagStartIndex) { | ||
continue; | ||
} // Checking longer tags for repeating chars (e.g. *** vs **) | ||
resetScanningContext(scanningContext); | ||
} | ||
} // while | ||
const prevOpenNodeText = openNode.getTextContent(); | ||
} | ||
if (openTagStartIndex > 0 && prevOpenNodeText[openTagStartIndex - 1] === closeChar) { | ||
continue; | ||
} // Clean text from opening and closing tags (starting from closing tag | ||
// to prevent any offset shifts if we start from opening one) | ||
function convertParagraphLevelMarkdown(scanningContext, elementNode, createHorizontalRuleNode) { | ||
const textContent = elementNode.getTextContent(); // Handle paragraph nodes below. | ||
if (lexical.$isParagraphNode(elementNode)) { | ||
const paragraphNode = elementNode; | ||
const firstChild = paragraphNode.getFirstChild(); | ||
const firstChildIsTextNode = lexical.$isTextNode(firstChild); // Handle conversion to code block. | ||
const prevCloseNodeText = closeNode.getTextContent(); | ||
const closeNodeText = prevCloseNodeText.slice(0, closeTagStartIndex) + prevCloseNodeText.slice(closeTagEndIndex + 1); | ||
closeNode.setTextContent(closeNodeText); | ||
const openNodeText = openNode === closeNode ? closeNodeText : prevOpenNodeText; | ||
openNode.setTextContent(openNodeText.slice(0, openTagStartIndex) + openNodeText.slice(openTagStartIndex + tagLength)); | ||
const nextSelection = lexical.$createRangeSelection(); | ||
lexical.$setSelection(nextSelection); // Adjust offset based on deleted chars | ||
if (scanningContext.isWithinCodeBlock === true) { | ||
if (firstChild != null && firstChildIsTextNode) { | ||
// Test if we encounter ending code block. | ||
scanningContext.textNodeWithOffset = { | ||
node: firstChild, | ||
offset: 0 | ||
}; | ||
const patternMatchResults = getPatternMatchResultsForCodeBlock(scanningContext, textContent); | ||
const newOffset = closeTagEndIndex - tagLength * (openNode === closeNode ? 2 : 1) + 1; | ||
nextSelection.anchor.set(openNode.__key, openTagStartIndex, 'text'); | ||
nextSelection.focus.set(closeNode.__key, newOffset, 'text'); // Apply formatting to selected text | ||
if (patternMatchResults != null) { | ||
// Toggle transform to or from code block. | ||
scanningContext.patternMatchResults = patternMatchResults; | ||
} | ||
for (const format of matcher.format) { | ||
if (!nextSelection.hasFormat(format)) { | ||
nextSelection.formatText(format); | ||
} | ||
} // Collapse selection up to the focus point | ||
scanningContext.markdownCriteria = getCodeBlockCriteria(); // Perform text transformation here. | ||
transformTextNodeForMarkdownCriteria(scanningContext, elementNode, createHorizontalRuleNode); | ||
return; | ||
} | ||
nextSelection.anchor.set(nextSelection.focus.key, nextSelection.focus.offset, nextSelection.focus.type); // Remove formatting from collapsed selection | ||
if (elementNode.getChildren().length) { | ||
const allCriteria = getAllMarkdownCriteriaForParagraphs(); | ||
const count = allCriteria.length; | ||
scanningContext.joinedText = paragraphNode.getTextContent(); | ||
if (!(firstChild != null && firstChildIsTextNode)) { | ||
throw Error(`Expect paragraph containing only text nodes.`); | ||
for (const format of matcher.format) { | ||
if (nextSelection.hasFormat(format)) { | ||
nextSelection.toggleFormat(format); | ||
} | ||
} | ||
scanningContext.textNodeWithOffset = { | ||
node: firstChild, | ||
offset: 0 | ||
}; | ||
return true; | ||
} | ||
for (let i = 0; i < count; i++) { | ||
const criteria = allCriteria[i]; | ||
return false; | ||
} | ||
if (criteria.requiresParagraphStart === false) { | ||
return; | ||
} | ||
// Scans backwards from maxIndex for the nearest occurrence of `tag` that can
// open a formatting range. A tag immediately followed by a space is rejected,
// because a space right after the opening tag cancels the transformation.
// Returns the index where the tag starts, or -1 when no valid tag is found.
// NOTE(review): diff interleaving had injected lines from an unrelated
// function into this body; they are removed here to restore valid syntax.
function getOpenTagStartIndex(string, maxIndex, tag) {
  const tagLength = tag.length;
  for (let i = maxIndex; i >= tagLength; i--) {
    const startIndex = i - tagLength;
    if (
      isEqualSubString(string, startIndex, tag, 0, tagLength) &&
      // Space after opening tag cancels transformation
      string[startIndex + tagLength] !== ' '
    ) {
      return startIndex;
    }
  }
  return -1;
}
function convertTextLevelMarkdown(scanningContext, elementNode, createHorizontalRuleNode) { | ||
const firstChild = elementNode.getFirstChild(); | ||
if (lexical.$isTextNode(firstChild)) { | ||
// This function will convert all text nodes within the elementNode. | ||
convertMarkdownForTextCriteria(scanningContext, elementNode, createHorizontalRuleNode); | ||
return; | ||
} // Handle the case where the elementNode has child elementNodes like lists. | ||
// Since we started at a text import, we don't need to worry about anything but textNodes. | ||
const children = elementNode.getChildren(); | ||
const countOfChildren = children.length; | ||
for (let i = 0; i < countOfChildren; i++) { | ||
const node = children[i]; | ||
if (lexical.$isElementNode(node)) { | ||
// Recurse down until we find a text node. | ||
convertTextLevelMarkdown(scanningContext, node, createHorizontalRuleNode); | ||
// Compares a fixed-length slice of two strings character by character.
// True when stringA[aStart .. aStart+length) equals stringB[bStart .. bStart+length).
function isEqualSubString(stringA, aStart, stringB, bStart, length) {
  let offset = 0;
  while (offset < length) {
    if (stringA[aStart + offset] !== stringB[bStart + offset]) {
      return false;
    }
    offset++;
  }
  return true;
}
function convertMarkdownForTextCriteria(scanningContext, elementNode, createHorizontalRuleNode) { | ||
resetScanningContext(scanningContext); // Cycle through all the criteria and convert all text patterns in the parent element. | ||
function registerMarkdownShortcuts(editor, transformers) { | ||
const byType = transformersByType(transformers); | ||
const textFormatTransformersIndex = indexBy(byType.textFormat, ({ | ||
tag | ||
}) => tag[tag.length - 1]); | ||
const textMatchTransformersIndex = indexBy(byType.textMatch, ({ | ||
trigger | ||
}) => trigger); | ||
const allCriteria = getAllMarkdownCriteriaForTextNodes(); | ||
const count = allCriteria.length; | ||
let textContent = elementNode.getTextContent(); | ||
let done = textContent.length === 0; | ||
let startIndex = 0; | ||
const transform = (parentNode, anchorNode, anchorOffset) => { | ||
if (runElementTransformers(parentNode, anchorNode, anchorOffset, byType.element)) { | ||
return; | ||
} | ||
while (!done) { | ||
done = true; | ||
if (runTextMatchTransformers(anchorNode, anchorOffset, textMatchTransformersIndex)) { | ||
return; | ||
} | ||
for (let i = startIndex; i < count; i++) { | ||
const criteria = allCriteria[i]; | ||
runTextFormatTransformers(editor, anchorNode, anchorOffset, textFormatTransformersIndex); | ||
}; | ||
if (scanningContext.textNodeWithOffset == null) { | ||
// Need to search through the very last text node in the element. | ||
const lastTextNode = getLastTextNodeInElementNode(elementNode); | ||
return editor.registerUpdateListener(({ | ||
tags, | ||
dirtyLeaves, | ||
editorState, | ||
prevEditorState | ||
}) => { | ||
// Ignore updates from undo/redo (as changes already calculated) | ||
if (tags.has('historic')) { | ||
return; | ||
} | ||
if (lastTextNode == null) { | ||
// If we have no more text nodes, then there's nothing to search and transform. | ||
return; | ||
} | ||
const selection = editorState.read(lexical.$getSelection); | ||
const prevSelection = prevEditorState.read(lexical.$getSelection); | ||
scanningContext.textNodeWithOffset = { | ||
node: lastTextNode, | ||
offset: lastTextNode.getTextContent().length | ||
}; | ||
} | ||
if (!lexical.$isRangeSelection(prevSelection) || !lexical.$isRangeSelection(selection) || !selection.isCollapsed()) { | ||
return; | ||
} | ||
const patternMatchResults = getPatternMatchResultsForCriteria(criteria, scanningContext, elementNode); | ||
const anchorKey = selection.anchor.key; | ||
const anchorOffset = selection.anchor.offset; | ||
if (patternMatchResults != null) { | ||
scanningContext.markdownCriteria = criteria; | ||
scanningContext.patternMatchResults = patternMatchResults; // Perform text transformation here. | ||
const anchorNode = editorState._nodeMap.get(anchorKey); | ||
transformTextNodeForMarkdownCriteria(scanningContext, elementNode, createHorizontalRuleNode); | ||
resetScanningContext(scanningContext); | ||
const currentTextContent = elementNode.getTextContent(); | ||
if (!lexical.$isTextNode(anchorNode) || !dirtyLeaves.has(anchorKey) || anchorOffset !== 1 && anchorOffset !== prevSelection.anchor.offset + 1) { | ||
return; | ||
} | ||
if (currentTextContent.length === 0) { | ||
// Nothing left to convert. | ||
return; | ||
} | ||
editor.update(() => { | ||
// Markdown is not available inside code | ||
if (anchorNode.hasFormat('code')) { | ||
return; | ||
} | ||
if (currentTextContent === textContent) { | ||
// Nothing was changed by this transformation, so move on to the next crieteria. | ||
continue; | ||
} // The text was changed. Perhaps there is another hit for the same criteria. | ||
const parentNode = anchorNode.getParent(); | ||
textContent = currentTextContent; | ||
startIndex = i; | ||
done = false; | ||
break; | ||
if (parentNode === null || code.$isCodeNode(parentNode)) { | ||
return; | ||
} | ||
} | ||
} | ||
} | ||
// Walks the element's direct children from the end and returns the last
// TextNode child, or null when the element has no text-node children.
// NOTE(review): the function's closing brace was lost to diff interleaving;
// it is restored here.
function getLastTextNodeInElementNode(elementNode) {
  const children = elementNode.getChildren();
  const countOfChildren = children.length;
  for (let i = countOfChildren - 1; i >= 0; i--) {
    if (lexical.$isTextNode(children[i])) {
      return children[i];
    }
  }
  return null;
}
transform(parentNode, anchorNode, selection.anchor.offset); | ||
}); | ||
}); | ||
} | ||
@@ -1331,135 +696,205 @@ | ||
*/ | ||
function $convertToMarkdownString() { | ||
const output = []; | ||
const children = lexical.$getRoot().getChildren(); | ||
for (const child of children) { | ||
const result = exportTopLevelElementOrDecorator(child); | ||
// Builds an element-transformer replace handler: creates a node from the
// regexp match, moves the matched children into it, swaps it into the tree
// and places the caret at its start.
const replaceWithBlock = createNode => {
  return (parentNode, children, match) => {
    const targetNode = createNode(match);
    targetNode.append(...children);
    parentNode.replace(targetNode);
    targetNode.select(0, 0);
  };
}; // Amount of spaces that define indentation level
// TODO: should be an option | ||
if (result != null) { | ||
output.push(result); | ||
} | ||
} | ||
return output.join('\n'); | ||
} | ||
// Number of leading spaces that make up one list indentation level.
const LIST_INDENT_SIZE = 4;
function exportTopLevelElementOrDecorator(node) { | ||
const blockTransformers = getAllMarkdownCriteriaForParagraphs(); | ||
const listReplace = listType => { | ||
return (parentNode, children, match) => { | ||
const previousNode = parentNode.getPreviousSibling(); | ||
const listItem = list.$createListItemNode(listType === 'check' ? match[3] === 'x' : undefined); | ||
for (const transformer of blockTransformers) { | ||
if (transformer.export != null) { | ||
const result = transformer.export(node, _node => exportChildren(_node)); | ||
if (result != null) { | ||
return result; | ||
} | ||
if (list.$isListNode(previousNode) && previousNode.getListType() === listType) { | ||
previousNode.append(listItem); | ||
parentNode.remove(); | ||
} else { | ||
const list$1 = list.$createListNode(listType, listType === 'number' ? Number(match[2]) : undefined); | ||
list$1.append(listItem); | ||
parentNode.replace(list$1); | ||
} | ||
} | ||
return lexical.$isElementNode(node) ? exportChildren(node) : null; | ||
} | ||
listItem.append(...children); | ||
listItem.select(0, 0); | ||
const indent = Math.floor(match[1].length / LIST_INDENT_SIZE); | ||
function exportChildren(node) { | ||
const output = []; | ||
const children = node.getChildren(); | ||
for (const child of children) { | ||
if (lexical.$isLineBreakNode(child)) { | ||
output.push('\n'); | ||
} else if (lexical.$isTextNode(child)) { | ||
output.push(exportTextNode(child, child.getTextContent())); | ||
} else if (link.$isLinkNode(child)) { | ||
const linkContent = `[${child.getTextContent()}](${child.getURL()})`; | ||
const firstChild = child.getFirstChild(); // Add text styles only if link has single text node inside. If it's more | ||
// then one we either ignore it and have single <a> to cover whole link, | ||
// or process them, but then have link cut into multiple <a>. | ||
// For now chosing the first option. | ||
if (child.getChildrenSize() === 1 && lexical.$isTextNode(firstChild)) { | ||
output.push(exportTextNode(firstChild, linkContent)); | ||
} else { | ||
output.push(linkContent); | ||
} | ||
} else if (lexical.$isElementNode(child)) { | ||
output.push(exportChildren(child)); | ||
if (indent) { | ||
listItem.setIndent(indent); | ||
} | ||
} | ||
}; | ||
}; | ||
return output.join(''); | ||
} | ||
const listExport = (listNode, exportChildren, depth) => { | ||
const output = []; | ||
const children = listNode.getChildren(); | ||
let index = 0; | ||
function exportTextNode(node, textContent, parentNode) { | ||
let output = textContent; | ||
const applied = new Set(); | ||
const textTransformers = getAllMarkdownCriteriaForTextNodes(); | ||
for (const listItemNode of children) { | ||
if (list.$isListItemNode(listItemNode)) { | ||
if (listItemNode.getChildrenSize() === 1) { | ||
const firstChild = listItemNode.getFirstChild(); | ||
for (const transformer of textTransformers) { | ||
const { | ||
exportFormat: format, | ||
exportTag: tag, | ||
exportTagClose: tagClose = tag | ||
} = transformer; | ||
if (format != null && tag != null && tagClose != null && hasFormat(node, format) && !applied.has(format)) { | ||
// Multiple tags might be used for the same format (*, _) | ||
applied.add(format); // Prevent adding extra wrapping tags if it's already | ||
// added by a previous sibling (or will be closed by the next one) | ||
const previousNode = getTextSibling(node, true); | ||
if (!hasFormat(previousNode, format)) { | ||
output = tag + output; | ||
if (list.$isListNode(firstChild)) { | ||
output.push(listExport(firstChild, exportChildren, depth + 1)); | ||
continue; | ||
} | ||
} | ||
const nextNode = getTextSibling(node, false); | ||
if (!hasFormat(nextNode, format)) { | ||
output += tagClose; | ||
} | ||
const indent = ' '.repeat(depth * LIST_INDENT_SIZE); | ||
const listType = listNode.getListType(); | ||
const prefix = listType === 'number' ? `${listNode.getStart() + index}. ` : listType === 'check' ? `- [${listItemNode.getChecked() ? 'x' : ' '}] ` : '- '; | ||
output.push(indent + prefix + exportChildren(listItemNode)); | ||
index++; | ||
} | ||
} | ||
return output; | ||
} // Finds text sibling including cases for inline elements | ||
return output.join('\n'); | ||
}; | ||
const HEADING = { | ||
export: (node, exportChildren) => { | ||
if (!richText.$isHeadingNode(node)) { | ||
return null; | ||
} | ||
function getTextSibling(node, backward) { | ||
let sibling = backward ? node.getPreviousSibling() : node.getNextSibling(); | ||
const level = Number(node.getTag().slice(1)); | ||
return '#'.repeat(level) + ' ' + exportChildren(node); | ||
}, | ||
regExp: /^(#{1,6})\s/, | ||
replace: replaceWithBlock(match => { | ||
// $FlowFixMe[incompatible-cast] | ||
const tag = 'h' + match[1].length; | ||
return richText.$createHeadingNode(tag); | ||
}), | ||
type: 'element' | ||
}; | ||
// Blockquote transformer: a "> " prefix creates a QuoteNode on input, and a
// QuoteNode exports back as "> " followed by its serialized children.
const QUOTE = {
  export: (node, exportChildren) => {
    if (!richText.$isQuoteNode(node)) {
      return null;
    }
    return '> ' + exportChildren(node);
  },
  regExp: /^>\s/,
  replace: replaceWithBlock(() => richText.$createQuoteNode()),
  type: 'element'
};
const CODE = { | ||
export: node => { | ||
if (!code.$isCodeNode(node)) { | ||
return null; | ||
} | ||
if (!sibling) { | ||
const parent = node.getParentOrThrow(); | ||
const textContent = node.getTextContent(); | ||
return '```' + (node.getLanguage() || '') + (textContent ? '\n' + textContent : '') + '\n' + '```'; | ||
}, | ||
regExp: /^```(\w{1,10})?\s/, | ||
replace: replaceWithBlock(match => { | ||
return code.$createCodeNode(match ? match[1] : undefined); | ||
}), | ||
type: 'element' | ||
}; | ||
// Bullet-list transformer: matches "- ", "* " or "+ " prefixes and exports
// list nodes through the shared listExport serializer.
const UNORDERED_LIST = {
  export: (node, exportChildren) => {
    if (!list.$isListNode(node)) {
      return null;
    }
    return listExport(node, exportChildren, 0);
  },
  regExp: /^(\s*)[-*+]\s/,
  replace: listReplace('bullet'),
  type: 'element'
};
// Checklist transformer: matches "- [ ] " / "- [x] " prefixes (case-insensitive)
// and exports list nodes through the shared listExport serializer.
const CHECK_LIST = {
  export: (node, exportChildren) => {
    if (!list.$isListNode(node)) {
      return null;
    }
    return listExport(node, exportChildren, 0);
  },
  regExp: /^(\s*)(?:-\s)?\s?(\[(\s|x)?\])\s/i,
  replace: listReplace('check'),
  type: 'element'
};
// Numbered-list transformer: matches "1. "-style prefixes (the number becomes
// the list start) and exports list nodes through listExport.
const ORDERED_LIST = {
  export: (node, exportChildren) => {
    if (!list.$isListNode(node)) {
      return null;
    }
    return listExport(node, exportChildren, 0);
  },
  regExp: /^(\s*)(\d{1,})\.\s/,
  replace: listReplace('number'),
  type: 'element'
};
// Inline code span: `code`. Registered first in TEXT_FORMAT_TRANSFORMERS so
// code formatting wins before other inline transforms can match inside it.
const INLINE_CODE = {
  format: ['code'],
  tag: '`',
  type: 'text-format'
};
// ***bold italic*** via asterisks.
const BOLD_ITALIC_STAR = {
  format: ['bold', 'italic'],
  tag: '***',
  type: 'text-format'
};
// ___bold italic___ via underscores.
const BOLD_ITALIC_UNDERSCORE = {
  format: ['bold', 'italic'],
  tag: '___',
  type: 'text-format'
};
// **bold**
const BOLD_STAR = {
  format: ['bold'],
  tag: '**',
  type: 'text-format'
};
// __bold__
const BOLD_UNDERSCORE = {
  format: ['bold'],
  tag: '__',
  type: 'text-format'
};
// ~~strikethrough~~
const STRIKETHROUGH = {
  format: ['strikethrough'],
  tag: '~~',
  type: 'text-format'
};
// *italic*
const ITALIC_STAR = {
  format: ['italic'],
  tag: '*',
  type: 'text-format'
};
// _italic_
const ITALIC_UNDERSCORE = {
  format: ['italic'],
  tag: '_',
  type: 'text-format'
}; // Order of text transformers matters:
//
// - code should go first as it prevents any transformations inside
// - then longer tags match (e.g. ** or __ should go before * or _)
if (parent.isInline()) { | ||
sibling = backward ? parent.getPreviousSibling() : parent.getNextSibling(); | ||
const LINK = { | ||
export: (node, exportChildren, exportFormat) => { | ||
if (!link.$isLinkNode(node)) { | ||
return null; | ||
} | ||
} | ||
while (sibling) { | ||
if (lexical.$isElementNode(sibling)) { | ||
if (!sibling.isInline()) { | ||
break; | ||
} | ||
const linkContent = `[${node.getTextContent()}](${node.getURL()})`; | ||
const firstChild = node.getFirstChild(); // Add text styles only if link has single text node inside. If it's more | ||
// then one we ignore it as markdown does not support nested styles for links | ||
const descendant = backward ? sibling.getLastDescendant() : sibling.getFirstDescendant(); | ||
if (lexical.$isTextNode(descendant)) { | ||
return descendant; | ||
} else { | ||
sibling = backward ? sibling.getPreviousSibling() : sibling.getNextSibling(); | ||
} | ||
if (node.getChildrenSize() === 1 && lexical.$isTextNode(firstChild)) { | ||
return exportFormat(firstChild, linkContent); | ||
} else { | ||
return linkContent; | ||
} | ||
}, | ||
importRegExp: /(?:\[([^[]+)\])(?:\(([^(]+)\))/, | ||
regExp: /(?:\[([^[]+)\])(?:\(([^(]+)\))$/, | ||
replace: (textNode, match) => { | ||
const [, linkText, linkUrl] = match; | ||
const linkNode = link.$createLinkNode(linkUrl); | ||
const linkTextNode = lexical.$createTextNode(linkText); | ||
linkTextNode.setFormat(textNode.getFormat()); | ||
linkNode.append(linkTextNode); | ||
textNode.replace(linkNode); | ||
}, | ||
trigger: ')', | ||
type: 'text-match' | ||
}; | ||
if (lexical.$isTextNode(sibling)) { | ||
return sibling; | ||
} | ||
} | ||
return null; | ||
} | ||
// True when `node` is a TextNode that carries the given format flag;
// false for null/undefined or non-text nodes.
function hasFormat(node, format) {
  if (!lexical.$isTextNode(node)) {
    return false;
  }
  return node.hasFormat(format);
}
/** | ||
@@ -1473,30 +908,19 @@ * Copyright (c) Meta Platforms, Inc. and affiliates. | ||
*/ | ||
function registerMarkdownShortcuts(editor, createHorizontalRuleNode) { | ||
// The priorTriggerState is compared against the currentTriggerState to determine | ||
// if the user has performed some typing event that warrants an auto format. | ||
// For example, typing "#" and then " ", shoud trigger an format. | ||
// However, given "#A B", where the user delets "A" should not. | ||
let priorTriggerState = null; | ||
return editor.registerUpdateListener(({ | ||
tags | ||
}) => { | ||
// Examine historic so that we are not running autoformatting within markdown. | ||
if (tags.has('historic') === false) { | ||
const currentTriggerState = getTriggerState(editor.getEditorState()); | ||
const scanningContext = currentTriggerState == null ? null : findScanningContext(editor, currentTriggerState, priorTriggerState); | ||
const ELEMENT_TRANSFORMERS = [HEADING, QUOTE, CODE, UNORDERED_LIST, ORDERED_LIST]; // Order of text format transformers matters: | ||
// | ||
// - code should go first as it prevents any transformations inside | ||
// - then longer tags match (e.g. ** or __ should go before * or _) | ||
if (scanningContext != null) { | ||
updateAutoFormatting(editor, scanningContext, createHorizontalRuleNode); | ||
} | ||
const TEXT_FORMAT_TRANSFORMERS = [INLINE_CODE, BOLD_ITALIC_STAR, BOLD_ITALIC_UNDERSCORE, BOLD_STAR, BOLD_UNDERSCORE, ITALIC_STAR, ITALIC_UNDERSCORE, STRIKETHROUGH]; | ||
const TEXT_MATCH_TRANSFORMERS = [LINK]; | ||
const TRANSFORMERS = [...ELEMENT_TRANSFORMERS, ...TEXT_FORMAT_TRANSFORMERS, ...TEXT_MATCH_TRANSFORMERS]; | ||
priorTriggerState = currentTriggerState; | ||
} else { | ||
priorTriggerState = null; | ||
} | ||
}); | ||
// Parses a markdown string into the current Lexical editor state using the
// given transformers (defaults to the full built-in TRANSFORMERS set).
function $convertFromMarkdownString(markdown, transformers = TRANSFORMERS) {
  return createMarkdownImport(transformers)(markdown);
}
function $convertFromMarkdownString(markdownString, editor, createHorizontalRuleNode) { | ||
if (convertStringToLexical(markdownString) != null) { | ||
convertMarkdownForElementNodes(editor, createHorizontalRuleNode); | ||
} | ||
// Serializes the current Lexical editor state to a markdown string using the
// given transformers (defaults to the full built-in TRANSFORMERS set).
function $convertToMarkdownString(transformers = TRANSFORMERS) {
  return createMarkdownExport(transformers)();
}
@@ -1506,2 +930,21 @@ | ||
// Public API surface: markdown conversion helpers, the individual transformer
// definitions, the grouped transformer arrays, and the shortcuts plugin entry.
exports.$convertToMarkdownString = $convertToMarkdownString;
exports.BOLD_ITALIC_STAR = BOLD_ITALIC_STAR;
exports.BOLD_ITALIC_UNDERSCORE = BOLD_ITALIC_UNDERSCORE;
exports.BOLD_STAR = BOLD_STAR;
exports.BOLD_UNDERSCORE = BOLD_UNDERSCORE;
exports.CHECK_LIST = CHECK_LIST;
exports.CODE = CODE;
exports.ELEMENT_TRANSFORMERS = ELEMENT_TRANSFORMERS;
exports.HEADING = HEADING;
exports.INLINE_CODE = INLINE_CODE;
exports.ITALIC_STAR = ITALIC_STAR;
exports.ITALIC_UNDERSCORE = ITALIC_UNDERSCORE;
exports.LINK = LINK;
exports.ORDERED_LIST = ORDERED_LIST;
exports.QUOTE = QUOTE;
exports.STRIKETHROUGH = STRIKETHROUGH;
exports.TEXT_FORMAT_TRANSFORMERS = TEXT_FORMAT_TRANSFORMERS;
exports.TEXT_MATCH_TRANSFORMERS = TEXT_MATCH_TRANSFORMERS;
exports.TRANSFORMERS = TRANSFORMERS;
exports.UNORDERED_LIST = UNORDERED_LIST;
exports.registerMarkdownShortcuts = registerMarkdownShortcuts;
@@ -7,40 +7,21 @@ /** | ||
*/ | ||
var h=require("@lexical/code"),n=require("@lexical/list"),p=require("lexical"),u=require("@lexical/link"),v=require("@lexical/rich-text"),w=require("@lexical/text");function y(a){throw Error(`Minified Lexical error #${a}; see codes.json for the full message or `+"use the non-minified dev environment for full errors and additional helpful warnings.");} | ||
const z=[{triggerKind:"space_trigger",triggerString:" "}],A={markdownFormatKind:null,regEx:/(?:)/,regExForAutoFormatting:/(?:)/,requiresParagraphStart:!1},B={...A,requiresParagraphStart:!0},D={...B,export:C(1),markdownFormatKind:"paragraphH1",regEx:/^(?:# )/,regExForAutoFormatting:/^(?:# )/},E={...B,export:C(2),markdownFormatKind:"paragraphH2",regEx:/^(?:## )/,regExForAutoFormatting:/^(?:## )/},aa={...B,export:C(3),markdownFormatKind:"paragraphH3",regEx:/^(?:### )/,regExForAutoFormatting:/^(?:### )/}, | ||
ba={...B,export:C(4),markdownFormatKind:"paragraphH4",regEx:/^(?:#### )/,regExForAutoFormatting:/^(?:#### )/},ca={...B,export:C(5),markdownFormatKind:"paragraphH5",regEx:/^(?:##### )/,regExForAutoFormatting:/^(?:##### )/},F={...B,export:da,markdownFormatKind:"paragraphCodeBlock",regEx:/^(```)$/,regExForAutoFormatting:/^(```)([a-z]*)( )/},G=[{...A,markdownFormatKind:"strikethrough_italic_bold",regEx:/(~~_\*\*)(\s*\b)([^~~_\*\*][^\*\*_~~]*)(\b\s*)(\*\*_~~)()/,regExForAutoFormatting:/(~~_\*\*)(\s*\b)([^~~_\*\*][^\*\*_~~]*)(\b\s*)(\*\*_~~)(\s)$/}, | ||
{...A,markdownFormatKind:"italic_bold",regEx:/(_\*\*)(\s*\b)([^_\*\*][^\*\*_]*)(\b\s*)(\*\*_)/,regExForAutoFormatting:/(_\*\*)(\s*\b)([^_\*\*][^\*\*_]*)(\b\s*)(\*\*_)(\s)$/},{...A,markdownFormatKind:"strikethrough_italic",regEx:/(~~_)(\s*)([^~~_][^_~~]*)(\s*)(_~~)/,regExForAutoFormatting:/(~~_)(\s*)([^~~_][^_~~]*)(\s*)(_~~)(\s)$/},{...A,markdownFormatKind:"strikethrough_bold",regEx:/(~~\*\*)(\s*\b)([^~~\*\*][^\*\*~~]*)(\b\s*)(\*\*~~)/,regExForAutoFormatting:/(~~\*\*)(\s*\b)([^~~\*\*][^\*\*~~]*)(\b\s*)(\*\*~~)(\s)$/}, | ||
{...A,exportFormat:"code",exportTag:"`",markdownFormatKind:"code",regEx:/(`)(\s*)([^`]*)(\s*)(`)()/,regExForAutoFormatting:/(`)(\s*\b)([^`]*)(\b\s*)(`)(\s)$/},{...A,exportFormat:"bold",exportTag:"**",markdownFormatKind:"bold",regEx:/(\*\*)(\s*)([^\*\*]*)(\s*)(\*\*)()/,regExForAutoFormatting:/(\*\*)(\s*\b)([^\*\*]*)(\b\s*)(\*\*)(\s)$/},{...A,exportFormat:"italic",exportTag:"*",markdownFormatKind:"italic",regEx:/(\*)(\s*)([^\*]*)(\s*)(\*)()/,regExForAutoFormatting:/(\*)(\s*\b)([^\*]*)(\b\s*)(\*)(\s)$/}, | ||
{...A,exportFormat:"bold",exportTag:"_",markdownFormatKind:"bold",regEx:/(__)(\s*)([^__]*)(\s*)(__)()/,regExForAutoFormatting:/(__)(\s*)([^__]*)(\s*)(__)(\s)$/},{...A,exportFormat:"italic",exportTag:"_",markdownFormatKind:"italic",regEx:/(_)()([^_]*)()(_)()/,regExForAutoFormatting:/(_)()([^_]*)()(_)(\s)$/},{...A,exportFormat:"underline",exportTag:"<u>",exportTagClose:"</u>",markdownFormatKind:"underline",regEx:/(<u>)(\s*)([^<]*)(\s*)(<\/u>)()/,regExForAutoFormatting:/(<u>)(\s*\b)([^<]*)(\b\s*)(<\/u>)(\s)$/}, | ||
{...A,exportFormat:"strikethrough",exportTag:"~~",markdownFormatKind:"strikethrough",regEx:/(~~)(\s*)([^~~]*)(\s*)(~~)()/,regExForAutoFormatting:/(~~)(\s*\b)([^~~]*)(\b\s*)(~~)(\s)$/},{...A,markdownFormatKind:"link",regEx:/(\[)([^\]]*)(\]\()([^)]*)(\)*)()/,regExForAutoFormatting:/(\[)([^\]]*)(\]\()([^)]*)(\)*)(\s)$/}],I=[D,E,aa,ba,ca,{...B,export:ea,markdownFormatKind:"paragraphBlockQuote",regEx:/^(?:> )/,regExForAutoFormatting:/^(?:> )/},{...B,export:H,markdownFormatKind:"paragraphUnorderedList", | ||
regEx:/^(\s{0,10})(?:- )/,regExForAutoFormatting:/^(\s{0,10})(?:- )/},{...B,export:H,markdownFormatKind:"paragraphUnorderedList",regEx:/^(\s{0,10})(?:\* )/,regExForAutoFormatting:/^(\s{0,10})(?:\* )/},{...B,export:H,markdownFormatKind:"paragraphOrderedList",regEx:/^(\s{0,10})(\d+)\.\s/,regExForAutoFormatting:/^(\s{0,10})(\d+)\.\s/},F,{...B,markdownFormatKind:"horizontalRule",regEx:/^(?:\*\*\*)$/,regExForAutoFormatting:/^(?:\*\*\* )/},{...B,markdownFormatKind:"horizontalRule",regEx:/^(?:---)$/,regExForAutoFormatting:/^(?:--- )/}], | ||
fa=[...I,...G];function J(a,d,c,b){return{currentElementNode:null,editor:a,isAutoFormatting:d,isWithinCodeBlock:!1,joinedText:null,markdownCriteria:{markdownFormatKind:"noTransformation",regEx:/(?:)/,regExForAutoFormatting:/(?:)/,requiresParagraphStart:null},patternMatchResults:{regExCaptureGroups:[]},textNodeWithOffset:c,triggerState:b}} | ||
function K(a){a.joinedText=null;a.markdownCriteria={markdownFormatKind:"noTransformation",regEx:/(?:)/,regExForAutoFormatting:/(?:)/,requiresParagraphStart:null};a.patternMatchResults={regExCaptureGroups:[]};a.triggerState=null;a.textNodeWithOffset=null;return a} | ||
function L(a,d,c){if(!0===a.requiresParagraphStart)return c=M(d),null===c.node.getPreviousSibling()?(c=c.node.getTextContent(),a=N(c,!0,!1,d.isAutoFormatting?a.regExForAutoFormatting:a.regEx)):a=null,a;null==d.joinedText&&(p.$isElementNode(c)?null==d.joinedText&&(d.joinedText=w.$joinTextNodesInElementNode(c,"\u0004",M(d))):y(52,c.__key));return N(d.joinedText,!1,!0===a.regExForAutoFormatting,d.isAutoFormatting?a.regExForAutoFormatting:a.regEx)} | ||
function N(a,d,c,b){const e={regExCaptureGroups:[]};b=a.match(b);if(null!==b&&0<b.length&&(!1===d||0===b.index)&&(!1===c||b.index+b[0].length===a.length)){a=b.length;d=b.index;for(c=0;c<a;c++){const f=b[c];e.regExCaptureGroups.push({offsetInParent:d,text:f});0<c&&(d+=f.length)}return e}return null}function M(a){a=a.textNodeWithOffset;null==a&&y(82);return a} | ||
function ha(a,d,c){var b=null,e=d.getChildren();const f=a.markdownCriteria,g=a.patternMatchResults;if(null!=f.markdownFormatKind)switch(f.markdownFormatKind){case "paragraphH1":b=v.$createHeadingNode("h1");b.append(...e);break;case "paragraphH2":b=v.$createHeadingNode("h2");b.append(...e);break;case "paragraphH3":b=v.$createHeadingNode("h3");b.append(...e);break;case "paragraphH4":b=v.$createHeadingNode("h4");b.append(...e);break;case "paragraphH5":b=v.$createHeadingNode("h5");b.append(...e);break; | ||
case "paragraphBlockQuote":b=v.$createQuoteNode();b.append(...e);break;case "paragraphUnorderedList":return O(d,e,g,"ul"),{newNode:null,shouldDelete:!1};case "paragraphOrderedList":return b=1<g.regExCaptureGroups.length?g.regExCaptureGroups[g.regExCaptureGroups.length-1].text:"1",a=a.isAutoFormatting?parseInt(b,10):void 0,O(d,e,g,"ol",a),{newNode:null,shouldDelete:!1};case "paragraphCodeBlock":if(!1===a.isAutoFormatting){if(0<a.patternMatchResults.regExCaptureGroups.length)return a.isWithinCodeBlock= | ||
!0!==a.isWithinCodeBlock,a.currentElementNode=null,{newNode:null,shouldDelete:!0};if(a.isWithinCodeBlock){if(null==a.currentElementNode)return d=h.$createCodeNode(),d.append(...e),a.currentElementNode=d,{newNode:d,shouldDelete:!1};null!=a.currentElementNode&&(d=a.currentElementNode,a=p.$createLineBreakNode(),d.append(a),e.length&&d.append(a),d.append(...e))}return{newNode:null,shouldDelete:!0}}null!=a.triggerState&&a.triggerState.isCodeBlock?b=p.$createParagraphNode():(b=h.$createCodeNode(),d=3<= | ||
g.regExCaptureGroups.length?g.regExCaptureGroups[2].text:null,null!=d&&0<d.length&&b.setLanguage(d));b.append(...e);break;case "horizontalRule":null!=c&&(e=c(),d.insertBefore(e))}return{newNode:b,shouldDelete:!1}} | ||
function O(a,d,c,b,e){const f=n.$createListItemNode();c=(c=c.regExCaptureGroups[0].text.match(/^\s*/))?Math.floor(c[0].length/4):0;f.append(...d);d=a.getPreviousSibling();n.$isListNode(d)&&d.getTag()===b?(d.append(f),a.remove()):(b=n.$createListNode(b,e),b.append(f),a.replace(b));c&&f.setIndent(c)} | ||
function P(a,d,c){if(!0===a.markdownCriteria.requiresParagraphStart){if(null!=a.textNodeWithOffset){var b=M(a);0<a.patternMatchResults.regExCaptureGroups.length&&(b=b.node.spliceText(0,a.patternMatchResults.regExCaptureGroups[0].text.length,"",!0),""===b.getTextContent()&&(b.selectPrevious(),b.remove()))}const {newNode:g,shouldDelete:k}=ha(a,d,c);k?d.remove():null!==g&&d.replace(g)}else if(c=a.markdownCriteria,null!=c.markdownFormatKind)if(b=ia(c.markdownFormatKind),null!=b){if(c=b,7===a.patternMatchResults.regExCaptureGroups.length){Q(5, | ||
5,a,d);Q(1,1,a,d);b=a.patternMatchResults.regExCaptureGroups;3<b.length||y(65);if(0!==b[3].text.length&&(b=R(3,3,!1,!0,a,d),null!=b&&(p.$setSelection(b),b=p.$getSelection(),p.$isRangeSelection(b))))for(var e=0;e<c.length;e++)b.formatText(c[e]);S(a,d)}}else if("link"===c.markdownFormatKind&&(c=a.patternMatchResults.regExCaptureGroups,7===c.length&&(e=c[2].text,c=c[4].text,0!==e.length&&0!==c.length))){Q(1,5,a,d);b=a.patternMatchResults.regExCaptureGroups;if(!(1>=b.length)){e={offsetInParent:b[1].offsetInParent, | ||
text:e};var f=R(1,1,!1,!1,a,d);if(null!=f&&(p.$setSelection(f),f=p.$getSelection(),null!=f&&p.$isRangeSelection(f)&&f.isCollapsed())){f.insertText(e.text);b.splice(1,0,e);e=e.text.length;f=b.length;for(let g=2;g<f;g++)b[g].offsetInParent+=e}}b=R(1,1,!1,!0,a,d);null!=b&&(p.$setSelection(b),a.editor.dispatchCommand(u.TOGGLE_LINK_COMMAND,c),S(a,d))}} | ||
function ia(a){switch(a){case "italic":case "bold":case "underline":case "strikethrough":case "code":return[a];case "strikethrough_italic_bold":return["strikethrough","italic","bold"];case "italic_bold":return["italic","bold"];case "strikethrough_italic":return["strikethrough","italic"];case "strikethrough_bold":return["strikethrough","bold"]}return null} | ||
function R(a,d,c,b,e,f){var g=e.patternMatchResults;e=g.regExCaptureGroups;var k=e.length;if(a>=k||d>=k)return null;k=g.regExCaptureGroups.length;2>k?g=0:(--k,g=g.regExCaptureGroups[k].offsetInParent+g.regExCaptureGroups[k].text.length);a=e[a];d=e[d];b=b?d.offsetInParent+d.text.length:d.offsetInParent;c=w.$findNodeWithOffsetFromJoinedText(c?a.offsetInParent+a.text.length:a.offsetInParent,g,1,f);b=w.$findNodeWithOffsetFromJoinedText(b,g,1,f);if(null==c&&null==b&&0===f.getChildren().length)return c= | ||
p.$createRangeSelection(),c.anchor.set(f.getKey(),0,"element"),c.focus.set(f.getKey(),0,"element"),c;if(null==c||null==b)return null;f=p.$createRangeSelection();f.anchor.set(c.node.getKey(),c.offset,"text");f.focus.set(b.node.getKey(),b.offset,"text");return f} | ||
function Q(a,d,c,b){const e=c.patternMatchResults.regExCaptureGroups;c=R(a,d,!1,!0,c,b);if(null!=c&&(p.$setSelection(c),c=p.$getSelection(),null!=c&&p.$isRangeSelection(c)&&!1===c.isCollapsed())){c.removeText();c=0;b=e.length;for(let f=a;f<b;f++){const g=e[f];f>a&&(g.offsetInParent-=c);f<=d&&(c+=g.text.length,g.text="")}}}function S(a,d){var c=a.patternMatchResults.regExCaptureGroups.length;2>c||(--c,a=R(c,c,!0,!0,a,d),null!=a&&p.$setSelection(a))} | ||
function C(a){return(d,c)=>v.$isHeadingNode(d)&&d.getTag()==="h"+a?"#".repeat(a)+" "+c(d):null}function H(a,d){return n.$isListNode(a)?T(a,d,0):null}function T(a,d,c){const b=[];var e=a.getChildren();let f=0;for(const g of e)if(n.$isListItemNode(g)){if(1===g.getChildrenSize()&&(e=g.getFirstChild(),n.$isListNode(e))){b.push(T(e,d,c+1));continue}e=" ".repeat(4*c);const k="ul"===a.getTag()?"- ":`${a.getStart()+f}. `;b.push(e+k+d(g));f++}return b.join("\n")} | ||
function ea(a,d){return v.$isQuoteNode(a)?"> "+d(a):null}function da(a){if(!h.$isCodeNode(a))return null;const d=a.getTextContent();return"```"+(a.getLanguage()||"")+(d?"\n"+d:"")+"\n```"}function ja(a,d,c){a.update(()=>{const b=M(d).node.getParentOrThrow();P(d,b,c)},{tag:"history-push"})} | ||
function ka(a,d){let c=null;a.getEditorState().read(()=>{var b=p.$getSelection();if(p.$isRangeSelection(b)){var e=b.anchor.getNode();b=p.$isTextNode(e)?{node:e,offset:b.anchor.offset}:null}else b=null;if(null!==b){b=J(a,!0,b,d);a:{e=!1===d.isParentAListItemNode?fa:G;const f=b.triggerState,g=e.length;for(let k=0;k<g;k++){const l=e[k];if(null!=f&&!1===f.isCodeBlock||"paragraphCodeBlock"===l.markdownFormatKind){const m=L(l,b,M(b).node.getParentOrThrow());if(null!=m){e={markdownCriteria:l,patternMatchResults:m}; | ||
break a}}}e={markdownCriteria:null,patternMatchResults:null}}null!==e.markdownCriteria&&null!==e.patternMatchResults&&(c=b,c.markdownCriteria=e.markdownCriteria,c.patternMatchResults=e.patternMatchResults)}});return c} | ||
function la(a){let d=null;a.read(()=>{const c=p.$getSelection();if(p.$isRangeSelection(c)&&c.isCollapsed()){var b=c.anchor.getNode(),e=b.getParent(),f=n.$isListItemNode(e);d={anchorOffset:c.anchor.offset,hasParentNode:null!==e,isCodeBlock:h.$isCodeNode(b),isParentAListItemNode:f,isSelectionCollapsed:!0,isSimpleText:p.$isTextNode(b)&&b.isSimpleText(),nodeKey:b.getKey(),textContent:b.getTextContent()}}});return d} | ||
function U(a,d,c){var b=d.getFirstChild();if(p.$isTextNode(b))a:{K(a);b=G.length;var e=d.getTextContent(),f=0===e.length;let m=0;for(;!f;){f=!0;for(let t=m;t<b;t++){var g=G[t];if(null==a.textNodeWithOffset){b:{var k=d.getChildren();var l=k.length;for(--l;0<=l;l--)if(p.$isTextNode(k[l])){k=k[l];break b}k=null}if(null==k)break a;a.textNodeWithOffset={node:k,offset:k.getTextContent().length}}k=L(g,a,d);if(null!=k){a.markdownCriteria=g;a.patternMatchResults=k;P(a,d,c);K(a);g=d.getTextContent();if(0=== | ||
g.length)break a;if(g!==e){e=g;m=t;f=!1;break}}}}}else for(d=d.getChildren(),b=d.length,e=0;e<b;e++)f=d[e],p.$isElementNode(f)&&U(a,f,c)}function ma(a){for(const d of I)if(null!=d.export){const c=d.export(a,b=>V(b));if(null!=c)return c}return p.$isElementNode(a)?V(a):null} | ||
function V(a){const d=[];a=a.getChildren();for(const c of a)if(p.$isLineBreakNode(c))d.push("\n");else if(p.$isTextNode(c))d.push(W(c,c.getTextContent()));else if(u.$isLinkNode(c)){a=`[${c.getTextContent()}](${c.getURL()})`;const b=c.getFirstChild();1===c.getChildrenSize()&&p.$isTextNode(b)?d.push(W(b,a)):d.push(a)}else p.$isElementNode(c)&&d.push(V(c));return d.join("")} | ||
function W(a,d){const c=new Set;for(const e of G){const {exportFormat:f,exportTag:g,exportTagClose:k=g}=e;if(null!=f&&null!=g&&null!=k&&Y(a,f)&&!c.has(f)){c.add(f);var b=Z(a,!0);Y(b,f)||(d=g+d);b=Z(a,!1);Y(b,f)||(d+=k)}}return d} | ||
function Z(a,d){let c=d?a.getPreviousSibling():a.getNextSibling();c||(a=a.getParentOrThrow(),a.isInline()&&(c=d?a.getPreviousSibling():a.getNextSibling()));for(;c;){if(p.$isElementNode(c)){if(!c.isInline())break;a=d?c.getLastDescendant():c.getFirstDescendant();if(p.$isTextNode(a))return a;c=d?c.getPreviousSibling():c.getNextSibling()}if(p.$isTextNode(c))return c}return null}function Y(a,d){return p.$isTextNode(a)&&a.hasFormat(d)} | ||
exports.$convertFromMarkdownString=function(a,d,c){if(a.length){var b=[];a=a.split("\n");var e=a.length;for(var f=0;f<e;f++)0<a[f].length?b.push(p.$createParagraphNode().append(p.$createTextNode(a[f]))):b.push(p.$createParagraphNode());b.length?(a=p.$getRoot(),a.clear(),a.append(...b),b=a):b=null}else b=null;if(null!=b){d=J(d,!1,null,null);b=p.$getRoot();a=!1;for(e=0;!a;){a=!0;var g=b.getChildren(),k=g.length;for(var l=e;l<k;l++){var m=g[l];if(p.$isElementNode(m)){f=d;var t=c;var r=m.getTextContent(); | ||
if(p.$isParagraphNode(m)){var q=m.getFirstChild(),x=p.$isTextNode(q);if(!0===f.isWithinCodeBlock)null!=q&&x&&(f.textNodeWithOffset={node:q,offset:0},q=F,r=N(r,!0,!1,f.isAutoFormatting?q.regExForAutoFormatting:q.regEx),null!=r&&(f.patternMatchResults=r)),f.markdownCriteria=F,P(f,m,t);else if(m.getChildren().length)for(r=I.length,f.joinedText=m.getTextContent(),null!=q&&x||y(80),f.textNodeWithOffset={node:q,offset:0},q=0;q<r;q++){x=I[q];if(!1===x.requiresParagraphStart)break;const X=L(x,f,M(f).node.getParentOrThrow()); | ||
if(null!=X){f.markdownCriteria=x;f.patternMatchResults=X;P(f,m,t);break}}}}K(d);if(b.getChildren().length!==k){e=l;a=!1;break}}}a=!1;for(e=0;!a;)for(a=!0,f=b.getChildren(),g=f.length,k=e;k<g;k++)l=f[k],p.$isElementNode(l)&&U(d,l,c),K(d)}};exports.$convertToMarkdownString=function(){const a=[];var d=p.$getRoot().getChildren();for(const c of d)d=ma(c),null!=d&&a.push(d);return a.join("\n")}; | ||
exports.registerMarkdownShortcuts=function(a,d){let c=null;return a.registerUpdateListener(({tags:b})=>{if(!1===b.has("historic")){b=la(a.getEditorState());if(null==b)var e=null;else a:{e=b;var f=c;if(null==e||null==f)e=null;else{var g=z.length;for(let k=0;k<g;k++){const l=z[k].triggerString,m=l.length,t=e.textContent.length,r=e.anchorOffset-m;if(!1===(!0===e.hasParentNode&&e.isSimpleText&&e.isSelectionCollapsed&&e.anchorOffset!==f.anchorOffset&&0<=r&&r+m<=t&&e.textContent.substr(r,m)===l&&e.textContent!== | ||
f.textContent)){e=null;break a}}e=ka(a,e)}}null!=e&&ja(a,e,d);c=b}else c=null})}; | ||
var n=require("lexical"),y=require("@lexical/code"),D=require("@lexical/link"),E=require("@lexical/list"),F=require("@lexical/rich-text");function G(a,b){const c={};for(const d of a)a=b(d),c[a]?c[a].push(d):c[a]=[d];return c}function I(a){a=G(a,b=>b.type);return{element:a.element,textFormat:a["text-format"],textMatch:a["text-match"]}} | ||
function aa(a){const b=I(a),c=b.textFormat.filter(d=>1===d.format.length);return()=>{const d=[];var e=n.$getRoot().getChildren();for(const f of e)e=ba(f,b.element,c,b.textMatch),null!=e&&d.push(e);return d.join("\n")}}function ba(a,b,c,d){for(const e of b)if(b=e.export(a,f=>J(f,c,d)),null!=b)return b;return n.$isElementNode(a)?J(a,c,d):null} | ||
function J(a,b,c){const d=[];a=a.getChildren();a:for(const e of a)if(n.$isLineBreakNode(e))d.push("\n");else if(n.$isTextNode(e))d.push(K(e,e.getTextContent(),b));else{for(const f of c)if(a=f.export(e,k=>J(k,b,c),(k,h)=>K(k,h,b)),null!=a){d.push(a);continue a}n.$isElementNode(e)&&d.push(J(e,b,c))}return d.join("")}function K(a,b,c){const d=new Set;for(const f of c){c=f.format[0];const k=f.tag;if(L(a,c)&&!d.has(c)){d.add(c);var e=M(a,!0);L(e,c)||(b=k+b);e=M(a,!1);L(e,c)||(b+=k)}}return b} | ||
function M(a,b){let c=b?a.getPreviousSibling():a.getNextSibling();c||(a=a.getParentOrThrow(),a.isInline()&&(c=b?a.getPreviousSibling():a.getNextSibling()));for(;c;){if(n.$isElementNode(c)){if(!c.isInline())break;a=b?c.getLastDescendant():c.getFirstDescendant();if(n.$isTextNode(a))return a;c=b?c.getPreviousSibling():c.getNextSibling()}if(n.$isTextNode(c))return c}return null}function L(a,b){return n.$isTextNode(a)&&a.hasFormat(b)}const N=/^```(\w{1,10})?\s?$/; | ||
function ca(a){const b=I(a),c=da(b.textFormat);return d=>{d=d.split("\n");const e=d.length,f=n.$getRoot();f.clear();for(let q=0;q<e;q++){var k=d[q];a:{var h=d,r=q;var u=f;var p=h[r].match(N);if(p)for(var t=r,m=h.length;++t<m;)if(h[t].match(N)){p=y.$createCodeNode(p[1]);h=n.$createTextNode(h.slice(r+1,t).join("\n"));p.append(h);u.append(p);u=[p,t];break a}u=[null,r]}const [l,g]=u;if(null!=l)q=g;else{p=f;m=b.element;u=c;t=b.textMatch;h=n.$createTextNode(k);r=n.$createParagraphNode();r.append(h);p.append(r); | ||
for(const {regExp:w,replace:v}of m)if(p=k.match(w)){h.setTextContent(k.slice(p[0].length));v(r,[h],p,!0);break}O(h,u,t)}}f.selectEnd()}} | ||
function O(a,b,c){const d=a.getTextContent();a:{var e=d.match(b.openTagsRegExp);if(null!=e)for(f of e)if(e=b.fullMatchRegExpByTag[f.replace(/^\s/,"")],null!=e&&(e=d.match(e),null!=e)){var f=e;break a}f=null}if(f){if(f[0]===d)var k=a;else{var h=f.index,r=h+f[0].length;0===h?[k,p]=a.splitText(r):[,k,p]=a.splitText(h,r)}k.setTextContent(f[2]);if(h=b.transformersByTag[f[1]])for(var u of h.format)k.hasFormat(u)||k.toggleFormat(u);k.hasFormat("code")||O(k,b,c);p&&O(p,b,c)}else a:for(b=a;b;){for(h of c)if(k= | ||
b.getTextContent().match(h.importRegExp)){var p=k.index;u=p+k[0].length;0===p?[r,b]=b.splitText(u):[,r,b]=b.splitText(p,u);h.replace(r,k);continue a}break}}function da(a){const b={},c={},d=[];for(const e of a){({tag:a}=e);b[a]=e;const f=a.replace(/(\*|\^)/g,"\\$1");d.push(f);c[a]=new RegExp(`(${f})(?![${f}\\s])(.*?[^${f}\\s])${f}(?!${f})`)}return{fullMatchRegExpByTag:c,openTagsRegExp:new RegExp("("+d.join("|")+")","g"),transformersByTag:b}} | ||
function P(a,b,c){const d=c.length;for(;b>=d;b--){const e=b-d;if(Q(a,e,c,0,d)&&" "!==a[e+d])return e}return-1}function Q(a,b,c,d,e){for(let f=0;f<e;f++)if(a[b+f]!==c[d+f])return!1;return!0} | ||
const R=a=>(b,c,d)=>{d=a(d);d.append(...c);b.replace(d);d.select(0,0)},S=a=>(b,c,d)=>{var e=b.getPreviousSibling();const f=E.$createListItemNode("check"===a?"x"===d[3]:void 0);E.$isListNode(e)&&e.getListType()===a?(e.append(f),b.remove()):(e=E.$createListNode(a,"number"===a?Number(d[2]):void 0),e.append(f),b.replace(e));f.append(...c);f.select(0,0);(b=Math.floor(d[1].length/4))&&f.setIndent(b)},T=(a,b,c)=>{const d=[];var e=a.getChildren();let f=0;for(const h of e)if(E.$isListItemNode(h)){if(1===h.getChildrenSize()&& | ||
(e=h.getFirstChild(),E.$isListNode(e))){d.push(T(e,b,c+1));continue}e=" ".repeat(4*c);var k=a.getListType();k="number"===k?`${a.getStart()+f}. `:"check"===k?`- [${h.getChecked()?"x":" "}] `:"- ";d.push(e+k+b(h));f++}return d.join("\n")},U={export:(a,b)=>{if(!F.$isHeadingNode(a))return null;const c=Number(a.getTag().slice(1));return"#".repeat(c)+" "+b(a)},regExp:/^(#{1,6})\s/,replace:R(a=>F.$createHeadingNode("h"+a[1].length)),type:"element"},V={export:(a,b)=>F.$isQuoteNode(a)?"> "+b(a):null,regExp:/^>\s/, | ||
replace:R(()=>F.$createQuoteNode()),type:"element"},W={export:a=>{if(!y.$isCodeNode(a))return null;const b=a.getTextContent();return"```"+(a.getLanguage()||"")+(b?"\n"+b:"")+"\n```"},regExp:/^```(\w{1,10})?\s/,replace:R(a=>y.$createCodeNode(a?a[1]:void 0)),type:"element"},X={export:(a,b)=>E.$isListNode(a)?T(a,b,0):null,regExp:/^(\s*)[-*+]\s/,replace:S("bullet"),type:"element"},ea={export:(a,b)=>E.$isListNode(a)?T(a,b,0):null,regExp:/^(\s*)(?:-\s)?\s?(\[(\s|x)?\])\s/i,replace:S("check"),type:"element"}, | ||
Y={export:(a,b)=>E.$isListNode(a)?T(a,b,0):null,regExp:/^(\s*)(\d{1,})\.\s/,replace:S("number"),type:"element"},fa={format:["code"],tag:"`",type:"text-format"},ha={format:["bold","italic"],tag:"***",type:"text-format"},ia={format:["bold","italic"],tag:"___",type:"text-format"},ja={format:["bold"],tag:"**",type:"text-format"},ka={format:["bold"],tag:"__",type:"text-format"},la={format:["strikethrough"],tag:"~~",type:"text-format"},ma={format:["italic"],tag:"*",type:"text-format"},na={format:["italic"], | ||
tag:"_",type:"text-format"},oa={export:(a,b,c)=>{if(!D.$isLinkNode(a))return null;b=`[${a.getTextContent()}](${a.getURL()})`;const d=a.getFirstChild();return 1===a.getChildrenSize()&&n.$isTextNode(d)?c(d,b):b},importRegExp:/(?:\[([^[]+)\])(?:\(([^(]+)\))/,regExp:/(?:\[([^[]+)\])(?:\(([^(]+)\))$/,replace:(a,b)=>{const [,c,d]=b;b=D.$createLinkNode(d);const e=n.$createTextNode(c);e.setFormat(a.getFormat());b.append(e);a.replace(b)},trigger:")",type:"text-match"},pa=[U,V,W,X,Y],qa=[fa,ha,ia,ja,ka,ma, | ||
na,la],ra=[oa],Z=[...pa,...qa,...ra];exports.$convertFromMarkdownString=function(a,b=Z){return ca(b)(a)};exports.$convertToMarkdownString=function(a=Z){return aa(a)()};exports.BOLD_ITALIC_STAR=ha;exports.BOLD_ITALIC_UNDERSCORE=ia;exports.BOLD_STAR=ja;exports.BOLD_UNDERSCORE=ka;exports.CHECK_LIST=ea;exports.CODE=W;exports.ELEMENT_TRANSFORMERS=pa;exports.HEADING=U;exports.INLINE_CODE=fa;exports.ITALIC_STAR=ma;exports.ITALIC_UNDERSCORE=na;exports.LINK=oa;exports.ORDERED_LIST=Y;exports.QUOTE=V; | ||
exports.STRIKETHROUGH=la;exports.TEXT_FORMAT_TRANSFORMERS=qa;exports.TEXT_MATCH_TRANSFORMERS=ra;exports.TRANSFORMERS=Z;exports.UNORDERED_LIST=X; | ||
exports.registerMarkdownShortcuts=function(a,b){const c=I(b),d=G(c.textFormat,({tag:f})=>f[f.length-1]),e=G(c.textMatch,({trigger:f})=>f);return a.registerUpdateListener(({tags:f,dirtyLeaves:k,editorState:h,prevEditorState:r})=>{if(!f.has("historic")){var u=h.read(n.$getSelection);f=r.read(n.$getSelection);if(n.$isRangeSelection(f)&&n.$isRangeSelection(u)&&u.isCollapsed()){r=u.anchor.key;var p=u.anchor.offset,t=h._nodeMap.get(r);n.$isTextNode(t)&&k.has(r)&&(1===p||p===f.anchor.offset+1)&&a.update(()=> | ||
{if(!t.hasFormat("code")){var m=t.getParent();if(null!==m&&!y.$isCodeNode(m)){var q=u.anchor.offset;b:{var l=c.element,g=m.getParent();if(n.$isRootNode(g)&&m.getFirstChild()===t&&(g=t.getTextContent()," "===g[q-1]))for(const {regExp:H,replace:B}of l)if((l=g.match(H))&&l[0].length===q){g=t.getNextSiblings();const [z,x]=t.splitText(q);z.remove();g=x?[x,...g]:g;B(m,g,l,!1);m=!0;break b}m=!1}if(!m){b:{l=t.getTextContent();m=e[l[q-1]];if(null!=m){q<l.length&&(l=l.slice(0,q));for(v of m)if(m=l.match(v.regExp), | ||
null!==m){l=m.index;g=l+m[0].length;var w=void 0;0===l?[w]=t.splitText(g):[,w]=t.splitText(l,g);w.selectNext();v.replace(w,m);var v=!0;break b}}v=!1}if(!v)b:{g=t.getTextContent();--q;const H=g[q];if(v=d[H])for(const B of v){var {tag:A}=B;m=A.length;const z=q-m+1;if(1<m&&!Q(g,z,A,0,m))continue;if(" "===g[z-1])continue;w=v=t;l=P(g,z,A);let x=w;for(;0>l&&(x=x.getPreviousSibling())&&!n.$isLineBreakNode(x);)n.$isTextNode(x)&&(l=x.getTextContent(),w=x,l=P(l,l.length,A));if(!(0>l||w===v&&l+m===z||(A=w.getTextContent(), | ||
0<l&&A[l-1]===H))){g=v.getTextContent();g=g.slice(0,z)+g.slice(q+1);v.setTextContent(g);g=w===v?g:A;w.setTextContent(g.slice(0,l)+g.slice(l+m));g=n.$createRangeSelection();n.$setSelection(g);q=q-m*(w===v?2:1)+1;g.anchor.set(w.__key,l,"text");g.focus.set(v.__key,q,"text");for(const C of B.format)g.hasFormat(C)||g.formatText(C);g.anchor.set(g.focus.key,g.focus.offset,g.focus.type);for(const C of B.format)g.hasFormat(C)&&g.toggleFormat(C);break b}}}}}}})}}})}; |
@@ -11,14 +11,14 @@ { | ||
"license": "MIT", | ||
"version": "0.2.5", | ||
"version": "0.2.6", | ||
"main": "LexicalMarkdown.js", | ||
"peerDependencies": { | ||
"lexical": "0.2.5" | ||
"lexical": "0.2.6" | ||
}, | ||
"dependencies": { | ||
"@lexical/utils": "0.2.5", | ||
"@lexical/code": "0.2.5", | ||
"@lexical/text": "0.2.5", | ||
"@lexical/rich-text": "0.2.5", | ||
"@lexical/list": "0.2.5", | ||
"@lexical/link": "0.2.5" | ||
"@lexical/utils": "0.2.6", | ||
"@lexical/code": "0.2.6", | ||
"@lexical/text": "0.2.6", | ||
"@lexical/rich-text": "0.2.6", | ||
"@lexical/list": "0.2.6", | ||
"@lexical/link": "0.2.6" | ||
}, | ||
@@ -25,0 +25,0 @@ "repository": { |
# `@lexical/markdown` | ||
This package contains markdown helpers and functionality for Lexical. | ||
This package contains markdown helpers for Lexical: import, export and shortcuts. | ||
The package focuses on markdown conversion. | ||
## Import and export | ||
```js | ||
import { | ||
$convertFromMarkdownString, | ||
$convertToMarkdownString, | ||
TRANSFORMERS, | ||
} from '@lexical/markdown'; | ||
The package has 3 main functions: | ||
editor.update(() => { | ||
const markdown = $convertToMarkdownString(TRANSFORMERS); | ||
... | ||
}); | ||
1. It imports a string and converts into Lexical and then converts markup within the imported nodes. See convertFromPlainTextUtils.js | ||
2. It exports Lexical to a plain text with markup. See convertToPlainTextUtils.js | ||
3. It autoformats newly typed text by converting the markdown + some trigger to the appropriate stylized text. See autoFormatUtils.js | ||
editor.update(() => { | ||
$convertFromMarkdownString(markdown, TRANSFORMERS); | ||
}); | ||
``` | ||
It can also be used for initializing editor's state from markdown string. Here's an example with react `<RichTextPlugin>` | ||
```jsx | ||
<LexicalComposer> | ||
<RichTextPlugin initialEditorState={() => { | ||
$convertFromMarkdownString(markdown, TRANSFORMERS); | ||
}} /> | ||
</LexicalComposer> | ||
``` | ||
## Shortcuts | ||
Can use `<LexicalMarkdownShortcutPlugin>` if using React | ||
```jsx | ||
import { TRANSFORMERS } from '@lexical/markdown'; | ||
import LexicalMarkdownShortcutPlugin from '@lexical/react/LexicalMarkdownShortcutPlugin'; | ||
<LexicalComposer> | ||
<LexicalMarkdownShortcutPlugin transformers={TRANSFORMERS} /> | ||
</LexicalComposer> | ||
``` | ||
Or `registerMarkdownShortcuts` to register it manually: | ||
```js | ||
import { | ||
registerMarkdownShortcuts, | ||
TRANSFORMERS, | ||
} from '@lexical/markdown'; | ||
const editor = createEditor(...); | ||
registerMarkdownShortcuts(editor, TRANSFORMERS); | ||
``` | ||
## Transformers | ||
Markdown functionality relies on transformers configuration. It's an array of objects that define how certain text or nodes | ||
are processed during import, export or while typing. `@lexical/markdown` package provides set of built-in transformers: | ||
```js | ||
// Element transformers | ||
UNORDERED_LIST | ||
CODE | ||
HEADING | ||
ORDERED_LIST | ||
QUOTE | ||
// Text format transformers | ||
BOLD_ITALIC_STAR | ||
BOLD_ITALIC_UNDERSCORE | ||
BOLD_STAR | ||
BOLD_UNDERSCORE | ||
INLINE_CODE | ||
ITALIC_STAR | ||
ITALIC_UNDERSCORE | ||
STRIKETHROUGH | ||
// Text match transformers | ||
LINK | ||
``` | ||
And bundles of commonly used transformers: | ||
- `TRANSFORMERS` - all built-in transformers | ||
- `ELEMENT_TRANSFORMERS` - all built-in element transformers | ||
- `TEXT_FORMAT_TRANSFORMERS` - all built-in text format trasnformers | ||
- `TEXT_MATCH_TRANSFORMERS` - all built-in text match trasnformers | ||
Transformers are explicitly passed to markdown API allowing application-specific subset of markdown or custom transformers. | ||
There're three types of transformers: | ||
- **Element transformer** handles top level elements (lists, headings, quotes, tables or code blocks) | ||
- **Text format transformer** applies text range formats defined in `TextFormatType` (bold, italic, underline, strikethrough, code, subscript and superscript) | ||
- **Text match transformer** relies on matching leaf text node content | ||
See `MarkdownTransformers.js` for transformer implementation examples |
Sorry, the diff of this file is not supported yet
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain riskPackage has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
License Policy Violation
LicenseThis package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
94
48099
908
1
+ Added@lexical/clipboard@0.2.6(transitive)
+ Added@lexical/code@0.2.6(transitive)
+ Added@lexical/link@0.2.6(transitive)
+ Added@lexical/list@0.2.6(transitive)
+ Added@lexical/rich-text@0.2.6(transitive)
+ Added@lexical/selection@0.2.6(transitive)
+ Added@lexical/table@0.2.6(transitive)
+ Added@lexical/text@0.2.6(transitive)
+ Added@lexical/utils@0.2.6(transitive)
+ Addedlexical@0.2.6(transitive)
- Removed@lexical/clipboard@0.2.5(transitive)
- Removed@lexical/code@0.2.5(transitive)
- Removed@lexical/link@0.2.5(transitive)
- Removed@lexical/list@0.2.5(transitive)
- Removed@lexical/rich-text@0.2.5(transitive)
- Removed@lexical/selection@0.2.5(transitive)
- Removed@lexical/table@0.2.5(transitive)
- Removed@lexical/text@0.2.5(transitive)
- Removed@lexical/utils@0.2.5(transitive)
- Removedlexical@0.2.5(transitive)
Updated@lexical/code@0.2.6
Updated@lexical/link@0.2.6
Updated@lexical/list@0.2.6
Updated@lexical/rich-text@0.2.6
Updated@lexical/text@0.2.6
Updated@lexical/utils@0.2.6