fountain-js
Advanced tools
Comparing version 1.2.2 to 1.2.3
@@ -13,2 +13,22 @@ # Change Log | ||
## [1.2.3] - 2023-11-13 | ||
### Added | ||
- Keyword `Revision(s)` is now a recognized title page keyword. | ||
### Fixed | ||
- Fixed issue with `end_of_lines` rule not splitting properly when new lines have horizontal whitespace. | ||
- Title page is only recognized at the beginning of the script (leading whitespace is ignored); any other title pages after the start become action, as per the Fountain specification. | ||
- Fixed longstanding issue (even from the original version) where any additional lines after certain tokens like scene headings, for example `EXT. BRICK'S GARAGE - DAY\nTrailing action...`, are consumed and lost by the lexer. Now both the token and the additional lines fall to action instead, since these specific tokens require newlines after them to be valid. | ||
### Changed | ||
- Rule `end_of_lines` is now `blank_lines`. | ||
- `Scanner` class is now `Lexer` and is importable. | ||
- `Lexer` and `InlineLexer` classes are now static. | ||
- `tokenize` method in `Lexer` class now returns a tuple composed of title page tokens and script tokens for quicker access; no need to filter by `is_title` anymore. | ||
- `Fountain` class property `tokens` still presents all tokens in a single array as before (no changes). One can still filter by `is_title` as desired, as before. | ||
## [1.2.2] - 2023-10-28 | ||
@@ -15,0 +35,0 @@ |
@@ -12,7 +12,4 @@ import { Token } from './token'; | ||
tokens: Token[]; | ||
private scanner; | ||
private inlineLex; | ||
constructor(); | ||
parse(script: string, getTokens?: boolean): Script; | ||
to_html(token: Token): string | undefined; | ||
} |
@@ -1,9 +0,4 @@ | ||
import { Scanner } from './scanner'; | ||
import { InlineLexer } from './lexer'; | ||
import { Lexer, InlineLexer } from './lexer'; | ||
import { unEscapeHTML } from './utilities'; | ||
export class Fountain { | ||
constructor() { | ||
this.scanner = new Scanner; | ||
this.inlineLex = new InlineLexer; | ||
} | ||
parse(script, getTokens) { | ||
@@ -17,7 +12,8 @@ // throw an error if given script source isn't a string | ||
let title = ''; | ||
this.tokens = this.scanner.tokenize(script); | ||
const titleToken = this.tokens.find(token => token.type === 'title'); | ||
const [titlePageTokens, scriptTokens] = Lexer.tokenize(script); | ||
this.tokens = titlePageTokens.concat(scriptTokens); | ||
const titleToken = titlePageTokens.find(token => token.type === 'title'); | ||
if (titleToken === null || titleToken === void 0 ? void 0 : titleToken.text) { | ||
// lexes any inlines on the title then removes any HTML / line breaks | ||
title = unEscapeHTML(this.inlineLex.reconstruct(titleToken.text) | ||
title = unEscapeHTML(InlineLexer.reconstruct(titleToken.text) | ||
.replace('<br />', ' ') | ||
@@ -29,5 +25,5 @@ .replace(/<(?:.|\n)*?>/g, '')); | ||
html: { | ||
title_page: this.tokens.filter(token => token.is_title) | ||
title_page: titlePageTokens | ||
.map(token => this.to_html(token)).join(''), | ||
script: this.tokens.filter(token => !token.is_title) | ||
script: scriptTokens | ||
.map(token => this.to_html(token)).join('') | ||
@@ -47,15 +43,17 @@ }, | ||
if (token === null || token === void 0 ? void 0 : token.text) { | ||
lexedText = this.inlineLex.reconstruct(token.text, token.type === 'action'); | ||
lexedText = InlineLexer | ||
.reconstruct(token.text, token.type === 'action'); | ||
} | ||
switch (token.type) { | ||
case 'title': return `<h1>${lexedText}</h1>`; | ||
case 'credit': return `<p class="credit">${lexedText}</p>`; | ||
case 'author': return `<p class="authors">${lexedText}</p>`; | ||
case 'author': | ||
case 'authors': return `<p class="authors">${lexedText}</p>`; | ||
case 'source': return `<p class="source">${lexedText}</p>`; | ||
case 'notes': return `<p class="notes">${lexedText}</p>`; | ||
case 'draft_date': return `<p class="draft-date">${lexedText}</p>`; | ||
case 'date': return `<p class="date">${lexedText}</p>`; | ||
case 'contact': return `<p class="contact">${lexedText}</p>`; | ||
case 'copyright': return `<p class="copyright">${lexedText}</p>`; | ||
case 'contact': | ||
case 'copyright': | ||
case 'credit': | ||
case 'date': | ||
case 'draft_date': | ||
case 'notes': | ||
case 'revision': | ||
case 'source': return `<p class="${token.type.replace(/_/g, '-')}">${lexedText}</p>`; | ||
case 'scene_heading': return `<h3${(token.scene_number ? ` id="${token.scene_number}">` : `>`) + lexedText}</h3>`; | ||
@@ -62,0 +60,0 @@ case 'transition': return `<h2>${lexedText}</h2>`; |
export * from './fountain'; | ||
export { Token } from './token'; | ||
export { FountainTypes, rules } from './rules'; | ||
export { InlineTypes, InlineLexer } from './lexer'; | ||
export { Lexer, InlineTypes, InlineLexer } from './lexer'; |
export * from './fountain'; | ||
export { rules } from './rules'; | ||
export { InlineLexer } from './lexer'; | ||
export { Lexer, InlineLexer } from './lexer'; | ||
//# sourceMappingURL=index.js.map |
@@ -0,5 +1,38 @@ | ||
import { Token } from './token'; | ||
export declare type InlineTypes = 'note' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape'; | ||
export declare class Lexer { | ||
private static lastLineWasDualDialogue; | ||
private static scanIndex; | ||
/** | ||
* Replaces boneyard with an empty string. If a boneyard token exists | ||
* at the start of a line, it preserves token continuity by adding blank lines | ||
* for the lexer to parse. | ||
* @param match | ||
* @returns {string} empty string or blank lines | ||
*/ | ||
static boneyardStripper(match: string): string; | ||
/** | ||
* Tokenizes the script. | ||
* @param {string} script | ||
* @returns {[Token<Array>, Token<Array>]} tuple of title page and script token arrays | ||
*/ | ||
static tokenize(script: string): [Token[], Token[]]; | ||
/** | ||
* Tokenizes the title page. Tests for title page keywords then lexes going forward. | ||
     * If no keywords are found, an empty array is returned. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeTitlePage(source: string): Token[]; | ||
/** | ||
* Tokenizes all Fountain tokens except Title Page. Splits the script based on | ||
* blank lines then lexes in reverse to account for dual dialogue tokens. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeScript(source: string): Token[]; | ||
} | ||
export declare class InlineLexer { | ||
inline: Record<InlineTypes, string>; | ||
reconstruct(line: string, escapeSpaces?: boolean): string; | ||
static inline: Record<InlineTypes, string>; | ||
static reconstruct(line: string, escapeSpaces?: boolean): string; | ||
} |
import { rules } from './rules'; | ||
import { escapeHTML } from './utilities'; | ||
import { ActionToken, CenteredToken, DialogueBlock, LyricsToken, NoteToken, PageBreakToken, SceneHeadingToken, SectionToken, SpacesToken, SynopsisToken, TitlePageBlock, TransitionToken } from './token'; | ||
export class Lexer { | ||
/** | ||
* Replaces boneyard with an empty string. If a boneyard token exists | ||
* at the start of a line, it preserves token continuity by adding blank lines | ||
* for the lexer to parse. | ||
* @param match | ||
* @returns {string} empty string or blank lines | ||
*/ | ||
static boneyardStripper(match) { | ||
const endAtStrStart = /^[^\S\n]*\*\//m; | ||
let boneyardEnd = ''; | ||
if (endAtStrStart.test(match)) { | ||
boneyardEnd = '\n\n'; | ||
} | ||
return boneyardEnd; | ||
} | ||
/** | ||
* Tokenizes the script. | ||
* @param {string} script | ||
* @returns {[Token<Array>, Token<Array>]} tuple of title page and script token arrays | ||
*/ | ||
static tokenize(script) { | ||
let source = script | ||
.replace(rules.boneyard, this.boneyardStripper) | ||
.replace(/\r\n|\r/g, '\n'); // convert carriage return / returns to newline | ||
this.scanIndex = 0; | ||
const titlePageTokens = this.tokenizeTitlePage(source); | ||
source = source.substring(this.scanIndex); | ||
const scriptTokens = Lexer.tokenizeScript(source); | ||
return [titlePageTokens, scriptTokens]; | ||
} | ||
/** | ||
* Tokenizes the title page. Tests for title page keywords then lexes going forward. | ||
     * If no keywords are found, an empty array is returned. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeTitlePage(source) { | ||
let titlePageTokens = []; | ||
if (TitlePageBlock.matchedBy(source)) { | ||
const titlePageBlock = new TitlePageBlock(source); | ||
this.scanIndex = titlePageBlock.scanIndex; | ||
titlePageTokens = titlePageBlock.addTo(titlePageTokens); | ||
} | ||
return titlePageTokens; | ||
} | ||
/** | ||
* Tokenizes all Fountain tokens except Title Page. Splits the script based on | ||
* blank lines then lexes in reverse to account for dual dialogue tokens. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeScript(source) { | ||
const lines = source | ||
.split(rules.blank_lines) | ||
.reverse(); | ||
const scriptTokens = lines.reduce((previous, line) => { | ||
if (!line) { | ||
return previous; | ||
} | ||
/** spaces */ | ||
if (SpacesToken.matchedBy(line)) { | ||
return new SpacesToken().addTo(previous); | ||
} | ||
/** scene headings */ | ||
if (SceneHeadingToken.matchedBy(line)) { | ||
return new SceneHeadingToken(line).addTo(previous); | ||
} | ||
/** centered */ | ||
if (CenteredToken.matchedBy(line)) { | ||
return new CenteredToken(line).addTo(previous); | ||
} | ||
/** transitions */ | ||
if (TransitionToken.matchedBy(line)) { | ||
return new TransitionToken(line).addTo(previous); | ||
} | ||
/** dialogue blocks - characters, parentheticals and dialogue */ | ||
if (DialogueBlock.matchedBy(line)) { | ||
const dialogueBlock = new DialogueBlock(line, this.lastLineWasDualDialogue); | ||
this.lastLineWasDualDialogue = dialogueBlock.dual; | ||
return dialogueBlock.addTo(previous); | ||
} | ||
/** section */ | ||
if (SectionToken.matchedBy(line)) { | ||
return new SectionToken(line).addTo(previous); | ||
} | ||
/** synopsis */ | ||
if (SynopsisToken.matchedBy(line)) { | ||
return new SynopsisToken(line).addTo(previous); | ||
} | ||
/** notes */ | ||
if (NoteToken.matchedBy(line)) { | ||
return new NoteToken(line).addTo(previous); | ||
} | ||
/** lyrics */ | ||
if (LyricsToken.matchedBy(line)) { | ||
return new LyricsToken(line).addTo(previous); | ||
} | ||
/** page breaks */ | ||
if (PageBreakToken.matchedBy(line)) { | ||
return new PageBreakToken().addTo(previous); | ||
} | ||
/** action */ | ||
return new ActionToken(line).addTo(previous); | ||
}, []); | ||
return scriptTokens.reverse(); | ||
} | ||
} | ||
export class InlineLexer { | ||
constructor() { | ||
this.inline = { | ||
note: '<!-- $1 -->', | ||
line_break: '<br />', | ||
bold_italic_underline: '<span class="bold italic underline">$1</span>', | ||
bold_underline: '<span class="bold underline">$1</span>', | ||
italic_underline: '<span class="italic underline">$1</span>', | ||
bold_italic: '<span class="bold italic">$1</span>', | ||
bold: '<span class="bold">$1</span>', | ||
italic: '<span class="italic">$1</span>', | ||
underline: '<span class="underline">$1</span>', | ||
escape: '$1' | ||
}; | ||
} | ||
reconstruct(line, escapeSpaces = false) { | ||
static reconstruct(line, escapeSpaces = false) { | ||
const styles = [ | ||
@@ -48,2 +143,14 @@ 'bold_italic_underline', | ||
} | ||
InlineLexer.inline = { | ||
note: '<!-- $1 -->', | ||
line_break: '<br />', | ||
bold_italic_underline: '<span class="bold italic underline">$1</span>', | ||
bold_underline: '<span class="bold underline">$1</span>', | ||
italic_underline: '<span class="italic underline">$1</span>', | ||
bold_italic: '<span class="bold italic">$1</span>', | ||
bold: '<span class="bold">$1</span>', | ||
italic: '<span class="italic">$1</span>', | ||
underline: '<span class="underline">$1</span>', | ||
escape: '$1' | ||
}; | ||
//# sourceMappingURL=lexer.js.map |
@@ -1,2 +0,2 @@ | ||
export declare type FountainTypes = 'title_page' | 'scene_heading' | 'scene_number' | 'transition' | 'dialogue' | 'parenthetical' | 'action' | 'centered' | 'lyrics' | 'synopsis' | 'section' | 'note' | 'note_inline' | 'boneyard' | 'page_break' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape' | 'blank_line' | 'end_of_lines'; | ||
export declare type FountainTypes = 'title_page' | 'scene_heading' | 'scene_number' | 'transition' | 'dialogue' | 'parenthetical' | 'action' | 'centered' | 'lyrics' | 'synopsis' | 'section' | 'note' | 'note_inline' | 'boneyard' | 'page_break' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape' | 'blank_lines'; | ||
export declare const rules: Record<FountainTypes, RegExp>; |
export const rules = { | ||
title_page: /^\s*((?:title|credit|authors?|source|notes|draft date|date|contact|copyright)\:)/gim, | ||
scene_heading: /^\s*((?:\*{0,3}_?)?(?:(?:int|i)\.?\/(?:ext|e)|int|ext|est)[. ].+)|^\s*\.(?!\.+)(\S.*)/i, | ||
scene_number: /( *#(.+)# *)/, | ||
transition: /^\s*((?:FADE (?:TO BLACK|OUT)|CUT TO BLACK)\.|.+ TO\:)\s*$|^\s*> *(.+)$/, | ||
dialogue: /(?!^\s*\\@|^\s*!)(?!^\s*[0-9 _*]+(?:\(.*\))?[*_]*(?:\^?)?\s*\n)(^\s*(?:@[^^()\n]+|[^^()\na-z]+)(?: *\(.*\))?[ *_]*)(\^?)?\s*\n(?!\n+)([\s\S]+)/, | ||
title_page: /^\s*((?:title|credit|authors?|source|notes|draft ?date|date|contact|copyright|revisions?)\:)(?=[^\S\n]*(?:\n(?: {3,}|\t))?\S.*)/i, | ||
scene_heading: /^\s*((?:\*{0,3}_?)?(?:(?:int|i)\.?\/(?:ext|e)|int|ext|est)[. ].+$)|^\s*\.(?!\.+)(\S.*)$/i, | ||
scene_number: /\s*#([\w.-]+?)#\s*$/, | ||
transition: /^\s*((?:FADE (?:TO BLACK|OUT)|CUT TO BLACK)\.|.+ TO\:)[^\S\n]*$|^\s*> *(.+)$/, | ||
dialogue: /(?!^\s*\\@|^\s*[!.>~=#])(?!^\s*[0-9 _*]+(?:\(.*\))?[*_]*(?:\^?)?\s*\n)(^\s*(?:@[^^()\n]+|[^^()\na-z]+(?<!(?:BLACK|OUT)\.|TO:\s*))(?:\(.*\))?[ *_]*)(\^?)?\s*\n(?!\n+)([\s\S]+)/, | ||
parenthetical: /^ *(?:(?<u1>_{0,1})(?<s1>\*{0,3})(?=.+\k<s1>\k<u1>)|(?<s2>\*{0,3})(?<u2>_{0,1})(?=.+\k<u2>\k<s2>))(\(.+?\))(\k<s1>\k<u1>|\k<u2>\k<s2>) *$/, | ||
action: /^(.+)/g, | ||
centered: /^\s*>.+<[^\S\r\n]*(?:\s*>.+<[^\S\r\n]*)*/g, | ||
lyrics: /^\s*~(?! ).+(?:\n\s*~(?! ).+)*/, | ||
section: /^\s*(#+) *(.*)/, | ||
synopsis: /^\s*=(?!=+) *(.*)/, | ||
centered: /^\s*>.+<[^\S\r\n]*(?:\s*>.+<[^\S\r\n]*)*$/, | ||
lyrics: /^\s*~(?! ).+(?:\n\s*~(?! ).+)*$/, | ||
section: /^\s*(#+) *(.*)$/, | ||
synopsis: /^\s*=(?!=+) *(.*)$/, | ||
note: /^\[{2}(?!\[+)(.+)]{2}(?!\[+)$/, | ||
@@ -26,5 +26,4 @@ note_inline: /\[{2}(?!\[+)([\s\S]+?)]{2}(?!\[+)/g, | ||
escape: /\\([@#!*_$~`+=.><\\\/])/g, | ||
blank_line: /^(?: *(?:\n|$))+/, | ||
end_of_lines: /(?:\n|$){2,}/g | ||
blank_lines: /\n(?:(?! {2}\n)(?:[^\S\n]*| {3,}[^\S\n]*)(?:\n|$))+|^[^\S\n]*(?:\n|$)/g | ||
}; | ||
//# sourceMappingURL=rules.js.map |
@@ -12,2 +12,3 @@ export interface Token { | ||
tokens: Token[]; | ||
scanIndex: number; | ||
addTo(tokens: Token[]): Token[]; | ||
@@ -17,5 +18,6 @@ } | ||
readonly tokens: Token[]; | ||
constructor(line: string); | ||
readonly scanIndex: number; | ||
constructor(source: string); | ||
addTo(tokens: Token[]): Token[]; | ||
static matchedBy(line: string): boolean; | ||
static matchedBy(source: string): boolean; | ||
} | ||
@@ -26,3 +28,3 @@ export declare class TitlePageToken implements Token { | ||
readonly text: string; | ||
constructor(item: string); | ||
constructor(pair: string); | ||
addTo(tokens: Token[]): Token[]; | ||
@@ -55,3 +57,3 @@ } | ||
readonly dual: boolean; | ||
readonly too_short: boolean; | ||
readonly scanIndex: number; | ||
constructor(line: string, dual: boolean); | ||
@@ -58,0 +60,0 @@ addTo(tokens: Token[]): Token[]; |
import { rules } from './rules'; | ||
export class TitlePageBlock { | ||
constructor(line) { | ||
constructor(source) { | ||
this.tokens = []; | ||
const match = line | ||
.replace(/^([^\n]+:)/gm, '\n$1') | ||
.split(rules.end_of_lines) | ||
.reverse(); | ||
this.tokens = match.reduce((previous, item) => new TitlePageToken(item).addTo(previous), []); | ||
const titlePageBlock = /^\s*(?:[\w ]+(?<!\\)\:[^\S\n]*(?:(?:\n(?: {3}|\t))?[^\S\n]*\S.*)+(?:\n|$))+/; | ||
const keyValuePair = /^\s*[\w ]+(?<!\\)\:[^\S\n]*(?:(?:\n(?![\w ]+\:)(?: {3}|\t))?[^\S\n]*\S.*)+(?:\n|$)/; | ||
let scanPosition = 0; | ||
const match = source.match(titlePageBlock); | ||
if (match) { | ||
let titlePageData = match[0]; | ||
this.scanIndex = titlePageData.length; | ||
while (scanPosition < this.scanIndex) { | ||
const pair = titlePageData.match(keyValuePair); | ||
if (pair) { | ||
this.tokens = new TitlePageToken(pair[0]).addTo(this.tokens); | ||
titlePageData = titlePageData.substring(pair[0].length); | ||
scanPosition += pair[0].length; | ||
} | ||
} | ||
} | ||
} | ||
@@ -14,12 +25,12 @@ addTo(tokens) { | ||
} | ||
static matchedBy(line) { | ||
return rules.title_page.test(line); | ||
static matchedBy(source) { | ||
return rules.title_page.test(source); | ||
} | ||
} | ||
export class TitlePageToken { | ||
constructor(item) { | ||
constructor(pair) { | ||
this.is_title = true; | ||
const [key, value] = item.split(/\:\n*/, 2); | ||
const [key, delimeter, value] = pair.split(/(\:[^\S\n]*\n?)/, 3); | ||
this.type = key.trim().toLowerCase().replace(/ /g, '_'); | ||
this.text = value.replace(/^\s*/gm, ''); | ||
this.text = value.replace(/^\s*|\s*$/gm, ''); | ||
} | ||
@@ -35,7 +46,7 @@ addTo(tokens) { | ||
if (match) { | ||
this.text = match[1] || match[2]; | ||
this.text = (match[1] || match[2]).trim(); | ||
} | ||
const meta = this.text.match(rules.scene_number); | ||
if (meta) { | ||
this.scene_number = meta[2]; | ||
this.scene_number = meta[1]; | ||
this.text = this.text.replace(rules.scene_number, ''); | ||
@@ -71,3 +82,3 @@ } | ||
if (match) { | ||
this.text = match[1] || match[2]; | ||
this.text = (match[1] || match[2]).trim(); | ||
} | ||
@@ -87,2 +98,3 @@ } | ||
if (match) { | ||
this.scanIndex = match.length; | ||
let name = match[1].trim(); | ||
@@ -279,3 +291,3 @@ // iterating from the bottom up, so push dialogue blocks in reverse order | ||
static matchedBy(line) { | ||
return rules.blank_line.test(line); | ||
return rules.blank_lines.test(line); | ||
} | ||
@@ -286,3 +298,4 @@ } | ||
this.type = 'action'; | ||
this.text = line.replace(/^(\s*)!(?! )/gm, '$1') | ||
this.text = line | ||
.replace(/^(\s*)!(?! )/gm, '$1') | ||
.replace(/^( *)(\t+)/gm, (_, leading, tabs) => { | ||
@@ -289,0 +302,0 @@ return leading + ' '.repeat(tabs.length); |
@@ -12,7 +12,4 @@ import { Token } from './token'; | ||
tokens: Token[]; | ||
private scanner; | ||
private inlineLex; | ||
constructor(); | ||
parse(script: string, getTokens?: boolean): Script; | ||
to_html(token: Token): string | undefined; | ||
} |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.Fountain = void 0; | ||
const scanner_1 = require("./scanner"); | ||
const lexer_1 = require("./lexer"); | ||
const utilities_1 = require("./utilities"); | ||
class Fountain { | ||
constructor() { | ||
this.scanner = new scanner_1.Scanner; | ||
this.inlineLex = new lexer_1.InlineLexer; | ||
} | ||
parse(script, getTokens) { | ||
@@ -20,7 +15,8 @@ // throw an error if given script source isn't a string | ||
let title = ''; | ||
this.tokens = this.scanner.tokenize(script); | ||
const titleToken = this.tokens.find(token => token.type === 'title'); | ||
const [titlePageTokens, scriptTokens] = lexer_1.Lexer.tokenize(script); | ||
this.tokens = titlePageTokens.concat(scriptTokens); | ||
const titleToken = titlePageTokens.find(token => token.type === 'title'); | ||
if (titleToken === null || titleToken === void 0 ? void 0 : titleToken.text) { | ||
// lexes any inlines on the title then removes any HTML / line breaks | ||
title = utilities_1.unEscapeHTML(this.inlineLex.reconstruct(titleToken.text) | ||
title = utilities_1.unEscapeHTML(lexer_1.InlineLexer.reconstruct(titleToken.text) | ||
.replace('<br />', ' ') | ||
@@ -32,5 +28,5 @@ .replace(/<(?:.|\n)*?>/g, '')); | ||
html: { | ||
title_page: this.tokens.filter(token => token.is_title) | ||
title_page: titlePageTokens | ||
.map(token => this.to_html(token)).join(''), | ||
script: this.tokens.filter(token => !token.is_title) | ||
script: scriptTokens | ||
.map(token => this.to_html(token)).join('') | ||
@@ -50,15 +46,17 @@ }, | ||
if (token === null || token === void 0 ? void 0 : token.text) { | ||
lexedText = this.inlineLex.reconstruct(token.text, token.type === 'action'); | ||
lexedText = lexer_1.InlineLexer | ||
.reconstruct(token.text, token.type === 'action'); | ||
} | ||
switch (token.type) { | ||
case 'title': return `<h1>${lexedText}</h1>`; | ||
case 'credit': return `<p class="credit">${lexedText}</p>`; | ||
case 'author': return `<p class="authors">${lexedText}</p>`; | ||
case 'author': | ||
case 'authors': return `<p class="authors">${lexedText}</p>`; | ||
case 'source': return `<p class="source">${lexedText}</p>`; | ||
case 'notes': return `<p class="notes">${lexedText}</p>`; | ||
case 'draft_date': return `<p class="draft-date">${lexedText}</p>`; | ||
case 'date': return `<p class="date">${lexedText}</p>`; | ||
case 'contact': return `<p class="contact">${lexedText}</p>`; | ||
case 'copyright': return `<p class="copyright">${lexedText}</p>`; | ||
case 'contact': | ||
case 'copyright': | ||
case 'credit': | ||
case 'date': | ||
case 'draft_date': | ||
case 'notes': | ||
case 'revision': | ||
case 'source': return `<p class="${token.type.replace(/_/g, '-')}">${lexedText}</p>`; | ||
case 'scene_heading': return `<h3${(token.scene_number ? ` id="${token.scene_number}">` : `>`) + lexedText}</h3>`; | ||
@@ -65,0 +63,0 @@ case 'transition': return `<h2>${lexedText}</h2>`; |
export * from './fountain'; | ||
export { Token } from './token'; | ||
export { FountainTypes, rules } from './rules'; | ||
export { InlineTypes, InlineLexer } from './lexer'; | ||
export { Lexer, InlineTypes, InlineLexer } from './lexer'; |
@@ -17,3 +17,4 @@ "use strict"; | ||
var lexer_1 = require("./lexer"); | ||
Object.defineProperty(exports, "Lexer", { enumerable: true, get: function () { return lexer_1.Lexer; } }); | ||
Object.defineProperty(exports, "InlineLexer", { enumerable: true, get: function () { return lexer_1.InlineLexer; } }); | ||
//# sourceMappingURL=index.js.map |
@@ -0,5 +1,38 @@ | ||
import { Token } from './token'; | ||
export declare type InlineTypes = 'note' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape'; | ||
export declare class Lexer { | ||
private static lastLineWasDualDialogue; | ||
private static scanIndex; | ||
/** | ||
* Replaces boneyard with an empty string. If a boneyard token exists | ||
* at the start of a line, it preserves token continuity by adding blank lines | ||
* for the lexer to parse. | ||
* @param match | ||
* @returns {string} empty string or blank lines | ||
*/ | ||
static boneyardStripper(match: string): string; | ||
/** | ||
* Tokenizes the script. | ||
* @param {string} script | ||
* @returns {[Token<Array>, Token<Array>]} tuple of title page and script token arrays | ||
*/ | ||
static tokenize(script: string): [Token[], Token[]]; | ||
/** | ||
* Tokenizes the title page. Tests for title page keywords then lexes going forward. | ||
     * If no keywords are found, an empty array is returned. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeTitlePage(source: string): Token[]; | ||
/** | ||
* Tokenizes all Fountain tokens except Title Page. Splits the script based on | ||
* blank lines then lexes in reverse to account for dual dialogue tokens. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeScript(source: string): Token[]; | ||
} | ||
export declare class InlineLexer { | ||
inline: Record<InlineTypes, string>; | ||
reconstruct(line: string, escapeSpaces?: boolean): string; | ||
static inline: Record<InlineTypes, string>; | ||
static reconstruct(line: string, escapeSpaces?: boolean): string; | ||
} |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.InlineLexer = void 0; | ||
exports.InlineLexer = exports.Lexer = void 0; | ||
const rules_1 = require("./rules"); | ||
const utilities_1 = require("./utilities"); | ||
const token_1 = require("./token"); | ||
class Lexer { | ||
/** | ||
* Replaces boneyard with an empty string. If a boneyard token exists | ||
* at the start of a line, it preserves token continuity by adding blank lines | ||
* for the lexer to parse. | ||
* @param match | ||
* @returns {string} empty string or blank lines | ||
*/ | ||
static boneyardStripper(match) { | ||
const endAtStrStart = /^[^\S\n]*\*\//m; | ||
let boneyardEnd = ''; | ||
if (endAtStrStart.test(match)) { | ||
boneyardEnd = '\n\n'; | ||
} | ||
return boneyardEnd; | ||
} | ||
/** | ||
* Tokenizes the script. | ||
* @param {string} script | ||
* @returns {[Token<Array>, Token<Array>]} tuple of title page and script token arrays | ||
*/ | ||
static tokenize(script) { | ||
let source = script | ||
.replace(rules_1.rules.boneyard, this.boneyardStripper) | ||
.replace(/\r\n|\r/g, '\n'); // convert carriage return / returns to newline | ||
this.scanIndex = 0; | ||
const titlePageTokens = this.tokenizeTitlePage(source); | ||
source = source.substring(this.scanIndex); | ||
const scriptTokens = Lexer.tokenizeScript(source); | ||
return [titlePageTokens, scriptTokens]; | ||
} | ||
/** | ||
* Tokenizes the title page. Tests for title page keywords then lexes going forward. | ||
     * If no keywords are found, an empty array is returned. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeTitlePage(source) { | ||
let titlePageTokens = []; | ||
if (token_1.TitlePageBlock.matchedBy(source)) { | ||
const titlePageBlock = new token_1.TitlePageBlock(source); | ||
this.scanIndex = titlePageBlock.scanIndex; | ||
titlePageTokens = titlePageBlock.addTo(titlePageTokens); | ||
} | ||
return titlePageTokens; | ||
} | ||
/** | ||
* Tokenizes all Fountain tokens except Title Page. Splits the script based on | ||
* blank lines then lexes in reverse to account for dual dialogue tokens. | ||
* @param {string} source | ||
* @returns {Token<Array>} | ||
*/ | ||
static tokenizeScript(source) { | ||
const lines = source | ||
.split(rules_1.rules.blank_lines) | ||
.reverse(); | ||
const scriptTokens = lines.reduce((previous, line) => { | ||
if (!line) { | ||
return previous; | ||
} | ||
/** spaces */ | ||
if (token_1.SpacesToken.matchedBy(line)) { | ||
return new token_1.SpacesToken().addTo(previous); | ||
} | ||
/** scene headings */ | ||
if (token_1.SceneHeadingToken.matchedBy(line)) { | ||
return new token_1.SceneHeadingToken(line).addTo(previous); | ||
} | ||
/** centered */ | ||
if (token_1.CenteredToken.matchedBy(line)) { | ||
return new token_1.CenteredToken(line).addTo(previous); | ||
} | ||
/** transitions */ | ||
if (token_1.TransitionToken.matchedBy(line)) { | ||
return new token_1.TransitionToken(line).addTo(previous); | ||
} | ||
/** dialogue blocks - characters, parentheticals and dialogue */ | ||
if (token_1.DialogueBlock.matchedBy(line)) { | ||
const dialogueBlock = new token_1.DialogueBlock(line, this.lastLineWasDualDialogue); | ||
this.lastLineWasDualDialogue = dialogueBlock.dual; | ||
return dialogueBlock.addTo(previous); | ||
} | ||
/** section */ | ||
if (token_1.SectionToken.matchedBy(line)) { | ||
return new token_1.SectionToken(line).addTo(previous); | ||
} | ||
/** synopsis */ | ||
if (token_1.SynopsisToken.matchedBy(line)) { | ||
return new token_1.SynopsisToken(line).addTo(previous); | ||
} | ||
/** notes */ | ||
if (token_1.NoteToken.matchedBy(line)) { | ||
return new token_1.NoteToken(line).addTo(previous); | ||
} | ||
/** lyrics */ | ||
if (token_1.LyricsToken.matchedBy(line)) { | ||
return new token_1.LyricsToken(line).addTo(previous); | ||
} | ||
/** page breaks */ | ||
if (token_1.PageBreakToken.matchedBy(line)) { | ||
return new token_1.PageBreakToken().addTo(previous); | ||
} | ||
/** action */ | ||
return new token_1.ActionToken(line).addTo(previous); | ||
}, []); | ||
return scriptTokens.reverse(); | ||
} | ||
} | ||
exports.Lexer = Lexer; | ||
class InlineLexer { | ||
constructor() { | ||
this.inline = { | ||
note: '<!-- $1 -->', | ||
line_break: '<br />', | ||
bold_italic_underline: '<span class="bold italic underline">$1</span>', | ||
bold_underline: '<span class="bold underline">$1</span>', | ||
italic_underline: '<span class="italic underline">$1</span>', | ||
bold_italic: '<span class="bold italic">$1</span>', | ||
bold: '<span class="bold">$1</span>', | ||
italic: '<span class="italic">$1</span>', | ||
underline: '<span class="underline">$1</span>', | ||
escape: '$1' | ||
}; | ||
} | ||
reconstruct(line, escapeSpaces = false) { | ||
static reconstruct(line, escapeSpaces = false) { | ||
const styles = [ | ||
@@ -52,2 +148,14 @@ 'bold_italic_underline', | ||
exports.InlineLexer = InlineLexer; | ||
InlineLexer.inline = { | ||
note: '<!-- $1 -->', | ||
line_break: '<br />', | ||
bold_italic_underline: '<span class="bold italic underline">$1</span>', | ||
bold_underline: '<span class="bold underline">$1</span>', | ||
italic_underline: '<span class="italic underline">$1</span>', | ||
bold_italic: '<span class="bold italic">$1</span>', | ||
bold: '<span class="bold">$1</span>', | ||
italic: '<span class="italic">$1</span>', | ||
underline: '<span class="underline">$1</span>', | ||
escape: '$1' | ||
}; | ||
//# sourceMappingURL=lexer.js.map |
@@ -1,2 +0,2 @@ | ||
export declare type FountainTypes = 'title_page' | 'scene_heading' | 'scene_number' | 'transition' | 'dialogue' | 'parenthetical' | 'action' | 'centered' | 'lyrics' | 'synopsis' | 'section' | 'note' | 'note_inline' | 'boneyard' | 'page_break' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape' | 'blank_line' | 'end_of_lines'; | ||
export declare type FountainTypes = 'title_page' | 'scene_heading' | 'scene_number' | 'transition' | 'dialogue' | 'parenthetical' | 'action' | 'centered' | 'lyrics' | 'synopsis' | 'section' | 'note' | 'note_inline' | 'boneyard' | 'page_break' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape' | 'blank_lines'; | ||
export declare const rules: Record<FountainTypes, RegExp>; |
@@ -5,13 +5,13 @@ "use strict"; | ||
exports.rules = { | ||
title_page: /^\s*((?:title|credit|authors?|source|notes|draft date|date|contact|copyright)\:)/gim, | ||
scene_heading: /^\s*((?:\*{0,3}_?)?(?:(?:int|i)\.?\/(?:ext|e)|int|ext|est)[. ].+)|^\s*\.(?!\.+)(\S.*)/i, | ||
scene_number: /( *#(.+)# *)/, | ||
transition: /^\s*((?:FADE (?:TO BLACK|OUT)|CUT TO BLACK)\.|.+ TO\:)\s*$|^\s*> *(.+)$/, | ||
dialogue: /(?!^\s*\\@|^\s*!)(?!^\s*[0-9 _*]+(?:\(.*\))?[*_]*(?:\^?)?\s*\n)(^\s*(?:@[^^()\n]+|[^^()\na-z]+)(?: *\(.*\))?[ *_]*)(\^?)?\s*\n(?!\n+)([\s\S]+)/, | ||
title_page: /^\s*((?:title|credit|authors?|source|notes|draft ?date|date|contact|copyright|revisions?)\:)(?=[^\S\n]*(?:\n(?: {3,}|\t))?\S.*)/i, | ||
scene_heading: /^\s*((?:\*{0,3}_?)?(?:(?:int|i)\.?\/(?:ext|e)|int|ext|est)[. ].+$)|^\s*\.(?!\.+)(\S.*)$/i, | ||
scene_number: /\s*#([\w.-]+?)#\s*$/, | ||
transition: /^\s*((?:FADE (?:TO BLACK|OUT)|CUT TO BLACK)\.|.+ TO\:)[^\S\n]*$|^\s*> *(.+)$/, | ||
dialogue: /(?!^\s*\\@|^\s*[!.>~=#])(?!^\s*[0-9 _*]+(?:\(.*\))?[*_]*(?:\^?)?\s*\n)(^\s*(?:@[^^()\n]+|[^^()\na-z]+(?<!(?:BLACK|OUT)\.|TO:\s*))(?:\(.*\))?[ *_]*)(\^?)?\s*\n(?!\n+)([\s\S]+)/, | ||
parenthetical: /^ *(?:(?<u1>_{0,1})(?<s1>\*{0,3})(?=.+\k<s1>\k<u1>)|(?<s2>\*{0,3})(?<u2>_{0,1})(?=.+\k<u2>\k<s2>))(\(.+?\))(\k<s1>\k<u1>|\k<u2>\k<s2>) *$/, | ||
action: /^(.+)/g, | ||
centered: /^\s*>.+<[^\S\r\n]*(?:\s*>.+<[^\S\r\n]*)*/g, | ||
lyrics: /^\s*~(?! ).+(?:\n\s*~(?! ).+)*/, | ||
section: /^\s*(#+) *(.*)/, | ||
synopsis: /^\s*=(?!=+) *(.*)/, | ||
centered: /^\s*>.+<[^\S\r\n]*(?:\s*>.+<[^\S\r\n]*)*$/, | ||
lyrics: /^\s*~(?! ).+(?:\n\s*~(?! ).+)*$/, | ||
section: /^\s*(#+) *(.*)$/, | ||
synopsis: /^\s*=(?!=+) *(.*)$/, | ||
note: /^\[{2}(?!\[+)(.+)]{2}(?!\[+)$/, | ||
@@ -30,5 +30,4 @@ note_inline: /\[{2}(?!\[+)([\s\S]+?)]{2}(?!\[+)/g, | ||
escape: /\\([@#!*_$~`+=.><\\\/])/g, | ||
blank_line: /^(?: *(?:\n|$))+/, | ||
end_of_lines: /(?:\n|$){2,}/g | ||
blank_lines: /\n(?:(?! {2}\n)(?:[^\S\n]*| {3,}[^\S\n]*)(?:\n|$))+|^[^\S\n]*(?:\n|$)/g | ||
}; | ||
//# sourceMappingURL=rules.js.map |
@@ -12,2 +12,3 @@ export interface Token { | ||
tokens: Token[]; | ||
scanIndex: number; | ||
addTo(tokens: Token[]): Token[]; | ||
@@ -17,5 +18,6 @@ } | ||
readonly tokens: Token[]; | ||
constructor(line: string); | ||
readonly scanIndex: number; | ||
constructor(source: string); | ||
addTo(tokens: Token[]): Token[]; | ||
static matchedBy(line: string): boolean; | ||
static matchedBy(source: string): boolean; | ||
} | ||
@@ -26,3 +28,3 @@ export declare class TitlePageToken implements Token { | ||
readonly text: string; | ||
constructor(item: string); | ||
constructor(pair: string); | ||
addTo(tokens: Token[]): Token[]; | ||
@@ -55,3 +57,3 @@ } | ||
readonly dual: boolean; | ||
readonly too_short: boolean; | ||
readonly scanIndex: number; | ||
constructor(line: string, dual: boolean); | ||
@@ -58,0 +60,0 @@ addTo(tokens: Token[]): Token[]; |
@@ -6,9 +6,20 @@ "use strict"; | ||
class TitlePageBlock { | ||
constructor(line) { | ||
constructor(source) { | ||
this.tokens = []; | ||
const match = line | ||
.replace(/^([^\n]+:)/gm, '\n$1') | ||
.split(rules_1.rules.end_of_lines) | ||
.reverse(); | ||
this.tokens = match.reduce((previous, item) => new TitlePageToken(item).addTo(previous), []); | ||
const titlePageBlock = /^\s*(?:[\w ]+(?<!\\)\:[^\S\n]*(?:(?:\n(?: {3}|\t))?[^\S\n]*\S.*)+(?:\n|$))+/; | ||
const keyValuePair = /^\s*[\w ]+(?<!\\)\:[^\S\n]*(?:(?:\n(?![\w ]+\:)(?: {3}|\t))?[^\S\n]*\S.*)+(?:\n|$)/; | ||
let scanPosition = 0; | ||
const match = source.match(titlePageBlock); | ||
if (match) { | ||
let titlePageData = match[0]; | ||
this.scanIndex = titlePageData.length; | ||
while (scanPosition < this.scanIndex) { | ||
const pair = titlePageData.match(keyValuePair); | ||
if (pair) { | ||
this.tokens = new TitlePageToken(pair[0]).addTo(this.tokens); | ||
titlePageData = titlePageData.substring(pair[0].length); | ||
scanPosition += pair[0].length; | ||
} | ||
} | ||
} | ||
} | ||
@@ -18,4 +29,4 @@ addTo(tokens) { | ||
} | ||
static matchedBy(line) { | ||
return rules_1.rules.title_page.test(line); | ||
static matchedBy(source) { | ||
return rules_1.rules.title_page.test(source); | ||
} | ||
@@ -25,7 +36,7 @@ } | ||
class TitlePageToken { | ||
constructor(item) { | ||
constructor(pair) { | ||
this.is_title = true; | ||
const [key, value] = item.split(/\:\n*/, 2); | ||
const [key, delimeter, value] = pair.split(/(\:[^\S\n]*\n?)/, 3); | ||
this.type = key.trim().toLowerCase().replace(/ /g, '_'); | ||
this.text = value.replace(/^\s*/gm, ''); | ||
this.text = value.replace(/^\s*|\s*$/gm, ''); | ||
} | ||
@@ -42,7 +53,7 @@ addTo(tokens) { | ||
if (match) { | ||
this.text = match[1] || match[2]; | ||
this.text = (match[1] || match[2]).trim(); | ||
} | ||
const meta = this.text.match(rules_1.rules.scene_number); | ||
if (meta) { | ||
this.scene_number = meta[2]; | ||
this.scene_number = meta[1]; | ||
this.text = this.text.replace(rules_1.rules.scene_number, ''); | ||
@@ -80,3 +91,3 @@ } | ||
if (match) { | ||
this.text = match[1] || match[2]; | ||
this.text = (match[1] || match[2]).trim(); | ||
} | ||
@@ -97,2 +108,3 @@ } | ||
if (match) { | ||
this.scanIndex = match.length; | ||
let name = match[1].trim(); | ||
@@ -302,3 +314,3 @@ // iterating from the bottom up, so push dialogue blocks in reverse order | ||
static matchedBy(line) { | ||
return rules_1.rules.blank_line.test(line); | ||
return rules_1.rules.blank_lines.test(line); | ||
} | ||
@@ -310,3 +322,4 @@ } | ||
this.type = 'action'; | ||
this.text = line.replace(/^(\s*)!(?! )/gm, '$1') | ||
this.text = line | ||
.replace(/^(\s*)!(?! )/gm, '$1') | ||
.replace(/^( *)(\t+)/gm, (_, leading, tabs) => { | ||
@@ -313,0 +326,0 @@ return leading + ' '.repeat(tabs.length); |
{ | ||
"name": "fountain-js", | ||
"version": "1.2.2", | ||
"version": "1.2.3", | ||
"description": "A simple parser for Fountain, a markup language for formatting screenplays.", | ||
@@ -5,0 +5,0 @@ "main": "dist/index.js", |
@@ -58,8 +58,8 @@ # Fountain-js | ||
interface Script { | ||
title: string, | ||
title: string; | ||
html: { | ||
title_page: string, | ||
script: string | ||
}, | ||
tokens: Token[] | ||
}; | ||
tokens: Token[]; | ||
} | ||
@@ -150,8 +150,10 @@ ``` | ||
interface Token { | ||
type: string, | ||
is_title?: boolean, | ||
text?: string, | ||
scene_number?: string, | ||
dual?: string, | ||
depth?: number | ||
type: string; | ||
is_title?: boolean; | ||
text?: string; | ||
scene_number?: string; | ||
dual?: string; | ||
depth?: number; | ||
addTo(tokens: Token[]): Token[]; | ||
} | ||
@@ -158,0 +160,0 @@ ``` |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
108953
1610
181
40