fountain-js
Comparing version 1.1.3 to 1.1.4
@@ -9,2 +9,21 @@ # Change Log
- Improve rendering per the spec, such as stripping sections and synopses, since these should not be rendered.
- Return more explicit null values and enable strict null checking.
- Add line numbers to tokens.
- Allow any title field attributes.
- Better title page parsing in general (illustrated in the sketch below).
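
A minimal sketch of how the 1.1.3 items above surface in use, assuming the `parse(script, true)` signature and the `{ title, html, tokens }` result shape carried over from fountain.js; the exact per-token line-number field isn't shown in this diff, so only `type` and `text` are read here.

```ts
import { Fountain } from 'fountain-js';

const script = `Title: Big Fish
Draft date: 2023-10-01
Author: John August

INT. HOUSE - DAY

A door opens.`;

// Assumption: passing true as the second argument also returns the token stream.
const output = new Fountain().parse(script, true);

console.log(output.title); // expected: "Big Fish"

for (const token of output.tokens ?? []) {
    // Arbitrary title fields are kept: "Draft date" becomes a token of type
    // "draft_date" (field names are lower-cased and the space becomes an
    // underscore, per the TitlePageToken code further down this diff).
    console.log(token.type, token.text);
}
```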
## [1.1.4] - 2023-10-01
### Added
- Tokens ``@ # ! $ \ / ~ ` + = . > <`` can all be escaped, in addition to `*` and `_`. Escapes are also respected when used to break a token's intended function.
- Symbols `< > & "` now escape to their HTML-safe variants.
- Types have been added to the regex patterns, and the lexers' regex objects are now typed as a `Record<T, U>`. These types are exported for import and manipulation as desired (see the example after this list).
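
A short, hedged example of the new escaping and the typed rule records; it assumes `rules`, `InlineTypes` and `InlineLexer` are re-exported from the package root as the index diffs below suggest, and the output strings shown are approximate.

```ts
import { InlineLexer, rules } from 'fountain-js';
import type { InlineTypes } from 'fountain-js';

const inline = new InlineLexer();

// Escaped markers pass through literally instead of toggling emphasis,
// and <, >, & and " come out as their HTML entities.
console.log(inline.reconstruct('\\*not italic\\* & "quoted"'));
// expected, roughly: *not italic* &amp; &quot;quoted&quot;

// The rule sets are typed records, so individual patterns can be picked out.
const boldRule = rules.bold;                        // a RegExp, per lexer.js below
const styles: InlineTypes[] = ['bold', 'italic', 'underline'];
console.log(boldRule.test('**shouting**'), styles); // expected: true [ 'bold', 'italic', 'underline' ]
```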
### Changed
- Italic, bold, underline and all their combinations now behave more in line, though not perfectly, with the [CommonMark specification](https://spec.commonmark.org/0.30/#emphasis-and-strong-emphasis) for emphasis. This includes preserving the HTML nesting of other forms of emphasis as far as possible.
- More work is needed to match the spec exactly, but within the current limitations the behaviour is much closer to what one would expect, particularly around non-flanking delimiters (see the sketch below).
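
A rough illustration of the emphasis changes, using the `<span class="...">` markup visible in lexer.js further down; outputs are approximate, since the behaviour is close to but not identical with CommonMark.

```ts
import { InlineLexer } from 'fountain-js';

const inline = new InlineLexer();

// The combined bold/italic/underline form maps to a single span carrying all three classes.
console.log(inline.reconstruct('_***all three***_'));
// expected, roughly: <span class="bold italic underline">all three</span>

// Non-flanking delimiters (whitespace on both sides) should no longer trigger emphasis.
console.log(inline.reconstruct('5 * 4 * 3'));
// expected, roughly: 5 * 4 * 3
```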
## [1.1.3] - 2023-09-24
@@ -11,0 +30,0 @@
import { Scanner } from './scanner';
import { InlineLexer } from './lexer';
import { unEscapeHTML } from './utilities';
export class Fountain {
@@ -21,5 +22,5 @@ constructor() {
// lexes any inlines on the title then removes any HTML / line breaks
title = this.inlineLex.reconstruct(titleToken.text)
title = unEscapeHTML(this.inlineLex.reconstruct(titleToken.text)
.replace('<br />', ' ')
.replace(/<(?:.|\n)*?>/g, '');
.replace(/<(?:.|\n)*?>/g, ''));
}
@@ -26,0 +27,0 @@ return {
export * from './fountain';
export { Token } from './token';
export { regex } from './regex';
export { Lexer, InlineLexer } from './lexer';
export { FountainTypes, rules } from './rules';
export { Lexer, InlineTypes, InlineLexer } from './lexer';
export * from './fountain';
export { regex } from './regex';
export { rules } from './rules';
export { Lexer, InlineLexer } from './lexer';
//# sourceMappingURL=index.js.map
@@ -0,1 +1,2 @@
export declare type InlineTypes = 'note' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape';
export declare class Lexer {
@@ -5,4 +6,4 @@ reconstruct(script: string): string;
export declare class InlineLexer extends Lexer {
private inline;
inline: Record<InlineTypes, string>;
reconstruct(line: string): string;
}
@@ -1,8 +0,9 @@
import { regex } from './regex';
import { rules } from './rules';
import { escapeHTML } from './utilities';
export class Lexer {
reconstruct(script) {
return script.replace(regex.boneyard, '\n$1\n')
.replace(regex.standardizer, '\n')
.replace(regex.cleaner, '')
.replace(regex.whitespacer, '');
return script.replace(rules.boneyard, '\n$1\n')
.replace(rules.standardizer, '\n')
.replace(rules.cleaner, '')
.replace(rules.whitespacer, '');
}
@@ -16,9 +17,10 @@ }
line_break: '<br />',
bold_italic_underline: '<span class="bold italic underline">$2</span>',
bold_underline: '<span class="bold underline">$2</span>',
italic_underline: '<span class="italic underline">$2</span>',
bold_italic: '<span class="bold italic">$2</span>',
bold: '<span class="bold">$2</span>',
italic: '<span class="italic">$2</span>',
underline: '<span class="underline">$2</span>'
bold_italic_underline: '<span class="bold italic underline">$1</span>',
bold_underline: '<span class="bold underline">$1</span>',
italic_underline: '<span class="italic underline">$1</span>',
bold_italic: '<span class="bold italic">$1</span>',
bold: '<span class="bold">$1</span>',
italic: '<span class="italic">$1</span>',
underline: '<span class="underline">$1</span>',
escape: '$1'
};
@@ -31,8 +33,8 @@ }
const styles = ['bold_italic_underline', 'bold_underline', 'italic_underline', 'bold_italic', 'bold', 'italic', 'underline'];
line = line.replace(regex.note_inline, this.inline.note)
.replace(/\\\*/g, '[star]')
.replace(/\\_/g, '[underline]')
.replace(/\n/g, this.inline.line_break);
line = escapeHTML(line
.replace(rules.note_inline, this.inline.note)
.replace(rules.escape, '[{{{$&}}}]') // perserve escaped characters
);
for (let style of styles) {
match = regex[style];
match = rules[style];
if (match.test(line)) {
@@ -42,5 +44,8 @@ line = line.replace(match, this.inline[style]);
}
return line.replace(/\[star]/g, '*').replace(/\[underline]/g, '_').trim();
return line
.replace(/\n/g, this.inline.line_break)
.replace(/\[{{{\\(&.+?;|.)}}}]/g, this.inline.escape) // restore escaped chars to intended sequence
.trim();
}
}
//# sourceMappingURL=lexer.js.map
@@ -1,2 +0,2 @@
import { regex } from './regex';
import { rules } from './rules';
import { ActionToken, BoneyardToken, CenteredToken, DialogueBlock, LineBreakToken, LyricsToken, NoteToken, PageBreakToken, SceneHeadingToken, SectionToken, SynopsisToken, TitlePageBlock, TransitionToken } from './token';
@@ -7,3 +7,3 @@ import { Lexer } from './lexer';
// reverse the array so that dual dialog can be constructed bottom up
const source = new Lexer().reconstruct(script).split(regex.splitter).reverse();
const source = new Lexer().reconstruct(script).split(rules.splitter).reverse();
const tokens = source.reduce((previous, line) => {
@@ -10,0 +10,0 @@ /** title page */
@@ -1,6 +0,6 @@
import { regex } from './regex';
import { rules } from './rules';
export class TitlePageBlock {
constructor(line) {
this.tokens = [];
const match = line.replace(regex.title_page, '\n$1').split(regex.splitter).reverse();
const match = line.replace(rules.title_page, '\n$1').split(rules.splitter).reverse();
this.tokens = match.reduce((previous, item) => new TitlePageToken(item).addTo(previous), []);
@@ -12,3 +12,3 @@ }
static matchedBy(line) {
return regex.title_page.test(line);
return rules.title_page.test(line);
}
@@ -19,3 +19,3 @@ }
this.is_title = true;
const pair = item.replace(regex.cleaner, '').split(/\:\n*/);
const pair = item.replace(rules.cleaner, '').split(/\:\n*/);
this.type = pair[0].trim().toLowerCase().replace(' ', '_');
@@ -31,8 +31,8 @@ this.text = pair[1].trim();
this.type = 'scene_heading';
const match = line.match(regex.scene_heading);
const match = line.match(rules.scene_heading);
this.text = match[1] || match[2];
const meta = this.text.match(regex.scene_number);
const meta = this.text.match(rules.scene_number);
if (meta) {
this.scene_number = meta[2];
this.text = this.text.replace(regex.scene_number, '');
this.text = this.text.replace(rules.scene_number, '');
}
@@ -44,3 +44,3 @@ }
static matchedBy(line) {
return regex.scene_heading.test(line);
return rules.scene_heading.test(line);
}
@@ -51,3 +51,3 @@ }
this.type = 'centered';
const match = line.match(regex.centered);
const match = line.match(rules.centered);
this.text = match[0].replace(/ *[><] */g, '');
@@ -59,3 +59,3 @@ }
static matchedBy(line) {
return regex.centered.test(line);
return rules.centered.test(line);
}
@@ -66,3 +66,3 @@ }
this.type = 'transition';
const match = line.match(regex.transition);
const match = line.match(rules.transition);
this.text = match[1] || match[2];
@@ -74,3 +74,3 @@ }
static matchedBy(line) {
return regex.transition.test(line);
return rules.transition.test(line);
}
@@ -81,3 +81,3 @@ }
this.tokens = [];
const match = line.match(regex.dialogue);
const match = line.match(rules.dialogue);
let name = match[1];
@@ -97,6 +97,6 @@ // iterating from the bottom up, so push dialogue blocks in reverse order
}
if (regex.parenthetical.test(text)) {
if (rules.parenthetical.test(text)) {
return [...p, new ParentheticalToken(text)];
}
if (regex.lyrics.test(text)) {
if (rules.lyrics.test(text)) {
if (previousToken.type === 'lyrics') {
@@ -132,3 +132,3 @@ p[lastIndex].text =
static matchedBy(line) {
return regex.dialogue.test(line);
return rules.dialogue.test(line);
}
@@ -205,3 +205,3 @@ }
static matchedBy(line) {
return regex.lyrics.test(line);
return rules.lyrics.test(line);
}
@@ -212,3 +212,3 @@ }
this.type = 'section';
const match = line.match(regex.section);
const match = line.match(rules.section);
this.text = match[2];
@@ -221,3 +221,3 @@ this.depth = match[1].length;
static matchedBy(line) {
return regex.section.test(line);
return rules.section.test(line);
}
@@ -228,3 +228,3 @@ }
this.type = 'synopsis';
const match = line.match(regex.synopsis);
const match = line.match(rules.synopsis);
this.text = match[1];
@@ -236,3 +236,3 @@ }
static matchedBy(line) {
return regex.synopsis.test(line);
return rules.synopsis.test(line);
}
@@ -243,3 +243,3 @@ }
this.type = 'note';
const match = line.match(regex.note);
const match = line.match(rules.note);
this.text = match[1];
@@ -251,3 +251,3 @@ }
static matchedBy(line) {
return regex.note.test(line);
return rules.note.test(line);
}
@@ -257,3 +257,3 @@ }
constructor(line) {
const match = line.match(regex.boneyard);
const match = line.match(rules.boneyard);
this.type = match[0][0] === '/' ? 'boneyard_begin' : 'boneyard_end';
@@ -265,3 +265,3 @@ }
static matchedBy(line) {
return regex.boneyard.test(line);
return rules.boneyard.test(line);
}
@@ -277,3 +277,3 @@ }
static matchedBy(line) {
return regex.page_break.test(line);
return rules.page_break.test(line);
}
@@ -289,3 +289,3 @@ }
static matchedBy(line) {
return regex.line_break.test(line);
return rules.line_break.test(line);
}
@@ -302,5 +302,2 @@ }
}
function isTooShort(str) {
return str.indexOf(' ') === str.length - 2;
}
//# sourceMappingURL=token.js.map
@@ -6,2 +6,3 @@ "use strict"; | ||
const lexer_1 = require("./lexer"); | ||
const utilities_1 = require("./utilities"); | ||
class Fountain { | ||
@@ -25,5 +26,5 @@ constructor() { | ||
// lexes any inlines on the title then removes any HTML / line breaks | ||
title = this.inlineLex.reconstruct(titleToken.text) | ||
title = utilities_1.unEscapeHTML(this.inlineLex.reconstruct(titleToken.text) | ||
.replace('<br />', ' ') | ||
.replace(/<(?:.|\n)*?>/g, ''); | ||
.replace(/<(?:.|\n)*?>/g, '')); | ||
} | ||
@@ -30,0 +31,0 @@ return { |
export * from './fountain';
export { Token } from './token';
export { regex } from './regex';
export { Lexer, InlineLexer } from './lexer';
export { FountainTypes, rules } from './rules';
export { Lexer, InlineTypes, InlineLexer } from './lexer';
@@ -14,4 +14,4 @@ "use strict";
__exportStar(require("./fountain"), exports);
var regex_1 = require("./regex");
Object.defineProperty(exports, "regex", { enumerable: true, get: function () { return regex_1.regex; } });
var rules_1 = require("./rules");
Object.defineProperty(exports, "rules", { enumerable: true, get: function () { return rules_1.rules; } });
var lexer_1 = require("./lexer");
@@ -18,0 +18,0 @@ Object.defineProperty(exports, "Lexer", { enumerable: true, get: function () { return lexer_1.Lexer; } });
@@ -0,1 +1,2 @@
export declare type InlineTypes = 'note' | 'line_break' | 'bold_italic_underline' | 'bold_underline' | 'italic_underline' | 'bold_italic' | 'bold' | 'italic' | 'underline' | 'escape';
export declare class Lexer {
@@ -5,4 +6,4 @@ reconstruct(script: string): string;
export declare class InlineLexer extends Lexer {
private inline;
inline: Record<InlineTypes, string>;
reconstruct(line: string): string;
}
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.InlineLexer = exports.Lexer = void 0; | ||
const regex_1 = require("./regex"); | ||
const rules_1 = require("./rules"); | ||
const utilities_1 = require("./utilities"); | ||
class Lexer { | ||
reconstruct(script) { | ||
return script.replace(regex_1.regex.boneyard, '\n$1\n') | ||
.replace(regex_1.regex.standardizer, '\n') | ||
.replace(regex_1.regex.cleaner, '') | ||
.replace(regex_1.regex.whitespacer, ''); | ||
return script.replace(rules_1.rules.boneyard, '\n$1\n') | ||
.replace(rules_1.rules.standardizer, '\n') | ||
.replace(rules_1.rules.cleaner, '') | ||
.replace(rules_1.rules.whitespacer, ''); | ||
} | ||
@@ -20,9 +21,10 @@ } | ||
line_break: '<br />', | ||
bold_italic_underline: '<span class="bold italic underline">$2</span>', | ||
bold_underline: '<span class="bold underline">$2</span>', | ||
italic_underline: '<span class="italic underline">$2</span>', | ||
bold_italic: '<span class="bold italic">$2</span>', | ||
bold: '<span class="bold">$2</span>', | ||
italic: '<span class="italic">$2</span>', | ||
underline: '<span class="underline">$2</span>' | ||
bold_italic_underline: '<span class="bold italic underline">$1</span>', | ||
bold_underline: '<span class="bold underline">$1</span>', | ||
italic_underline: '<span class="italic underline">$1</span>', | ||
bold_italic: '<span class="bold italic">$1</span>', | ||
bold: '<span class="bold">$1</span>', | ||
italic: '<span class="italic">$1</span>', | ||
underline: '<span class="underline">$1</span>', | ||
escape: '$1' | ||
}; | ||
@@ -35,8 +37,8 @@ } | ||
const styles = ['bold_italic_underline', 'bold_underline', 'italic_underline', 'bold_italic', 'bold', 'italic', 'underline']; | ||
line = line.replace(regex_1.regex.note_inline, this.inline.note) | ||
.replace(/\\\*/g, '[star]') | ||
.replace(/\\_/g, '[underline]') | ||
.replace(/\n/g, this.inline.line_break); | ||
line = utilities_1.escapeHTML(line | ||
.replace(rules_1.rules.note_inline, this.inline.note) | ||
.replace(rules_1.rules.escape, '[{{{$&}}}]') // perserve escaped characters | ||
); | ||
for (let style of styles) { | ||
match = regex_1.regex[style]; | ||
match = rules_1.rules[style]; | ||
if (match.test(line)) { | ||
@@ -46,3 +48,6 @@ line = line.replace(match, this.inline[style]); | ||
} | ||
return line.replace(/\[star]/g, '*').replace(/\[underline]/g, '_').trim(); | ||
return line | ||
.replace(/\n/g, this.inline.line_break) | ||
.replace(/\[{{{\\(&.+?;|.)}}}]/g, this.inline.escape) // restore escaped chars to intended sequence | ||
.trim(); | ||
} | ||
@@ -49,0 +54,0 @@ } |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.Scanner = void 0; | ||
const regex_1 = require("./regex"); | ||
const rules_1 = require("./rules"); | ||
const token_1 = require("./token"); | ||
@@ -10,3 +10,3 @@ const lexer_1 = require("./lexer"); | ||
// reverse the array so that dual dialog can be constructed bottom up | ||
const source = new lexer_1.Lexer().reconstruct(script).split(regex_1.regex.splitter).reverse(); | ||
const source = new lexer_1.Lexer().reconstruct(script).split(rules_1.rules.splitter).reverse(); | ||
const tokens = source.reduce((previous, line) => { | ||
@@ -13,0 +13,0 @@ /** title page */ |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.ActionToken = exports.LineBreakToken = exports.PageBreakToken = exports.BoneyardToken = exports.NoteToken = exports.SynopsisToken = exports.SectionToken = exports.LyricsToken = exports.DualDialogueEndToken = exports.DualDialogueBeginToken = exports.ParentheticalToken = exports.DialogueEndToken = exports.DialogueToken = exports.CharacterToken = exports.DialogueBeginToken = exports.DialogueBlock = exports.TransitionToken = exports.CenteredToken = exports.SceneHeadingToken = exports.TitlePageToken = exports.TitlePageBlock = void 0; | ||
const regex_1 = require("./regex"); | ||
const rules_1 = require("./rules"); | ||
class TitlePageBlock { | ||
constructor(line) { | ||
this.tokens = []; | ||
const match = line.replace(regex_1.regex.title_page, '\n$1').split(regex_1.regex.splitter).reverse(); | ||
const match = line.replace(rules_1.rules.title_page, '\n$1').split(rules_1.rules.splitter).reverse(); | ||
this.tokens = match.reduce((previous, item) => new TitlePageToken(item).addTo(previous), []); | ||
@@ -15,3 +15,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.title_page.test(line); | ||
return rules_1.rules.title_page.test(line); | ||
} | ||
@@ -23,3 +23,3 @@ } | ||
this.is_title = true; | ||
const pair = item.replace(regex_1.regex.cleaner, '').split(/\:\n*/); | ||
const pair = item.replace(rules_1.rules.cleaner, '').split(/\:\n*/); | ||
this.type = pair[0].trim().toLowerCase().replace(' ', '_'); | ||
@@ -36,8 +36,8 @@ this.text = pair[1].trim(); | ||
this.type = 'scene_heading'; | ||
const match = line.match(regex_1.regex.scene_heading); | ||
const match = line.match(rules_1.rules.scene_heading); | ||
this.text = match[1] || match[2]; | ||
const meta = this.text.match(regex_1.regex.scene_number); | ||
const meta = this.text.match(rules_1.rules.scene_number); | ||
if (meta) { | ||
this.scene_number = meta[2]; | ||
this.text = this.text.replace(regex_1.regex.scene_number, ''); | ||
this.text = this.text.replace(rules_1.rules.scene_number, ''); | ||
} | ||
@@ -49,3 +49,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.scene_heading.test(line); | ||
return rules_1.rules.scene_heading.test(line); | ||
} | ||
@@ -57,3 +57,3 @@ } | ||
this.type = 'centered'; | ||
const match = line.match(regex_1.regex.centered); | ||
const match = line.match(rules_1.rules.centered); | ||
this.text = match[0].replace(/ *[><] */g, ''); | ||
@@ -65,3 +65,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.centered.test(line); | ||
return rules_1.rules.centered.test(line); | ||
} | ||
@@ -73,3 +73,3 @@ } | ||
this.type = 'transition'; | ||
const match = line.match(regex_1.regex.transition); | ||
const match = line.match(rules_1.rules.transition); | ||
this.text = match[1] || match[2]; | ||
@@ -81,3 +81,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.transition.test(line); | ||
return rules_1.rules.transition.test(line); | ||
} | ||
@@ -89,3 +89,3 @@ } | ||
this.tokens = []; | ||
const match = line.match(regex_1.regex.dialogue); | ||
const match = line.match(rules_1.rules.dialogue); | ||
let name = match[1]; | ||
@@ -105,6 +105,6 @@ // iterating from the bottom up, so push dialogue blocks in reverse order | ||
} | ||
if (regex_1.regex.parenthetical.test(text)) { | ||
if (rules_1.rules.parenthetical.test(text)) { | ||
return [...p, new ParentheticalToken(text)]; | ||
} | ||
if (regex_1.regex.lyrics.test(text)) { | ||
if (rules_1.rules.lyrics.test(text)) { | ||
if (previousToken.type === 'lyrics') { | ||
@@ -140,3 +140,3 @@ p[lastIndex].text = | ||
static matchedBy(line) { | ||
return regex_1.regex.dialogue.test(line); | ||
return rules_1.rules.dialogue.test(line); | ||
} | ||
@@ -221,3 +221,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.lyrics.test(line); | ||
return rules_1.rules.lyrics.test(line); | ||
} | ||
@@ -229,3 +229,3 @@ } | ||
this.type = 'section'; | ||
const match = line.match(regex_1.regex.section); | ||
const match = line.match(rules_1.rules.section); | ||
this.text = match[2]; | ||
@@ -238,3 +238,3 @@ this.depth = match[1].length; | ||
static matchedBy(line) { | ||
return regex_1.regex.section.test(line); | ||
return rules_1.rules.section.test(line); | ||
} | ||
@@ -246,3 +246,3 @@ } | ||
this.type = 'synopsis'; | ||
const match = line.match(regex_1.regex.synopsis); | ||
const match = line.match(rules_1.rules.synopsis); | ||
this.text = match[1]; | ||
@@ -254,3 +254,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.synopsis.test(line); | ||
return rules_1.rules.synopsis.test(line); | ||
} | ||
@@ -262,3 +262,3 @@ } | ||
this.type = 'note'; | ||
const match = line.match(regex_1.regex.note); | ||
const match = line.match(rules_1.rules.note); | ||
this.text = match[1]; | ||
@@ -270,3 +270,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.note.test(line); | ||
return rules_1.rules.note.test(line); | ||
} | ||
@@ -277,3 +277,3 @@ } | ||
constructor(line) { | ||
const match = line.match(regex_1.regex.boneyard); | ||
const match = line.match(rules_1.rules.boneyard); | ||
this.type = match[0][0] === '/' ? 'boneyard_begin' : 'boneyard_end'; | ||
@@ -285,3 +285,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.boneyard.test(line); | ||
return rules_1.rules.boneyard.test(line); | ||
} | ||
@@ -298,3 +298,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.page_break.test(line); | ||
return rules_1.rules.page_break.test(line); | ||
} | ||
@@ -311,3 +311,3 @@ } | ||
static matchedBy(line) { | ||
return regex_1.regex.line_break.test(line); | ||
return rules_1.rules.line_break.test(line); | ||
} | ||
@@ -326,5 +326,2 @@ } | ||
exports.ActionToken = ActionToken; | ||
function isTooShort(str) { | ||
return str.indexOf(' ') === str.length - 2; | ||
} | ||
//# sourceMappingURL=token.js.map |
{
"name": "fountain-js",
"version": "1.1.3",
"version": "1.1.4",
"description": "A simple parser for Fountain, a markup language for formatting screenplays.",
@@ -5,0 +5,0 @@ "main": "dist/index.js",