Comparing versions: css-tree 3.0.1 → 3.1.0
@@ -436,9 +436,2 @@ { | ||
}, | ||
"stroke-opacity": { | ||
"comment": "added SVG property", | ||
"references": [ | ||
"https://www.w3.org/TR/SVG/painting.html#StrokeProperties" | ||
], | ||
"syntax": "<'opacity'>" | ||
}, | ||
"stroke-width": { | ||
@@ -714,5 +707,2 @@ "comment": "added SVG property", | ||
}, | ||
"system-color": { | ||
"syntax": "AccentColor | AccentColorText | ActiveText | ButtonBorder | ButtonFace | ButtonText | Canvas | CanvasText | Field | FieldText | GrayText | Highlight | HighlightText | LinkText | Mark | MarkText | SelectedItem | SelectedItemText | VisitedText" | ||
}, | ||
"device-cmyk()": { | ||
@@ -719,0 +709,0 @@ "syntax": "<legacy-device-cmyk-syntax> | <modern-device-cmyk-syntax>" |
@@ -1,1 +0,1 @@ | ||
export const version = "3.0.1"; | ||
export const version = "3.1.0"; |
@@ -79,2 +79,6 @@ function noop(value) { | ||
case 'Boolean': | ||
result = '<boolean-expr[' + internalGenerate(node.term, decorate, forceBraces, compact) + ']>'; | ||
break; | ||
case 'Type': | ||
@@ -81,0 +85,0 @@ result = '<' + node.name + (node.opts ? decorate(generateTypeOpts(node.opts), node.opts) : '') + '>'; |
@@ -1,2 +0,2 @@ | ||
import { Tokenizer } from './tokenizer.js'; | ||
import { Scanner } from './scanner.js'; | ||
@@ -28,5 +28,2 @@ const TAB = 9; | ||
const INFINITY = 8734; // ∞ | ||
const NAME_CHAR = new Uint8Array(128).map((_, idx) => | ||
/[a-zA-Z0-9\-]/.test(String.fromCharCode(idx)) ? 1 : 0 | ||
); | ||
const COMBINATOR_PRECEDENCE = { | ||
@@ -39,70 +36,19 @@ ' ': 1, | ||
function scanSpaces(tokenizer) { | ||
return tokenizer.substringToPos( | ||
tokenizer.findWsEnd(tokenizer.pos) | ||
); | ||
} | ||
function scanWord(tokenizer) { | ||
let end = tokenizer.pos; | ||
for (; end < tokenizer.str.length; end++) { | ||
const code = tokenizer.str.charCodeAt(end); | ||
if (code >= 128 || NAME_CHAR[code] === 0) { | ||
break; | ||
} | ||
} | ||
if (tokenizer.pos === end) { | ||
tokenizer.error('Expect a keyword'); | ||
} | ||
return tokenizer.substringToPos(end); | ||
} | ||
function scanNumber(tokenizer) { | ||
let end = tokenizer.pos; | ||
for (; end < tokenizer.str.length; end++) { | ||
const code = tokenizer.str.charCodeAt(end); | ||
if (code < 48 || code > 57) { | ||
break; | ||
} | ||
} | ||
if (tokenizer.pos === end) { | ||
tokenizer.error('Expect a number'); | ||
} | ||
return tokenizer.substringToPos(end); | ||
} | ||
function scanString(tokenizer) { | ||
const end = tokenizer.str.indexOf('\'', tokenizer.pos + 1); | ||
if (end === -1) { | ||
tokenizer.pos = tokenizer.str.length; | ||
tokenizer.error('Expect an apostrophe'); | ||
} | ||
return tokenizer.substringToPos(end + 1); | ||
} | ||
function readMultiplierRange(tokenizer) { | ||
function readMultiplierRange(scanner) { | ||
let min = null; | ||
let max = null; | ||
tokenizer.eat(LEFTCURLYBRACKET); | ||
tokenizer.skipWs(); | ||
scanner.eat(LEFTCURLYBRACKET); | ||
scanner.skipWs(); | ||
min = scanNumber(tokenizer); | ||
tokenizer.skipWs(); | ||
min = scanner.scanNumber(scanner); | ||
scanner.skipWs(); | ||
if (tokenizer.charCode() === COMMA) { | ||
tokenizer.pos++; | ||
tokenizer.skipWs(); | ||
if (scanner.charCode() === COMMA) { | ||
scanner.pos++; | ||
scanner.skipWs(); | ||
if (tokenizer.charCode() !== RIGHTCURLYBRACKET) { | ||
max = scanNumber(tokenizer); | ||
tokenizer.skipWs(); | ||
if (scanner.charCode() !== RIGHTCURLYBRACKET) { | ||
max = scanner.scanNumber(scanner); | ||
scanner.skipWs(); | ||
} | ||
@@ -113,3 +59,3 @@ } else { | ||
tokenizer.eat(RIGHTCURLYBRACKET); | ||
scanner.eat(RIGHTCURLYBRACKET); | ||
@@ -122,9 +68,9 @@ return { | ||
function readMultiplier(tokenizer) { | ||
function readMultiplier(scanner) { | ||
let range = null; | ||
let comma = false; | ||
switch (tokenizer.charCode()) { | ||
switch (scanner.charCode()) { | ||
case ASTERISK: | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
@@ -139,3 +85,3 @@ range = { | ||
case PLUSSIGN: | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
@@ -150,3 +96,3 @@ range = { | ||
case QUESTIONMARK: | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
@@ -161,9 +107,9 @@ range = { | ||
case NUMBERSIGN: | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
comma = true; | ||
if (tokenizer.charCode() === LEFTCURLYBRACKET) { | ||
range = readMultiplierRange(tokenizer); | ||
} else if (tokenizer.charCode() === QUESTIONMARK) { | ||
if (scanner.charCode() === LEFTCURLYBRACKET) { | ||
range = readMultiplierRange(scanner); | ||
} else if (scanner.charCode() === QUESTIONMARK) { | ||
// https://www.w3.org/TR/css-values-4/#component-multipliers | ||
@@ -173,3 +119,3 @@ // > the # and ? multipliers may be stacked as #? | ||
// { min: 0, max: 0, comma: true } | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
range = { | ||
@@ -189,3 +135,3 @@ min: 0, | ||
case LEFTCURLYBRACKET: | ||
range = readMultiplierRange(tokenizer); | ||
range = readMultiplierRange(scanner); | ||
break; | ||
@@ -206,4 +152,4 @@ | ||
function maybeMultiplied(tokenizer, node) { | ||
const multiplier = readMultiplier(tokenizer); | ||
function maybeMultiplied(scanner, node) { | ||
const multiplier = readMultiplier(scanner); | ||
@@ -222,5 +168,5 @@ if (multiplier !== null) { | ||
// } | ||
if (tokenizer.charCode() === NUMBERSIGN && | ||
tokenizer.charCodeAt(tokenizer.pos - 1) === PLUSSIGN) { | ||
return maybeMultiplied(tokenizer, multiplier); | ||
if (scanner.charCode() === NUMBERSIGN && | ||
scanner.charCodeAt(scanner.pos - 1) === PLUSSIGN) { | ||
return maybeMultiplied(scanner, multiplier); | ||
} | ||
@@ -234,4 +180,4 @@ | ||
function maybeToken(tokenizer) { | ||
const ch = tokenizer.peek(); | ||
function maybeToken(scanner) { | ||
const ch = scanner.peek(); | ||
@@ -242,20 +188,20 @@ if (ch === '') { | ||
return { | ||
return maybeMultiplied(scanner, { | ||
type: 'Token', | ||
value: ch | ||
}; | ||
}); | ||
} | ||
function readProperty(tokenizer) { | ||
function readProperty(scanner) { | ||
let name; | ||
tokenizer.eat(LESSTHANSIGN); | ||
tokenizer.eat(APOSTROPHE); | ||
scanner.eat(LESSTHANSIGN); | ||
scanner.eat(APOSTROPHE); | ||
name = scanWord(tokenizer); | ||
name = scanner.scanWord(); | ||
tokenizer.eat(APOSTROPHE); | ||
tokenizer.eat(GREATERTHANSIGN); | ||
scanner.eat(APOSTROPHE); | ||
scanner.eat(GREATERTHANSIGN); | ||
return maybeMultiplied(tokenizer, { | ||
return maybeMultiplied(scanner, { | ||
type: 'Property', | ||
@@ -273,3 +219,3 @@ name | ||
// For example, <integer [0, 10]> indicates an integer between 0 and 10, inclusive. | ||
function readTypeRange(tokenizer) { | ||
function readTypeRange(scanner) { | ||
// use null for Infinity to make AST format JSON serializable/deserializable | ||
@@ -280,41 +226,41 @@ let min = null; // -Infinity | ||
tokenizer.eat(LEFTSQUAREBRACKET); | ||
scanner.eat(LEFTSQUAREBRACKET); | ||
if (tokenizer.charCode() === HYPERMINUS) { | ||
tokenizer.peek(); | ||
if (scanner.charCode() === HYPERMINUS) { | ||
scanner.peek(); | ||
sign = -1; | ||
} | ||
if (sign == -1 && tokenizer.charCode() === INFINITY) { | ||
tokenizer.peek(); | ||
if (sign == -1 && scanner.charCode() === INFINITY) { | ||
scanner.peek(); | ||
} else { | ||
min = sign * Number(scanNumber(tokenizer)); | ||
min = sign * Number(scanner.scanNumber(scanner)); | ||
if (NAME_CHAR[tokenizer.charCode()] !== 0) { | ||
min += scanWord(tokenizer); | ||
if (scanner.isNameCharCode()) { | ||
min += scanner.scanWord(); | ||
} | ||
} | ||
scanSpaces(tokenizer); | ||
tokenizer.eat(COMMA); | ||
scanSpaces(tokenizer); | ||
scanner.skipWs(); | ||
scanner.eat(COMMA); | ||
scanner.skipWs(); | ||
if (tokenizer.charCode() === INFINITY) { | ||
tokenizer.peek(); | ||
if (scanner.charCode() === INFINITY) { | ||
scanner.peek(); | ||
} else { | ||
sign = 1; | ||
if (tokenizer.charCode() === HYPERMINUS) { | ||
tokenizer.peek(); | ||
if (scanner.charCode() === HYPERMINUS) { | ||
scanner.peek(); | ||
sign = -1; | ||
} | ||
max = sign * Number(scanNumber(tokenizer)); | ||
max = sign * Number(scanner.scanNumber(scanner)); | ||
if (NAME_CHAR[tokenizer.charCode()] !== 0) { | ||
max += scanWord(tokenizer); | ||
if (scanner.isNameCharCode()) { | ||
max += scanner.scanWord(); | ||
} | ||
} | ||
tokenizer.eat(RIGHTSQUAREBRACKET); | ||
scanner.eat(RIGHTSQUAREBRACKET); | ||
@@ -328,23 +274,40 @@ return { | ||
function readType(tokenizer) { | ||
function readType(scanner) { | ||
let name; | ||
let opts = null; | ||
tokenizer.eat(LESSTHANSIGN); | ||
name = scanWord(tokenizer); | ||
scanner.eat(LESSTHANSIGN); | ||
name = scanner.scanWord(); | ||
if (tokenizer.charCode() === LEFTPARENTHESIS && | ||
tokenizer.nextCharCode() === RIGHTPARENTHESIS) { | ||
tokenizer.pos += 2; | ||
// https://drafts.csswg.org/css-values-5/#boolean | ||
if (name === 'boolean-expr') { | ||
scanner.eat(LEFTSQUAREBRACKET); | ||
const implicitGroup = readImplicitGroup(scanner, RIGHTSQUAREBRACKET); | ||
scanner.eat(RIGHTSQUAREBRACKET); | ||
scanner.eat(GREATERTHANSIGN); | ||
return maybeMultiplied(scanner, { | ||
type: 'Boolean', | ||
term: implicitGroup.terms.length === 1 | ||
? implicitGroup.terms[0] | ||
: implicitGroup | ||
}); | ||
} | ||
if (scanner.charCode() === LEFTPARENTHESIS && | ||
scanner.nextCharCode() === RIGHTPARENTHESIS) { | ||
scanner.pos += 2; | ||
name += '()'; | ||
} | ||
if (tokenizer.charCodeAt(tokenizer.findWsEnd(tokenizer.pos)) === LEFTSQUAREBRACKET) { | ||
scanSpaces(tokenizer); | ||
opts = readTypeRange(tokenizer); | ||
if (scanner.charCodeAt(scanner.findWsEnd(scanner.pos)) === LEFTSQUAREBRACKET) { | ||
scanner.skipWs(); | ||
opts = readTypeRange(scanner); | ||
} | ||
tokenizer.eat(GREATERTHANSIGN); | ||
scanner.eat(GREATERTHANSIGN); | ||
return maybeMultiplied(tokenizer, { | ||
return maybeMultiplied(scanner, { | ||
type: 'Type', | ||
@@ -356,7 +319,7 @@ name, | ||
function readKeywordOrFunction(tokenizer) { | ||
const name = scanWord(tokenizer); | ||
function readKeywordOrFunction(scanner) { | ||
const name = scanner.scanWord(); | ||
if (tokenizer.charCode() === LEFTPARENTHESIS) { | ||
tokenizer.pos++; | ||
if (scanner.charCode() === LEFTPARENTHESIS) { | ||
scanner.pos++; | ||
@@ -369,3 +332,3 @@ return { | ||
return maybeMultiplied(tokenizer, { | ||
return maybeMultiplied(scanner, { | ||
type: 'Keyword', | ||
@@ -434,10 +397,10 @@ name | ||
function readImplicitGroup(tokenizer) { | ||
function readImplicitGroup(scanner, stopCharCode) { | ||
const combinators = Object.create(null); | ||
const terms = []; | ||
const combinators = {}; | ||
let token; | ||
let prevToken = null; | ||
let prevTokenPos = tokenizer.pos; | ||
let prevTokenPos = scanner.pos; | ||
while (token = peek(tokenizer)) { | ||
while (scanner.charCode() !== stopCharCode && (token = peek(scanner, stopCharCode))) { | ||
if (token.type !== 'Spaces') { | ||
@@ -447,4 +410,4 @@ if (token.type === 'Combinator') { | ||
if (prevToken === null || prevToken.type === 'Combinator') { | ||
tokenizer.pos = prevTokenPos; | ||
tokenizer.error('Unexpected combinator'); | ||
scanner.pos = prevTokenPos; | ||
scanner.error('Unexpected combinator'); | ||
} | ||
@@ -463,3 +426,3 @@ | ||
prevToken = token; | ||
prevTokenPos = tokenizer.pos; | ||
prevTokenPos = scanner.pos; | ||
} | ||
@@ -470,4 +433,4 @@ } | ||
if (prevToken !== null && prevToken.type === 'Combinator') { | ||
tokenizer.pos -= prevTokenPos; | ||
tokenizer.error('Unexpected combinator'); | ||
scanner.pos -= prevTokenPos; | ||
scanner.error('Unexpected combinator'); | ||
} | ||
@@ -484,13 +447,13 @@ | ||
function readGroup(tokenizer) { | ||
function readGroup(scanner, stopCharCode) { | ||
let result; | ||
tokenizer.eat(LEFTSQUAREBRACKET); | ||
result = readImplicitGroup(tokenizer); | ||
tokenizer.eat(RIGHTSQUAREBRACKET); | ||
scanner.eat(LEFTSQUAREBRACKET); | ||
result = readImplicitGroup(scanner, stopCharCode); | ||
scanner.eat(RIGHTSQUAREBRACKET); | ||
result.explicit = true; | ||
if (tokenizer.charCode() === EXCLAMATIONMARK) { | ||
tokenizer.pos++; | ||
if (scanner.charCode() === EXCLAMATIONMARK) { | ||
scanner.pos++; | ||
result.disallowEmpty = true; | ||
@@ -502,9 +465,5 @@ } | ||
function peek(tokenizer) { | ||
let code = tokenizer.charCode(); | ||
function peek(scanner, stopCharCode) { | ||
let code = scanner.charCode(); | ||
if (code < 128 && NAME_CHAR[code] === 1) { | ||
return readKeywordOrFunction(tokenizer); | ||
} | ||
switch (code) { | ||
@@ -516,8 +475,8 @@ case RIGHTSQUAREBRACKET: | ||
case LEFTSQUAREBRACKET: | ||
return maybeMultiplied(tokenizer, readGroup(tokenizer)); | ||
return maybeMultiplied(scanner, readGroup(scanner, stopCharCode)); | ||
case LESSTHANSIGN: | ||
return tokenizer.nextCharCode() === APOSTROPHE | ||
? readProperty(tokenizer) | ||
: readType(tokenizer); | ||
return scanner.nextCharCode() === APOSTROPHE | ||
? readProperty(scanner) | ||
: readType(scanner); | ||
@@ -527,4 +486,4 @@ case VERTICALLINE: | ||
type: 'Combinator', | ||
value: tokenizer.substringToPos( | ||
tokenizer.pos + (tokenizer.nextCharCode() === VERTICALLINE ? 2 : 1) | ||
value: scanner.substringToPos( | ||
scanner.pos + (scanner.nextCharCode() === VERTICALLINE ? 2 : 1) | ||
) | ||
@@ -534,4 +493,4 @@ }; | ||
case AMPERSAND: | ||
tokenizer.pos++; | ||
tokenizer.eat(AMPERSAND); | ||
scanner.pos++; | ||
scanner.eat(AMPERSAND); | ||
@@ -544,3 +503,3 @@ return { | ||
case COMMA: | ||
tokenizer.pos++; | ||
scanner.pos++; | ||
return { | ||
@@ -551,5 +510,5 @@ type: 'Comma' | ||
case APOSTROPHE: | ||
return maybeMultiplied(tokenizer, { | ||
return maybeMultiplied(scanner, { | ||
type: 'String', | ||
value: scanString(tokenizer) | ||
value: scanner.scanString() | ||
}); | ||
@@ -564,17 +523,17 @@ | ||
type: 'Spaces', | ||
value: scanSpaces(tokenizer) | ||
value: scanner.scanSpaces() | ||
}; | ||
case COMMERCIALAT: | ||
code = tokenizer.nextCharCode(); | ||
code = scanner.nextCharCode(); | ||
if (code < 128 && NAME_CHAR[code] === 1) { | ||
tokenizer.pos++; | ||
if (scanner.isNameCharCode(code)) { | ||
scanner.pos++; | ||
return { | ||
type: 'AtKeyword', | ||
name: scanWord(tokenizer) | ||
name: scanner.scanWord() | ||
}; | ||
} | ||
return maybeToken(tokenizer); | ||
return maybeToken(scanner); | ||
@@ -592,6 +551,6 @@ case ASTERISK: | ||
// check next char isn't a number, because it's likely a disjoined multiplier | ||
code = tokenizer.nextCharCode(); | ||
code = scanner.nextCharCode(); | ||
if (code < 48 || code > 57) { | ||
return maybeToken(tokenizer); | ||
return maybeToken(scanner); | ||
} | ||
@@ -602,3 +561,7 @@ | ||
default: | ||
return maybeToken(tokenizer); | ||
if (scanner.isNameCharCode(code)) { | ||
return readKeywordOrFunction(scanner); | ||
} | ||
return maybeToken(scanner); | ||
} | ||
@@ -608,7 +571,7 @@ } | ||
export function parse(source) { | ||
const tokenizer = new Tokenizer(source); | ||
const result = readImplicitGroup(tokenizer); | ||
const scanner = new Scanner(source); | ||
const result = readImplicitGroup(scanner); | ||
if (tokenizer.pos !== source.length) { | ||
tokenizer.error('Unexpected input'); | ||
if (scanner.pos !== source.length) { | ||
scanner.error('Unexpected input'); | ||
} | ||
@@ -615,0 +578,0 @@ |
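
Taken together, the parser changes above introduce a dedicated `Boolean` node for the css-values-5 `<boolean-expr[ … ]>` notation (see the `readType` branch for `boolean-expr`), with matching `generate` and `walk` cases elsewhere in this diff. A minimal sketch of how this should surface through the public `definitionSyntax` API; the exact AST shape (an implicit `Group` wrapping a single `Boolean` term) is an assumption based on the diff, not a documented contract:

```js
import { definitionSyntax } from 'css-tree';

// Parse a definition that uses the new boolean-expr notation
const ast = definitionSyntax.parse('<boolean-expr[ <test> ]>');

// Assumption: parse() returns an implicit Group whose single term is the new Boolean node
const booleanNode = ast.terms[0];
console.log(booleanNode.type);      // expected: 'Boolean'
console.log(booleanNode.term.type); // expected: 'Type' (the inner <test>)

// generate() round-trips it via the new 'Boolean' case in generate.js
console.log(definitionSyntax.generate(ast)); // expected: '<boolean-expr[<test>]>'
```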
@@ -17,2 +17,3 @@ const noop = function() {}; | ||
case 'Multiplier': | ||
case 'Boolean': | ||
walk(node.term); | ||
@@ -19,0 +20,0 @@ break; |
@@ -7,3 +7,3 @@ import syntax from './syntax/index.js'; | ||
export { Lexer } from './lexer/Lexer.js'; | ||
export { tokenTypes, tokenNames, TokenStream } from './tokenizer/index.js'; | ||
export { tokenTypes, tokenNames, TokenStream, OffsetToLocation } from './tokenizer/index.js'; | ||
export * as definitionSyntax from './definition-syntax/index.js'; | ||
@@ -10,0 +10,0 @@ export { clone } from './utils/clone.js'; |
@@ -50,2 +50,31 @@ import { parse } from '../definition-syntax/parse.js'; | ||
function groupNode(terms, combinator = ' ', explicit = false) { | ||
return { | ||
type: 'Group', | ||
terms, | ||
combinator, | ||
disallowEmpty: false, | ||
explicit | ||
}; | ||
} | ||
function replaceTypeInGraph(node, replacements, visited = new Set()) { | ||
if (!visited.has(node)) { | ||
visited.add(node); | ||
switch (node.type) { | ||
case 'If': | ||
node.match = replaceTypeInGraph(node.match, replacements, visited); | ||
node.then = replaceTypeInGraph(node.then, replacements, visited); | ||
node.else = replaceTypeInGraph(node.else, replacements, visited); | ||
break; | ||
case 'Type': | ||
return replacements[node.name] || node; | ||
} | ||
} | ||
return node; | ||
} | ||
function buildGroupMatchGraph(combinator, terms, atLeastOneTermMatched) { | ||
@@ -382,2 +411,44 @@ switch (combinator) { | ||
// https://drafts.csswg.org/css-values-5/#boolean | ||
case 'Boolean': { | ||
const term = buildMatchGraphInternal(node.term); | ||
// <boolean-expr[ <test> ]> = not <boolean-expr-group> | <boolean-expr-group> [ [ and <boolean-expr-group> ]* | [ or <boolean-expr-group> ]* ] | ||
const matchNode = buildMatchGraphInternal(groupNode([ | ||
groupNode([ | ||
{ type: 'Keyword', name: 'not' }, | ||
{ type: 'Type', name: '!boolean-group' } | ||
]), | ||
groupNode([ | ||
{ type: 'Type', name: '!boolean-group' }, | ||
groupNode([ | ||
{ type: 'Multiplier', comma: false, min: 0, max: 0, term: groupNode([ | ||
{ type: 'Keyword', name: 'and' }, | ||
{ type: 'Type', name: '!boolean-group' } | ||
]) }, | ||
{ type: 'Multiplier', comma: false, min: 0, max: 0, term: groupNode([ | ||
{ type: 'Keyword', name: 'or' }, | ||
{ type: 'Type', name: '!boolean-group' } | ||
]) } | ||
], '|') | ||
]) | ||
], '|')); | ||
// <boolean-expr-group> = <test> | ( <boolean-expr[ <test> ]> ) | <general-enclosed> | ||
const booleanGroup = buildMatchGraphInternal( | ||
groupNode([ | ||
{ type: 'Type', name: '!term' }, | ||
groupNode([ | ||
{ type: 'Token', value: '(' }, | ||
{ type: 'Type', name: '!self' }, | ||
{ type: 'Token', value: ')' } | ||
]), | ||
{ type: 'Type', name: 'general-enclosed' } | ||
], '|') | ||
); | ||
replaceTypeInGraph(booleanGroup, { '!term': term, '!self': matchNode }); | ||
replaceTypeInGraph(matchNode, { '!boolean-group': booleanGroup }); | ||
return matchNode; | ||
} | ||
case 'Type': | ||
@@ -384,0 +455,0 @@ case 'Property': |
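
The new `Boolean` case in the match-graph builder expands `<boolean-expr[ <test> ]>` into the `not` / `and` / `or` grammar quoted in the comments, wiring the placeholder `!term`, `!self` and `!boolean-group` types together via `replaceTypeInGraph`. A rough sketch of what that should enable at the lexer level, using the documented `Lexer#match(syntax, value)` API; the syntax string and expected results below are illustrative assumptions, not tests taken from the package:

```js
import { lexer } from 'css-tree';

// Illustrative syntax: a boolean expression over <number> terms
const syntax = '<boolean-expr[ <number> ]>';

console.log(lexer.match(syntax, '1 and 2 and 3').matched !== null); // expected: true
console.log(lexer.match(syntax, 'not (1 or 2)').matched !== null);  // expected: true
console.log(lexer.match(syntax, '1 and').matched !== null);         // expected: false (dangling combinator)
```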
@@ -11,2 +11,3 @@ import { Ident, Delim } from '../../tokenizer/index.js'; | ||
export function parse() { | ||
let tokenStart = this.tokenStart; | ||
let name = this.consume(Ident); | ||
@@ -21,3 +22,3 @@ | ||
type: 'Layer', | ||
loc: this.getLocation(this.tokenStart, this.tokenEnd), | ||
loc: this.getLocation(tokenStart, this.tokenStart), | ||
name | ||
@@ -24,0 +25,0 @@ }; |
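
The `Layer` change above captures `tokenStart` before the name tokens are consumed, so the node's `loc` should now cover the layer name itself rather than ending at the following token. A hedged way to observe this, assuming `@layer` preludes are parsed into `Layer` nodes by default as in earlier 3.x releases:

```js
import { parse, walk } from 'css-tree';

const css = '@layer base.theme;';
const ast = parse(css, { positions: true });

walk(ast, (node) => {
    if (node.type === 'Layer') {
        // With the fix above, this range is expected to span "base.theme"
        console.log(css.slice(node.loc.start.offset, node.loc.end.offset));
    }
});
```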
@@ -44,8 +44,8 @@ import { adoptBuffer } from './adopt-buffer.js'; | ||
export class OffsetToLocation { | ||
constructor() { | ||
constructor(source, startOffset, startLine, startColumn) { | ||
this.setSource(source, startOffset, startLine, startColumn); | ||
this.lines = null; | ||
this.columns = null; | ||
this.computed = false; | ||
} | ||
setSource(source, startOffset = 0, startLine = 1, startColumn = 1) { | ||
setSource(source = '', startOffset = 0, startLine = 1, startColumn = 1) { | ||
this.source = source; | ||
@@ -52,0 +52,0 @@ this.startOffset = startOffset; |
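
`OffsetToLocation` is now part of the public exports (see the `index.js` change earlier in this diff), and its constructor accepts the source up front instead of requiring a separate `setSource()` call. A minimal usage sketch; the `getLocation()` method name and return shape are assumed from the 3.x tokenizer rather than spelled out here:

```js
import { OffsetToLocation } from 'css-tree';

const source = '.a {\n  color: red;\n}';
const offsetToLocation = new OffsetToLocation(source);

// Assumption: getLocation(offset[, filename]) returns { source, offset, line, column }
console.log(offsetToLocation.getLocation(7)); // expected: line 2, column 3 (the "c" in "color")
```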
@@ -20,9 +20,12 @@ import { adoptBuffer } from './adopt-buffer.js'; | ||
const TYPE_SHIFT = 24; | ||
const balancePair = new Map([ | ||
[FunctionToken, RightParenthesis], | ||
[LeftParenthesis, RightParenthesis], | ||
[LeftSquareBracket, RightSquareBracket], | ||
[LeftCurlyBracket, RightCurlyBracket] | ||
]); | ||
const balancePair = new Uint8Array(32); // 32b of memory ought to be enough for anyone (any number of tokens) | ||
balancePair[FunctionToken] = RightParenthesis; | ||
balancePair[LeftParenthesis] = RightParenthesis; | ||
balancePair[LeftSquareBracket] = RightSquareBracket; | ||
balancePair[LeftCurlyBracket] = RightCurlyBracket; | ||
function isBlockOpenerToken(tokenType) { | ||
return balancePair[tokenType] !== 0; | ||
} | ||
export class TokenStream { | ||
@@ -46,5 +49,5 @@ constructor(source, tokenize) { | ||
let tokenCount = 0; | ||
let firstCharOffset = -1; | ||
let balanceCloseType = 0; | ||
let balanceStart = 0; | ||
let firstCharOffset = -1; | ||
let balanceStart = source.length; | ||
@@ -54,37 +57,31 @@ // capture buffers | ||
this.balance = null; | ||
balance.fill(0); | ||
tokenize(source, (type, start, end) => { | ||
switch (type) { | ||
default: | ||
balance[tokenCount] = sourceLength; | ||
break; | ||
const index = tokenCount++; | ||
case balanceCloseType: { | ||
let balancePrev = balanceStart & OFFSET_MASK; | ||
balanceStart = balance[balancePrev]; | ||
balanceCloseType = balanceStart >> TYPE_SHIFT; | ||
balance[tokenCount] = balancePrev; | ||
balance[balancePrev++] = tokenCount; | ||
for (; balancePrev < tokenCount; balancePrev++) { | ||
if (balance[balancePrev] === sourceLength) { | ||
balance[balancePrev] = tokenCount; | ||
} | ||
} | ||
break; | ||
} | ||
// type & offset | ||
offsetAndType[index] = (type << TYPE_SHIFT) | end; | ||
case LeftParenthesis: | ||
case FunctionToken: | ||
case LeftSquareBracket: | ||
case LeftCurlyBracket: | ||
balance[tokenCount] = balanceStart; | ||
balanceCloseType = balancePair.get(type); | ||
balanceStart = (balanceCloseType << TYPE_SHIFT) | tokenCount; | ||
break; | ||
} | ||
offsetAndType[tokenCount++] = (type << TYPE_SHIFT) | end; | ||
if (firstCharOffset === -1) { | ||
firstCharOffset = start; | ||
} | ||
// balance | ||
balance[index] = balanceStart; | ||
if (type === balanceCloseType) { | ||
const prevBalanceStart = balance[balanceStart]; | ||
// set reference to balance end for a block opener | ||
balance[balanceStart] = index; | ||
// pop state | ||
balanceStart = prevBalanceStart; | ||
balanceCloseType = balancePair[offsetAndType[prevBalanceStart] >> TYPE_SHIFT]; | ||
} else if (isBlockOpenerToken(type)) { // check for FunctionToken, <(-token>, <[-token> and <{-token> | ||
// push state | ||
balanceStart = index; | ||
balanceCloseType = balancePair[type]; | ||
} | ||
}); | ||
@@ -94,10 +91,30 @@ | ||
offsetAndType[tokenCount] = (EOF << TYPE_SHIFT) | sourceLength; // <EOF-token> | ||
balance[tokenCount] = sourceLength; | ||
balance[sourceLength] = sourceLength; // prevents false positive balance match with any token | ||
while (balanceStart !== 0) { | ||
const balancePrev = balanceStart & OFFSET_MASK; | ||
balanceStart = balance[balancePrev]; | ||
balance[balancePrev] = sourceLength; | ||
balance[tokenCount] = tokenCount; // prevents false positive balance match with any token | ||
// reverse references from balance start to end | ||
// tokens | ||
// token: a ( [ b c ] d e ) { | ||
// index: 0 1 2 3 4 5 6 7 8 9 | ||
// before | ||
// balance: 0 8 5 2 2 2 1 1 1 0 | ||
// - > > < < < < < < - | ||
// after | ||
// balance: 9 8 5 5 5 2 8 8 1 9 | ||
// > > > > > < > > < > | ||
for (let i = 0; i < tokenCount; i++) { | ||
const balanceStart = balance[i]; | ||
if (balanceStart <= i) { | ||
const balanceEnd = balance[balanceStart]; | ||
if (balanceEnd !== i) { | ||
balance[i] = balanceEnd; | ||
} | ||
} else if (balanceStart > tokenCount) { | ||
balance[i] = tokenCount; | ||
} | ||
} | ||
// balance[0] = tokenCount; | ||
this.source = source; | ||
@@ -190,2 +207,3 @@ this.firstCharOffset = firstCharOffset === -1 ? 0 : firstCharOffset; | ||
return this.balance[this.tokenIndex] < pos; | ||
// return this.balance[this.balance[pos]] !== this.tokenIndex; | ||
} | ||
@@ -243,4 +261,4 @@ isDelim(code, offset) { | ||
let cursor = startToken; | ||
let balanceEnd; | ||
let offset; | ||
let balanceEnd = 0; | ||
let offset = 0; | ||
@@ -268,4 +286,4 @@ loop: | ||
default: | ||
// fast forward to the end of balanced block | ||
if (this.balance[balanceEnd] === cursor) { | ||
// fast forward to the end of balanced block for an open block tokens | ||
if (isBlockOpenerToken(this.offsetAndType[cursor] >> TYPE_SHIFT)) { | ||
cursor = balanceEnd; | ||
@@ -272,0 +290,0 @@ } |
{ | ||
"name": "css-tree", | ||
"version": "3.0.1", | ||
"version": "3.1.0", | ||
"description": "A tool set for CSS: fast detailed parser (CSS → AST), walker (AST traversal), generator (AST → CSS) and lexer (validation and matching) based on specs and browser implementations", | ||
@@ -102,3 +102,3 @@ "author": "Roman Dvornov <rdvornov@gmail.com> (https://github.com/lahmatiy)", | ||
"dependencies": { | ||
"mdn-data": "2.12.1", | ||
"mdn-data": "2.12.2", | ||
"source-map-js": "^1.0.1" | ||
@@ -109,3 +109,3 @@ }, | ||
"clap": "^2.0.1", | ||
"esbuild": "^0.14.53", | ||
"esbuild": "^0.24.0", | ||
"eslint": "^8.4.1", | ||
@@ -112,0 +112,0 @@ "json-to-ast": "^2.1.0", |
@@ -29,3 +29,3 @@ <img align="right" width="111" height="111" alt="CSSTree logo" src="assets/csstree-logo-rounded.svg" /> | ||
The built-in lexer can test CSS against syntaxes defined by W3C. CSSTree uses [mdn/data](https://github.com/mdn/data/) as a basis for lexer's dictionaries and extends it with vendor specific and legacy syntaxes. Lexer can only check the declaration values currently, but this feature will be extended to other parts of the CSS in the future. | ||
The built-in lexer can test CSS against syntaxes defined by W3C. CSSTree uses [mdn/data](https://github.com/mdn/data/) as a basis for lexer's dictionaries and extends it with vendor specific and legacy syntaxes. Lexer can only check the declaration values and at-rules currently, but this feature will be extended to other parts of the CSS in the future. | ||
@@ -62,2 +62,3 @@ ## Projects using CSSTree | ||
- [url](docs/utils.md#url) | ||
- [List class](docs/list.md) | ||
- AST transforming | ||
@@ -64,0 +65,0 @@ - [clone(ast)](docs/utils.md#cloneast) |
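
The README wording above reflects that the lexer now covers at-rules in addition to declaration values. A short sketch of both kinds of check using the documented `lexer` instance; the specific at-rule and descriptor chosen here are just examples, and the results are expectations rather than quoted test output:

```js
import { lexer } from 'css-tree';

// Declaration value validation
console.log(lexer.matchProperty('color', 'red').matched !== null); // expected: true
console.log(lexer.matchProperty('color', '10px').error !== null);  // expected: true (mismatch reported)

// At-rule validation: prelude and descriptor
console.log(lexer.matchAtrulePrelude('supports', '(display: flex)').matched !== null);          // expected: true
console.log(lexer.matchAtruleDescriptor('font-face', 'font-display', 'swap').matched !== null); // expected: true
```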
(Diffs for the remaining changed files are not shown: the viewer reports them as unsupported or too large to display.)