Compare commits

bb92a9e0b4 ... b7a65e07dc

17 commits:

b7a65e07dc
8299022b4f
131c943fc6
866da86862
5ac0b02044
e0095b110f
a38932a833
03596aab5b
bd1dbe75f3
669e58b71e
152aac269f
a428e98d41
061452a334
4494cbce91
47d1ea1a0b
82a97c0a5a
ab12212df2
@@ -200,7 +200,7 @@ function parseExpression(input: string) {
 - **Not in scope** → Parses as `Word("obj.prop")` → compiles to `PUSH 'obj.prop'` (treated as file path/string)

 Implementation files:

-- **src/parser/scopeTracker.ts**: ContextTracker that maintains immutable scope chain
+- **src/parser/parserScopeContext.ts**: ContextTracker that maintains immutable scope chain
 - **src/parser/tokenizer.ts**: External tokenizer checks `stack.context` to decide if dot creates DotGet or Word
 - Scope tracking: Captures variables from assignments (`x = 5`) and function parameters (`fn x:`)
 - See `src/parser/tests/dot-get.test.ts` for comprehensive examples
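The immutable scope chain mentioned above is the heart of the DotGet/Word distinction. For readers new to Lezer, here is a minimal sketch of the ContextTracker pattern those notes describe; the `Scope` class and the reduce logic are illustrative stand-ins, not the project's actual implementation:

```ts
import { ContextTracker } from '@lezer/lr'

// Hypothetical immutable scope chain: adding a name returns a new
// link instead of mutating, so Lezer can safely reuse contexts
// across incremental re-parses.
class Scope {
  constructor(
    readonly vars: ReadonlySet<string>,
    readonly parent: Scope | null = null
  ) {}

  has(name: string): boolean {
    return this.vars.has(name) || (this.parent?.has(name) ?? false)
  }

  add(name: string): Scope {
    return new Scope(new Set([...this.vars, name]), this.parent)
  }
}

// Sketch only: a real tracker would inspect the reduced term (e.g. an
// Assign node) and the input to extract the assigned name.
export const trackScopeSketch = new ContextTracker<Scope>({
  start: new Scope(new Set()),
  reduce(context, term) {
    return context // e.g. return context.add(name) when term is Assign
  },
})
```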
@@ -2,6 +2,7 @@ import { CompilerError } from '#compiler/compilerError.ts'
 import { parser } from '#parser/shrimp.ts'
 import * as terms from '#parser/shrimp.terms'
 import { setGlobals } from '#parser/tokenizer'
+import { tokenizeCurlyString } from '#parser/curlyTokenizer'
 import type { SyntaxNode, Tree } from '@lezer/common'
 import { assert, errorMessage } from '#utils/utils'
 import { toBytecode, type Bytecode, type ProgramItem, bytecodeToString } from 'reefvm'
@@ -112,6 +113,9 @@ export class Compiler {
       return [[`PUSH`, number]]

     case terms.String: {
+      if (node.firstChild?.type.id === terms.CurlyString)
+        return this.#compileCurlyString(value, input)
+
       const { parts, hasInterpolation } = getStringParts(node, input)

       // Simple string without interpolation or escapes - extract text directly
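The early return above works because a curly string parses as a single `CurlyString` token directly under `String` (the toMatchTree tests later in this diff pin down that shape). An illustrative check, assuming the program's first child is the String node:

```ts
import { parser } from '#parser/shrimp.ts'
import * as terms from '#parser/shrimp.terms'

// Per the tree tests below, the shape is: String > CurlyString,
// so inspecting the first child's type id is enough to route the node.
const tree = parser.parse('{ one two three }')
const stringNode = tree.topNode.firstChild // assumed to be the String node
console.log(stringNode?.firstChild?.type.id === terms.CurlyString) // true
```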
@@ -772,4 +776,26 @@ export class Compiler {

     return instructions
   }
+
+  #compileCurlyString(value: string, input: string): ProgramItem[] {
+    const instructions: ProgramItem[] = []
+    const nodes = tokenizeCurlyString(value)
+
+    nodes.forEach((node) => {
+      if (typeof node === 'string') {
+        instructions.push(['PUSH', node])
+      } else {
+        const [input, topNode] = node
+        let child = topNode.topNode.firstChild
+        while (child) {
+          instructions.push(...this.#compileNode(child, input))
+          child = child.nextSibling
+        }
+      }
+    })
+
+    instructions.push(['STR_CONCAT', nodes.length])
+
+    return instructions
+  }
 }
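Worked by hand, `#compileCurlyString` flattens an interpolated curly string into pushes followed by one concat. For `{ Hello $name! }` the tokenizer yields three tokens, so the emitted program looks roughly like this sketch (the instructions for the `name` expression depend on the VM; they are elided as a comment here):

```ts
import type { ProgramItem } from 'reefvm'

// Rough shape only: the middle token compiles via #compileNode and is
// assumed to leave exactly one value on the stack.
const program: ProgramItem[] = [
  ['PUSH', ' Hello '], // literal fragment before the interpolation
  // ...instructions emitted for the parsed `name` expression go here
  ['PUSH', '! '],      // literal fragment after the interpolation
  ['STR_CONCAT', 3],   // one stack value per token, concatenated
]
```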
@@ -155,3 +155,48 @@ describe('dict literals', () => {
 c=3]`).toEvaluateTo({ a: 1, b: 2, c: 3 })
   })
 })
+
+describe('curly strings', () => {
+  test('work on one line', () => {
+    expect('{ one two three }').toEvaluateTo(" one two three ")
+  })
+
+  test('work on multiple lines', () => {
+    expect(`{
+ one
+ two
+ three
+ }`).toEvaluateTo("\n one\n two\n three\n ")
+  })
+
+  test('can contain other curlies', () => {
+    expect(`{
+ { one }
+ two
+ { three }
+ }`).toEvaluateTo("\n { one }\n two\n { three }\n ")
+  })
+
+  test('interpolates variables', () => {
+    expect(`name = Bob; { Hello $name! }`).toEvaluateTo(` Hello Bob! `)
+  })
+
+  test("doesn't interpolate escaped variables", () => {
+    expect(`name = Bob; { Hello \\$name }`).toEvaluateTo(` Hello $name `)
+    expect(`a = 1; b = 2; { sum is \\$(a + b)! }`).toEvaluateTo(` sum is $(a + b)! `)
+  })
+
+  test('interpolates expressions', () => {
+    expect(`a = 1; b = 2; { sum is $(a + b)! }`).toEvaluateTo(` sum is 3! `)
+    expect(`a = 1; b = 2; { sum is { $(a + b) }! }`).toEvaluateTo(` sum is { 3 }! `)
+    expect(`a = 1; b = 2; { sum is $(a + (b * b))! }`).toEvaluateTo(` sum is 5! `)
+    expect(`{ This is $({twisted}). }`).toEvaluateTo(` This is twisted. `)
+    expect(`{ This is $({{twisted}}). }`).toEvaluateTo(` This is {twisted}. `)
+  })
+
+  test('interpolation edge cases', () => {
+    expect(`{[a=1 b=2 c={wild}]}`).toEvaluateTo(`[a=1 b=2 c={wild}]`)
+    expect(`a = 1;b = 2;c = 3;{$a $b $c}`).toEvaluateTo(`1 2 3`)
+    expect(`a = 1;b = 2;c = 3;{$a$b$c}`).toEvaluateTo(`123`)
+  })
+})
@@ -78,4 +78,18 @@ describe('pipe expressions', () => {
     div = do a b: a / b end
     sub 3 1 | div (sub 110 9 | sub 1) _ | div 5`).toEvaluateTo(10)
   })
+
+  test('pipe with prelude functions (list.reverse and list.map)', () => {
+    expect(`
+      double = do x: x * 2 end
+      range 1 3 | list.reverse | list.map double
+    `).toEvaluateTo([6, 4, 2])
+  })
+
+  test('pipe with prelude function (echo)', () => {
+    expect(`
+      get-msg = do: 'hello' end
+      get-msg | length
+    `).toEvaluateTo(5)
+  })
 })
@@ -83,7 +83,7 @@ end
   test('custom tags', () => {
     expect(`
-      list = tag ul class=list
+      list = tag ul class='list'
       ribbit:
         list:
           li border-bottom='1px solid black' one
@@ -251,7 +251,9 @@ export const getStringParts = (node: SyntaxNode, input: string) => {
     return (
       child.type.id === terms.StringFragment ||
       child.type.id === terms.Interpolation ||
-      child.type.id === terms.EscapeSeq
+      child.type.id === terms.EscapeSeq ||
+      child.type.id === terms.CurlyString
     )
   })
@@ -260,7 +262,8 @@ export const getStringParts = (node: SyntaxNode, input: string) => {
     if (
       part.type.id !== terms.StringFragment &&
       part.type.id !== terms.Interpolation &&
-      part.type.id !== terms.EscapeSeq
+      part.type.id !== terms.EscapeSeq &&
+      part.type.id !== terms.CurlyString
     ) {
       throw new CompilerError(
         `String child must be StringFragment, Interpolation, or EscapeSeq, got ${part.type.name}`,
src/parser/curlyTokenizer.ts (new file, 62 lines)

@@ -0,0 +1,62 @@
+import { parser } from '#parser/shrimp.ts'
+import type { Tree } from '@lezer/common'
+import { isIdentStart, isIdentChar } from './tokenizer'
+
+// Turns a { curly string } into separate tokens for interpolation
+export const tokenizeCurlyString = (value: string): (string | [string, Tree])[] => {
+  let pos = 1
+  let start = 1
+  let char = value[pos]
+  const tokens: (string | [string, Tree])[] = []
+
+  while (pos < value.length) {
+    if (char === '$') {
+      // escaped \$
+      if (value[pos - 1] === '\\' && value[pos - 2] !== '\\') {
+        tokens.push(value.slice(start, pos - 1))
+        start = pos
+        char = value[++pos]
+        continue
+      }
+
+      tokens.push(value.slice(start, pos))
+      start = pos
+
+      if (value[pos + 1] === '(') {
+        pos++ // skip opening '('
+
+        char = value[++pos]
+        if (!char) break
+
+        let depth = 0
+        while (char) {
+          if (char === '(') depth++
+          if (char === ')') depth--
+          if (depth < 0) break
+          char = value[++pos]
+        }
+
+        const input = value.slice(start + 2, pos) // skip '$('
+        tokens.push([input, parser.parse(input)])
+        start = ++pos // skip ')'
+      } else {
+        char = value[++pos]
+        if (!char) break
+        if (!isIdentStart(char.charCodeAt(0))) break
+
+        while (char && isIdentChar(char.charCodeAt(0)))
+          char = value[++pos]
+
+        const input = value.slice(start + 1, pos) // skip '$'
+        tokens.push([input, parser.parse(input)])
+        start = pos-- // backtrack and start over
+      }
+    }
+
+    char = value[++pos]
+  }
+
+  tokens.push(value.slice(start, pos - 1))
+
+  return tokens
+}
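To make the return shape concrete, here is what the function yields for the two interpolation forms (values derived by tracing the code above; `Tree` stands for the Lezer parse of the embedded expression):

```ts
import { tokenizeCurlyString } from '#parser/curlyTokenizer'

// Plain fragments come back as strings; each $var or $(expr) comes
// back as an [input, Tree] pair holding a sub-parse of the expression.
tokenizeCurlyString('{ Hello $name! }')
// => [' Hello ', ['name', Tree], '! ']

tokenizeCurlyString('{ sum is $(a + b)! }')
// => [' sum is ', ['a + b', Tree], '! ']
```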
@@ -1,6 +1,6 @@
 @external propSource highlighting from "./highlight"

-@context trackScope from "./scopeTracker"
+@context trackScope from "./parserScopeContext"

 @skip { space | Comment }
@@ -37,7 +37,7 @@ finally { @specialize[@name=keyword]<Identifier, "finally"> }
 throw { @specialize[@name=keyword]<Identifier, "throw"> }
 null { @specialize[@name=Null]<Identifier, "null"> }

-@external tokens tokenizer from "./tokenizer" { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot }
+@external tokens tokenizer from "./tokenizer" { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot, CurlyString }
 @external specialize {Identifier} specializeKeyword from "./tokenizer" { Do }

 @precedence {
@@ -205,7 +205,9 @@ expression {
   IdentifierBeforeDot dot (Number | Identifier | ParenExpr)
 }

-String { "'" stringContent* "'" }
+String {
+  "'" stringContent* "'" | CurlyString
+}

 stringContent {
@@ -23,44 +23,45 @@ export const
   AssignableIdentifier = 21,
   Word = 22,
   IdentifierBeforeDot = 23,
-  Do = 24,
-  Comment = 25,
-  Program = 26,
-  PipeExpr = 27,
-  FunctionCall = 28,
-  DotGet = 29,
-  Number = 30,
-  ParenExpr = 31,
-  IfExpr = 32,
-  keyword = 70,
-  ConditionalOp = 34,
-  String = 35,
-  StringFragment = 36,
-  Interpolation = 37,
-  EscapeSeq = 38,
-  Boolean = 39,
-  Regex = 40,
-  Dict = 41,
-  NamedArg = 42,
-  NamedArgPrefix = 43,
-  FunctionDef = 44,
-  Params = 45,
-  NamedParam = 46,
-  Null = 47,
-  colon = 48,
-  CatchExpr = 49,
-  Block = 51,
-  FinallyExpr = 52,
-  Underscore = 55,
-  Array = 56,
-  ElseIfExpr = 57,
-  ElseExpr = 59,
-  FunctionCallOrIdentifier = 60,
-  BinOp = 61,
-  PositionalArg = 62,
-  WhileExpr = 64,
-  FunctionCallWithBlock = 66,
-  TryExpr = 67,
-  Throw = 69,
-  CompoundAssign = 71,
-  Assign = 72
+  CurlyString = 24,
+  Do = 25,
+  Comment = 26,
+  Program = 27,
+  PipeExpr = 28,
+  FunctionCall = 29,
+  DotGet = 30,
+  Number = 31,
+  ParenExpr = 32,
+  IfExpr = 33,
+  keyword = 71,
+  ConditionalOp = 35,
+  String = 36,
+  StringFragment = 37,
+  Interpolation = 38,
+  EscapeSeq = 39,
+  Boolean = 40,
+  Regex = 41,
+  Dict = 42,
+  NamedArg = 43,
+  NamedArgPrefix = 44,
+  FunctionDef = 45,
+  Params = 46,
+  NamedParam = 47,
+  Null = 48,
+  colon = 49,
+  CatchExpr = 50,
+  Block = 52,
+  FinallyExpr = 53,
+  Underscore = 56,
+  Array = 57,
+  ElseIfExpr = 58,
+  ElseExpr = 60,
+  FunctionCallOrIdentifier = 61,
+  BinOp = 62,
+  PositionalArg = 63,
+  WhileExpr = 65,
+  FunctionCallWithBlock = 67,
+  TryExpr = 68,
+  Throw = 70,
+  CompoundAssign = 72,
+  Assign = 73
@@ -2,26 +2,26 @@
 import {LRParser, LocalTokenGroup} from "@lezer/lr"
 import {operatorTokenizer} from "./operatorTokenizer"
 import {tokenizer, specializeKeyword} from "./tokenizer"
-import {trackScope} from "./scopeTracker"
+import {trackScope} from "./parserScopeContext"
 import {highlighting} from "./highlight"
-const spec_Identifier = {__proto__:null,if:66, null:94, catch:100, finally:106, end:108, else:116, while:130, try:136, throw:140}
+const spec_Identifier = {__proto__:null,if:68, null:96, catch:102, finally:108, end:110, else:118, while:132, try:138, throw:142}
 export const parser = LRParser.deserialize({
   version: 14,
-  states: "9[QYQbOOO!dOSO'#DPOOQa'#DV'#DVO#mQbO'#DfO%RQcO'#E^OOQa'#E^'#E^O&XQcO'#E^O'ZQcO'#E]O'qQcO'#E]O)^QRO'#DOO*mQcO'#EWO*wQcO'#EWO+XQbO'#C{O,SOpO'#CyOOQ`'#EX'#EXO,XQbO'#EWO,cQRO'#DuOOQ`'#EW'#EWO,wQQO'#EVOOQ`'#EV'#EVOOQ`'#Dw'#DwQYQbOOO-PQbO'#DYO-[QbO'#C|O.PQbO'#DnO.tQQO'#DqO.PQbO'#DsO.yQbO'#DRO/RQWO'#DSOOOO'#E`'#E`OOOO'#Dx'#DxO/gOSO,59kOOQa,59k,59kOOQ`'#Dy'#DyO/uQbO,5:QO/|QbO'#DWO0WQQO,59qOOQa,5:Q,5:QO0cQbO,5:QOOQa'#E]'#E]OOQ`'#Dl'#DlOOQ`'#El'#ElOOQ`'#EQ'#EQO0mQbO,59dO1gQbO,5:bO.PQbO,59jO.PQbO,59jO.PQbO,59jO.PQbO,5:VO.PQbO,5:VO.PQbO,5:VO1wQRO,59gO2OQRO,59gO2ZQRO,59gO2UQQO,59gO2lQQO,59gO2tObO,59eO3PQbO'#ERO3[QbO,59cO3vQbO,5:[O1gQbO,5:aOOQ`,5:q,5:qOOQ`-E7u-E7uOOQ`'#Dz'#DzO4ZQbO'#DZO4fQbO'#D[OOQO'#D{'#D{O4^QQO'#DZO4tQQO,59tO4yQcO'#E]O6_QRO'#E[O6fQRO'#E[OOQO'#E['#E[O6qQQO,59hO6vQRO,5:YO6}QRO,5:YO3vQbO,5:]O7YQcO,5:_O8UQcO,5:_O8`QcO,5:_OOOO,59m,59mOOOO,59n,59nOOOO-E7v-E7vOOQa1G/V1G/VOOQ`-E7w-E7wO8pQQO1G/]OOQa1G/l1G/lO8{QbO1G/lOOQ`,59r,59rOOQO'#D}'#D}O8pQQO1G/]OOQa1G/]1G/]OOQ`'#EO'#EOO8{QbO1G/lOOQ`-E8O-E8OOOQ`1G/|1G/|OOQa1G/U1G/UO:WQcO1G/UO:_QcO1G/UO:fQcO1G/UOOQa1G/q1G/qO;_QcO1G/qO;iQcO1G/qO;sQcO1G/qOOQa1G/R1G/ROOQa1G/P1G/PO<hQbO'#DjO=_QbO'#CxOOQ`,5:m,5:mOOQ`-E8P-E8POOQ`'#Da'#DaO=lQbO'#DaO>]QbO1G/vOOQ`1G/{1G/{OOQ`-E7x-E7xO>hQQO,59uOOQO,59v,59vOOQO-E7y-E7yO>pQbO1G/`O3vQbO1G/SO3vQbO1G/tO?TQbO1G/wO?`QQO7+$wOOQa7+$w7+$wO?kQbO7+%WOOQa7+%W7+%WOOQO-E7{-E7{OOQ`-E7|-E7|OOQ`'#D|'#D|O?uQQO'#D|O?zQbO'#EiOOQ`,59{,59{O@kQbO'#D_O@pQQO'#DbOOQ`7+%b7+%bO@uQbO7+%bO@zQbO7+%bOASQbO7+$zOA_QbO7+$zOA{QbO7+$nOBTQbO7+%`OOQ`7+%c7+%cOBYQbO7+%cOB_QbO7+%cOOQa<<Hc<<HcOOQa<<Hr<<HrOOQ`,5:h,5:hOOQ`-E7z-E7zOBgQQO,59yO3vQbO,59|OOQ`<<H|<<H|OBlQbO<<H|OOQ`<<Hf<<HfOBqQbO<<HfOBvQbO<<HfOCOQbO<<HfOOQ`'#EP'#EPOCZQbO<<HYOCcQbO'#DiOOQ`<<HY<<HYOCkQbO<<HYOOQ`<<Hz<<HzOOQ`<<H}<<H}OCpQbO<<H}O3vQbO1G/eOOQ`1G/h1G/hOOQ`AN>hAN>hOOQ`AN>QAN>QOCuQbOAN>QOCzQbOAN>QOOQ`-E7}-E7}OOQ`AN=tAN=tODSQbOAN=tO-[QbO,5:RO3vQbO,5:TOOQ`AN>iAN>iOOQ`7+%P7+%POOQ`G23lG23lODXQbOG23lPD^QbO'#DgOOQ`G23`G23`ODcQQO1G/mOOQ`1G/o1G/oOOQ`LD)WLD)WO3vQbO7+%XOOQ`<<Hs<<Hs",
+  states: "9bQYQbOOO!gOSO'#DQOOQa'#DQ'#DQOOQa'#DW'#DWO#sQbO'#DgO%XQcO'#E_OOQa'#E_'#E_O&bQcO'#E_O'dQcO'#E^O'zQcO'#E^O)jQRO'#DPO*yQcO'#EXO+TQcO'#EXO+eQbO'#C|O,cOpO'#CzOOQ`'#EY'#EYO,hQbO'#EXO,rQRO'#DvOOQ`'#EX'#EXO-WQQO'#EWOOQ`'#EW'#EWOOQ`'#Dx'#DxQYQbOOO-`QbO'#DZO-kQbO'#C}O.cQbO'#DoO/ZQQO'#DrO.cQbO'#DtO/`QbO'#DSO/hQWO'#DTOOOO'#Ea'#EaOOOO'#Dy'#DyO/|OSO,59lOOQa,59l,59lOOQ`'#Dz'#DzO0[QbO,5:RO0cQbO'#DXO0mQQO,59rOOQa,5:R,5:RO0xQbO,5:ROOQa'#E^'#E^OOQ`'#Dm'#DmOOQ`'#Em'#EmOOQ`'#ER'#ERO1SQbO,59eO1|QbO,5:cO.cQbO,59kO.cQbO,59kO.cQbO,59kO.cQbO,5:WO.cQbO,5:WO.cQbO,5:WO2^QRO,59hO2eQRO,59hO2pQRO,59hO2kQQO,59hO3RQQO,59hO3ZObO,59fO3fQbO'#ESO3qQbO,59dO4]QbO,5:]O1|QbO,5:bOOQ`,5:r,5:rOOQ`-E7v-E7vOOQ`'#D{'#D{O4pQbO'#D[O4{QbO'#D]OOQO'#D|'#D|O4sQQO'#D[O5^QQO,59uO5cQcO'#E^O6wQRO'#E]O7OQRO'#E]OOQO'#E]'#E]O7ZQQO,59iO7`QRO,5:ZO7gQRO,5:ZO4]QbO,5:^O7rQcO,5:`O8nQcO,5:`O8xQcO,5:`OOOO,59n,59nOOOO,59o,59oOOOO-E7w-E7wOOQa1G/W1G/WOOQ`-E7x-E7xO9YQQO1G/^OOQa1G/m1G/mO9eQbO1G/mOOQ`,59s,59sOOQO'#EO'#EOO9YQQO1G/^OOQa1G/^1G/^OOQ`'#EP'#EPO9eQbO1G/mOOQ`-E8P-E8POOQ`1G/}1G/}OOQa1G/V1G/VO:pQcO1G/VO:wQcO1G/VO;OQcO1G/VOOQa1G/r1G/rO;wQcO1G/rO<RQcO1G/rO<]QcO1G/rOOQa1G/S1G/SOOQa1G/Q1G/QO=QQbO'#DkO=wQbO'#CyOOQ`,5:n,5:nOOQ`-E8Q-E8QOOQ`'#Db'#DbO>UQbO'#DbO>uQbO1G/wOOQ`1G/|1G/|OOQ`-E7y-E7yO?QQQO,59vOOQO,59w,59wOOQO-E7z-E7zO?YQbO1G/aO4]QbO1G/TO4]QbO1G/uO?mQbO1G/xO?xQQO7+$xOOQa7+$x7+$xO@TQbO7+%XOOQa7+%X7+%XOOQO-E7|-E7|OOQ`-E7}-E7}OOQ`'#D}'#D}O@_QQO'#D}O@dQbO'#EjOOQ`,59|,59|OATQbO'#D`OAYQQO'#DcOOQ`7+%c7+%cOA_QbO7+%cOAdQbO7+%cOAlQbO7+${OAwQbO7+${OBeQbO7+$oOBmQbO7+%aOOQ`7+%d7+%dOBrQbO7+%dOBwQbO7+%dOOQa<<Hd<<HdOOQa<<Hs<<HsOOQ`,5:i,5:iOOQ`-E7{-E7{OCPQQO,59zO4]QbO,59}OOQ`<<H}<<H}OCUQbO<<H}OOQ`<<Hg<<HgOCZQbO<<HgOC`QbO<<HgOChQbO<<HgOOQ`'#EQ'#EQOCsQbO<<HZOC{QbO'#DjOOQ`<<HZ<<HZODTQbO<<HZOOQ`<<H{<<H{OOQ`<<IO<<IOODYQbO<<IOO4]QbO1G/fOOQ`1G/i1G/iOOQ`AN>iAN>iOOQ`AN>RAN>ROD_QbOAN>RODdQbOAN>ROOQ`-E8O-E8OOOQ`AN=uAN=uODlQbOAN=uO-kQbO,5:SO4]QbO,5:UOOQ`AN>jAN>jOOQ`7+%Q7+%QOOQ`G23mG23mODqQbOG23mPDvQbO'#DhOOQ`G23aG23aOD{QQO1G/nOOQ`1G/p1G/pOOQ`LD)XLD)XO4]QbO7+%YOOQ`<<Ht<<Ht",
-  stateData: "Dk~O!xOSiOS~OdWOe`OfTOg]OhfOnTOqgOwTOxTO!PTO!chO!fiO!hjO!}[O#RPO#YQO#ZRO#[cO~OtmO#RpO#TkO#UlO~OdwOfTOg]OnTOwTOxTO{sO!PTO!}[O#RPO#YQO#ZRO#[qO~O#^uO~P!rOP#QXQ#QXR#QXS#QXT#QXU#QXW#QXX#QXY#QXZ#QX[#QX]#QX^#QX#[#QX#a#QX!S#QX!V#QX!W#QX![#QX~OdwOfTOg]OhfOnTOwTOxTO{sO!PTO!XxO!}[O#RPO#YQO#ZRO#_#QX!Q#QX~P#tOV|O~P#tOP#PXQ#PXR#PXS#PXT#PXU#PXW#PXX#PXY#PXZ#PX[#PX]#PX^#PX~O#[!zX#a!zX!S!zX!V!zX!W!zX![!zX~P&`OdwOfTOg]OhfOnTOwTOxTO{sO!PTO!XxO!}[O#RPO#YQO#ZRO!Q!^X!a!^X#[!^X#a!^X#_!^X!S!^X!V!^X!W!^X![!^X~P&`OP!ROQ!ROR!SOS!SOT!OOU!POW}OX}OY}OZ}O[}O]}O^!QO~O#[!zX#a!zX!S!zX!V!zX!W!zX![!zX~OT!OOU!PO~P*XOP!ROQ!ROR!SOS!SO~P*XOdWOfTOg]OhfOnTOqgOwTOxTO!PTO!}[O#RPO#YQO#ZRO~O!|!YO~O!Q!]O!a!ZO~P*XOV|O_!^O`!^Oa!^Ob!^Oc!^O~O#[!_O#a!_O~Od!aO{!cO!Q}P~Od!gOfTOg]OnTOwTOxTO!PTO!}[O#RPO#YQO#ZRO~OdwOfTOg]OnTOwTOxTO!PTO!}[O#RPO#YQO#ZRO~O!Q!nO~Od!rO!}[O~O#R!sO#T!sO#U!sO#V!sO#W!sO#X!sO~OtmO#R!uO#TkO#UlO~O#^!xO~P!rOhfO!X!zO~P.PO{sO#[!{O#^!}O~O#[#OO#^!xO~P.POhfO{sO!XxO!Qla!ala#[la#ala#_la!Sla!Vla!Wla![la~P.POe`O!chO!fiO!hjO~P+XO#_#[O~P&`OT!OOU!PO#_#[O~OP!ROQ!ROR!SOS!SO#_#[O~O!a!ZO#_#[O~Od#]On#]O!}[O~Od#^Og]O!}[O~O!a!ZO#[ka#aka#_ka!Ska!Vka!Wka![ka~Oe`O!chO!fiO!hjO#[#cO~P+XOd!aO{!cO!Q}X~On#hOw#hO!P#hO#RPO~O!Q#jO~OhfO{sO!XxOT#PXU#PXW#PXX#PXY#PXZ#PX[#PX]#PX!Q#PX~P.POT!OOU!POW}OX}OY}OZ}O[}O]}O~O!Q#OX~P5sOT!OOU!PO!Q#OX~O!Q#kO~O!Q#lO~P5sOT!OOU!PO!Q#lO~O#[!ga#a!ga!S!ga!V!ga!W!ga![!ga~P)^O#[!ga#a!ga!S!ga!V!ga!W!ga![!ga~OT!OOU!PO~P7pOP!ROQ!ROR!SOS!SO~P7pO{sO#[!{O#^#oO~O#[#OO#^#qO~P.POW}OX}OY}OZ}O[}O]}OTri#[ri#ari#_ri!Qri!Sri!Vri!Wri![ri~OU!PO~P9VOU!PO~P9iOUri~P9VO^!QOR!_iS!_i#[!_i#a!_i#_!_i!S!_i!V!_i!W!_i![!_i~OP!_iQ!_i~P:mOP!ROQ!RO~P:mOP!ROQ!ROR!_iS!_i#[!_i#a!_i#_!_i!S!_i!V!_i!W!_i![!_i~OhfO{sO!XxO!a!^X#[!^X#a!^X#_!^X!S!^X!V!^X!W!^X![!^X~P.POhfO{sO!XxO~P.POe`O!chO!fiO!hjO#[#tO!S#]P!V#]P!W#]P![#]P~P+XO!S#xO!V#yO!W#zO~O{!cO!Q}a~Oe`O!chO!fiO!hjO#[$OO~P+XO!S#xO!V#yO!W$RO~O{sO#[!{O#^$UO~O#[#OO#^$VO~P.PO#[$WO~Oe`O!chO!fiO!hjO#[#tO!S#]X!V#]X!W#]X![#]X~P+XOd$YO~O!Q$ZO~O!W$[O~O!V#yO!W$[O~O!S#xO!V#yO!W$^O~Oe`O!chO!fiO!hjO#[#tO!S#]P!V#]P!W#]P~P+XO!W$eO![$dO~O!W$gO~O!W$hO~O!V#yO!W$hO~O!Q$jO~O!W$lO~O!W$mO~O!V#yO!W$mO~O!S#xO!V#yO!W$mO~O!W$qO![$dO~Oq$sO!Q$tO~O!W$qO~O!W$uO~O!W$wO~O!V#yO!W$wO~O!W$zO~O!W$}O~Oq$sO~O!Q%OO~Onx~",
+  stateData: "ET~O!yOSjOS~OdXOeaOfUOg^OhQOigOoUOrhOxUOyUO!QUO!diO!gjO!ikO#O]O#SPO#ZRO#[SO#]dO~OunO#SqO#UlO#VmO~OdxOfUOg^OhQOoUOxUOyUO|tO!QUO#O]O#SPO#ZRO#[SO#]rO~O#_vO~P!uOP#RXQ#RXR#RXS#RXT#RXU#RXW#RXX#RXY#RXZ#RX[#RX]#RX^#RX#]#RX#b#RX!T#RX!W#RX!X#RX!]#RX~OdxOfUOg^OhQOigOoUOxUOyUO|tO!QUO!YyO#O]O#SPO#ZRO#[SO#`#RX!R#RX~P#zOV}O~P#zOP#QXQ#QXR#QXS#QXT#QXU#QXW#QXX#QXY#QXZ#QX[#QX]#QX^#QX~O#]!{X#b!{X!T!{X!W!{X!X!{X!]!{X~P&iOdxOfUOg^OhQOigOoUOxUOyUO|tO!QUO!YyO#O]O#SPO#ZRO#[SO!R!_X!b!_X#]!_X#b!_X#`!_X!T!_X!W!_X!X!_X!]!_X~P&iOP!SOQ!SOR!TOS!TOT!POU!QOW!OOX!OOY!OOZ!OO[!OO]!OO^!RO~O#]!{X#b!{X!T!{X!W!{X!X!{X!]!{X~OT!POU!QO~P*eOP!SOQ!SOR!TOS!TO~P*eOdXOfUOg^OhQOigOoUOrhOxUOyUO!QUO#O]O#SPO#ZRO#[SO~O!}!ZO~O!R!^O!b![O~P*eOV}O_!_O`!_Oa!_Ob!_Oc!_O~O#]!`O#b!`O~Od!bO|!dO!R!OP~Od!hOfUOg^OhQOoUOxUOyUO!QUO#O]O#SPO#ZRO#[SO~OdxOfUOg^OhQOoUOxUOyUO!QUO#O]O#SPO#ZRO#[SO~O!R!oO~Od!sO#O]O~O#S!tO#U!tO#V!tO#W!tO#X!tO#Y!tO~OunO#S!vO#UlO#VmO~O#_!yO~P!uOigO!Y!{O~P.cO|tO#]!|O#_#OO~O#]#PO#_!yO~P.cOigO|tO!YyO!Rma!bma#]ma#bma#`ma!Tma!Wma!Xma!]ma~P.cOeaO!diO!gjO!ikO~P+eO#`#]O~P&iOT!POU!QO#`#]O~OP!SOQ!SOR!TOS!TO#`#]O~O!b![O#`#]O~Od#^Oo#^O#O]O~Od#_Og^O#O]O~O!b![O#]la#bla#`la!Tla!Wla!Xla!]la~OeaO!diO!gjO!ikO#]#dO~P+eOd!bO|!dO!R!OX~OhQOo#iOx#iO!Q#iO#SPO~O!R#kO~OigO|tO!YyOT#QXU#QXW#QXX#QXY#QXZ#QX[#QX]#QX!R#QX~P.cOT!POU!QOW!OOX!OOY!OOZ!OO[!OO]!OO~O!R#PX~P6]OT!POU!QO!R#PX~O!R#lO~O!R#mO~P6]OT!POU!QO!R#mO~O#]!ha#b!ha!T!ha!W!ha!X!ha!]!ha~P)jO#]!ha#b!ha!T!ha!W!ha!X!ha!]!ha~OT!POU!QO~P8YOP!SOQ!SOR!TOS!TO~P8YO|tO#]!|O#_#pO~O#]#PO#_#rO~P.cOW!OOX!OOY!OOZ!OO[!OO]!OOTsi#]si#bsi#`si!Rsi!Tsi!Wsi!Xsi!]si~OU!QO~P9oOU!QO~P:ROUsi~P9oO^!ROR!`iS!`i#]!`i#b!`i#`!`i!T!`i!W!`i!X!`i!]!`i~OP!`iQ!`i~P;VOP!SOQ!SO~P;VOP!SOQ!SOR!`iS!`i#]!`i#b!`i#`!`i!T!`i!W!`i!X!`i!]!`i~OigO|tO!YyO!b!_X#]!_X#b!_X#`!_X!T!_X!W!_X!X!_X!]!_X~P.cOigO|tO!YyO~P.cOeaO!diO!gjO!ikO#]#uO!T#^P!W#^P!X#^P!]#^P~P+eO!T#yO!W#zO!X#{O~O|!dO!R!Oa~OeaO!diO!gjO!ikO#]$PO~P+eO!T#yO!W#zO!X$SO~O|tO#]!|O#_$VO~O#]#PO#_$WO~P.cO#]$XO~OeaO!diO!gjO!ikO#]#uO!T#^X!W#^X!X#^X!]#^X~P+eOd$ZO~O!R$[O~O!X$]O~O!W#zO!X$]O~O!T#yO!W#zO!X$_O~OeaO!diO!gjO!ikO#]#uO!T#^P!W#^P!X#^P~P+eO!X$fO!]$eO~O!X$hO~O!X$iO~O!W#zO!X$iO~O!R$kO~O!X$mO~O!X$nO~O!W#zO!X$nO~O!T#yO!W#zO!X$nO~O!X$rO!]$eO~Or$tO!R$uO~O!X$rO~O!X$vO~O!X$xO~O!W#zO!X$xO~O!X${O~O!X%OO~Or$tO~O!R%PO~Ooy~",
-  goto: "4x#aPPPPPPPPPPPPPPPPPPPPPPPPPPP#b#w$aP%d#bP&k'bP(a(aPP(e)aP)u*g*jPP*pP*|+fPPP+|,zP-O-U-j.YP.bP.b.bP.bP.b.b.t.z/Q/W/^/h/o/y0T0Z0ePPP0l0p1^PP1v1|3fP4fPPPPPPPP4jPP4ppaOe|!]!^!n#c#j#k#l#v$O$Z$j$t%OR!W[t^O[e|!Z!]!^!n#c#j#k#l#v$O$Z$j$t%OT!jg$srWO[e|!]!^!n#c#j#k#l#v$O$Z$j$t%OzwRSWhjrsv{}!O!P!Q!R!S!g!y#P#^#_#pS!gg$sR#^!ZvSO[eg|!]!^!n#c#j#k#l#v$O$Z$j$s$t%OzTRSWhjrsv{}!O!P!Q!R!S!g!y#P#^#_#pQ!rkQ#]!YR#_!ZpYOe|!]!^!n#c#j#k#l#v$O$Z$j$t%OQ!U[S!ig$sQ!mhQ!pjQ#S!PR#U!O!rTORSW[eghjrsv{|}!O!P!Q!R!S!]!^!g!n!y#P#^#_#c#j#k#l#p#v$O$Z$j$s$t%OR#h!cTmPo!sTORSW[eghjrsv{|}!O!P!Q!R!S!]!^!g!n!y#P#^#_#c#j#k#l#p#v$O$Z$j$s$t%OQtR[ySW{!g#^#_Q!wrX!{t!w!|#npaOe|!]!^!n#c#j#k#l#v$O$Z$j$t%O[xSW{!g#^#_Q!W[R!zsR!ffX!df!b!e#gQ#|#dQ$T#mQ$`#}R$o$aQ#d!]Q#m!nQ$P#kQ$Q#lQ$k$ZQ$v$jQ$|$tR%P%OQ#{#dQ$S#mQ$]#|Q$_#}Q$i$TS$n$`$aR$x$o!QTRSW[ghjrsv{}!O!P!Q!R!S!g!y#P#^#_#p$sqUOe|!]!^!n#c#j#k#l#v$O$Z$j$t%OT$b$P$cQ$f$PR$r$cu^O[e|!Z!]!^!n#c#j#k#l#v$O$Z$j$t%OpZOe|!]!^!n#c#j#k#l#v$O$Z$j$t%OQ!V[Q!qjQ#W!RR#Z!S]ySW{!g#^#_qaOe|!]!^!n#c#j#k#l#v$O$Z$j$t%OQeOR!`eQoPR!toQrRR!vrQ!bfR#f!bQ!efQ#g!bT#i!e#gS#v#c$OR$X#vQ!|tQ#n!wT#r!|#nQ#PvQ#p!yT#s#P#pQ$c$PR$p$cY{SW!g#^#_R#Q{S![_!XR#a![TdOeSbOeQ#R|`#b!]!n#k#l$Z$j$t%OQ#e!^U#u#c#v$OR#}#jp_Oe|!]!^!n#c#j#k#l#v$O$Z$j$t%OQ!X[R#`!ZQ!kgR${$srXO[e|!]!^!n#c#j#k#l#v$O$Z$j$t%OQvR[xSW{!g#^#_S!hg$sQ!lhQ!ojQ!yrQ!zsW#Ov!y#P#pQ#S}Q#T!OQ#V!PQ#W!QQ#X!RR#Y!SpVOe|!]!^!n#c#j#k#l#v$O$Z$j$t%O!OwRSWghjrsv{}!O!P!Q!R!S!g!y#P#^#_#p$sR!T[TnPoQ#w#cR$a$O]zSW{!g#^#_",
+  goto: "4y#bPPPPPPPPPPPPPPPPPPPPPPPPPPPP#c#x$bP%e#cP&l'cP(b(bPP(f)bP)v*h*kPP*qP*}+gPPP+},{P-P-V-k.ZP.cP.c.cP.cP.c.c.u.{/R/X/_/i/p/z0U0[0fPPP0m0q1_PP1w1}3gP4gPPPPPPPP4kPP4qpbOf}!^!_!o#d#k#l#m#w$P$[$k$u%PR!X]t_O]f}![!^!_!o#d#k#l#m#w$P$[$k$u%PT!kh$trXO]f}!^!_!o#d#k#l#m#w$P$[$k$u%PzxSTXikstw|!O!P!Q!R!S!T!h!z#Q#_#`#qS!hh$tR#_![vTO]fh}!^!_!o#d#k#l#m#w$P$[$k$t$u%PzUSTXikstw|!O!P!Q!R!S!T!h!z#Q#_#`#qQ!slQ#^!ZR#`![pZOf}!^!_!o#d#k#l#m#w$P$[$k$u%PQ!V]S!jh$tQ!niQ!qkQ#T!QR#V!P!rUOSTX]fhikstw|}!O!P!Q!R!S!T!^!_!h!o!z#Q#_#`#d#k#l#m#q#w$P$[$k$t$u%PR#i!dTnPp!sUOSTX]fhikstw|}!O!P!Q!R!S!T!^!_!h!o!z#Q#_#`#d#k#l#m#q#w$P$[$k$t$u%PQuS[zTX|!h#_#`Q!xsX!|u!x!}#opbOf}!^!_!o#d#k#l#m#w$P$[$k$u%P[yTX|!h#_#`Q!X]R!{tR!ggX!eg!c!f#hQ#}#eQ$U#nQ$a$OR$p$bQ#e!^Q#n!oQ$Q#lQ$R#mQ$l$[Q$w$kQ$}$uR%Q%PQ#|#eQ$T#nQ$^#}Q$`$OQ$j$US$o$a$bR$y$p!QUSTX]hikstw|!O!P!Q!R!S!T!h!z#Q#_#`#q$tqVOf}!^!_!o#d#k#l#m#w$P$[$k$u%PT$c$Q$dQ$g$QR$s$du_O]f}![!^!_!o#d#k#l#m#w$P$[$k$u%Pp[Of}!^!_!o#d#k#l#m#w$P$[$k$u%PQ!W]Q!rkQ#X!SR#[!T]zTX|!h#_#`qbOf}!^!_!o#d#k#l#m#w$P$[$k$u%PQfOR!afQpPR!upQsSR!wsQ!cgR#g!cQ!fgQ#h!cT#j!f#hS#w#d$PR$Y#wQ!}uQ#o!xT#s!}#oQ#QwQ#q!zT#t#Q#qQ$d$QR$q$dY|TX!h#_#`R#R|S!]`!YR#b!]TeOfScOfQ#S}`#c!^!o#l#m$[$k$u%PQ#f!_U#v#d#w$PR$O#kp`Of}!^!_!o#d#k#l#m#w$P$[$k$u%PQ!Y]R#a![Q!lhR$|$trYO]f}!^!_!o#d#k#l#m#w$P$[$k$u%PQwS[yTX|!h#_#`S!ih$tQ!miQ!pkQ!zsQ!{tW#Pw!z#Q#qQ#T!OQ#U!PQ#W!QQ#X!RQ#Y!SR#Z!TpWOf}!^!_!o#d#k#l#m#w$P$[$k$u%P!OxSTXhikstw|!O!P!Q!R!S!T!h!z#Q#_#`#q$tR!U]ToPpQ#x#dR$b$P]{TX|!h#_#`",
-  nodeNames: "⚠ Star Slash Plus Minus And Or Eq EqEq Neq Lt Lte Gt Gte Modulo PlusEq MinusEq StarEq SlashEq ModuloEq Identifier AssignableIdentifier Word IdentifierBeforeDot Do Comment Program PipeExpr FunctionCall DotGet Number ParenExpr IfExpr keyword ConditionalOp String StringFragment Interpolation EscapeSeq Boolean Regex Dict NamedArg NamedArgPrefix FunctionDef Params NamedParam Null colon CatchExpr keyword Block FinallyExpr keyword keyword Underscore Array ElseIfExpr keyword ElseExpr FunctionCallOrIdentifier BinOp PositionalArg operator WhileExpr keyword FunctionCallWithBlock TryExpr keyword Throw keyword CompoundAssign Assign",
+  nodeNames: "⚠ Star Slash Plus Minus And Or Eq EqEq Neq Lt Lte Gt Gte Modulo PlusEq MinusEq StarEq SlashEq ModuloEq Identifier AssignableIdentifier Word IdentifierBeforeDot CurlyString Do Comment Program PipeExpr FunctionCall DotGet Number ParenExpr IfExpr keyword ConditionalOp String StringFragment Interpolation EscapeSeq Boolean Regex Dict NamedArg NamedArgPrefix FunctionDef Params NamedParam Null colon CatchExpr keyword Block FinallyExpr keyword keyword Underscore Array ElseIfExpr keyword ElseExpr FunctionCallOrIdentifier BinOp PositionalArg operator WhileExpr keyword FunctionCallWithBlock TryExpr keyword Throw keyword CompoundAssign Assign",
-  maxTerm: 109,
+  maxTerm: 110,
   context: trackScope,
   nodeProps: [
-    ["closedBy", 48,"end"]
+    ["closedBy", 49,"end"]
   ],
   propSources: [highlighting],
-  skippedNodes: [0,25],
+  skippedNodes: [0,26],
   repeatNodeCount: 11,
-  tokenData: "C|~R|OX#{XY$jYZ%TZp#{pq$jqs#{st%ntu'tuw#{wx'yxy(Oyz(iz{#{{|)S|}#{}!O+v!O!P#{!P!Q.]!Q![)q![!]6x!]!^%T!^!}#{!}#O7c#O#P9X#P#Q9^#Q#R#{#R#S9w#S#T#{#T#Y,w#Y#Z:b#Z#b,w#b#c?`#c#f,w#f#g@]#g#h,w#h#iAY#i#o,w#o#p#{#p#qC^#q;'S#{;'S;=`$d<%l~#{~O#{~~CwS$QUtSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{S$gP;=`<%l#{^$qUtS!xYOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U%[UtS#[QOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{^%sWtSOp#{pq&]qt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{^&dZiYtSOY&]YZ#{Zt&]tu'Vuw&]wx'Vx#O&]#O#P'V#P;'S&];'S;=`'n<%lO&]Y'[SiYOY'VZ;'S'V;'S;=`'h<%lO'VY'kP;=`<%l'V^'qP;=`<%l&]~'yO#T~~(OO#R~U(VUtS!}QOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U(pUtS#_QOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U)XWtSOt#{uw#{x!Q#{!Q![)q![#O#{#P;'S#{;'S;=`$d<%lO#{U)xYtSnQOt#{uw#{x!O#{!O!P*h!P!Q#{!Q![)q![#O#{#P;'S#{;'S;=`$d<%lO#{U*mWtSOt#{uw#{x!Q#{!Q![+V![#O#{#P;'S#{;'S;=`$d<%lO#{U+^WtSnQOt#{uw#{x!Q#{!Q![+V![#O#{#P;'S#{;'S;=`$d<%lO#{U+{^tSOt#{uw#{x}#{}!O,w!O!Q#{!Q![)q![!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{U,|[tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{U-yU{QtSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U.bWtSOt#{uw#{x!P#{!P!Q.z!Q#O#{#P;'S#{;'S;=`$d<%lO#{U/P^tSOY/{YZ#{Zt/{tu1Ouw/{wx1Ox!P/{!P!Q#{!Q!}/{!}#O5q#O#P3^#P;'S/{;'S;=`6r<%lO/{U0S^tSxQOY/{YZ#{Zt/{tu1Ouw/{wx1Ox!P/{!P!Q3s!Q!}/{!}#O5q#O#P3^#P;'S/{;'S;=`6r<%lO/{Q1TXxQOY1OZ!P1O!P!Q1p!Q!}1O!}#O2_#O#P3^#P;'S1O;'S;=`3m<%lO1OQ1sP!P!Q1vQ1{UxQ#Z#[1v#]#^1v#a#b1v#g#h1v#i#j1v#m#n1vQ2bVOY2_Z#O2_#O#P2w#P#Q1O#Q;'S2_;'S;=`3W<%lO2_Q2zSOY2_Z;'S2_;'S;=`3W<%lO2_Q3ZP;=`<%l2_Q3aSOY1OZ;'S1O;'S;=`3m<%lO1OQ3pP;=`<%l1OU3xWtSOt#{uw#{x!P#{!P!Q4b!Q#O#{#P;'S#{;'S;=`$d<%lO#{U4ibtSxQOt#{uw#{x#O#{#P#Z#{#Z#[4b#[#]#{#]#^4b#^#a#{#a#b4b#b#g#{#g#h4b#h#i#{#i#j4b#j#m#{#m#n4b#n;'S#{;'S;=`$d<%lO#{U5v[tSOY5qYZ#{Zt5qtu2_uw5qwx2_x#O5q#O#P2w#P#Q/{#Q;'S5q;'S;=`6l<%lO5qU6oP;=`<%l5qU6uP;=`<%l/{U7PUtS!QQOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U7jW#ZQtSOt#{uw#{x!_#{!_!`8S!`#O#{#P;'S#{;'S;=`$d<%lO#{U8XVtSOt#{uw#{x#O#{#P#Q8n#Q;'S#{;'S;=`$d<%lO#{U8uU#YQtSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{~9^O#U~U9eU#^QtSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U:OUtS!XQOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U:g]tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#U;`#U#o,w#o;'S#{;'S;=`$d<%lO#{U;e^tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#`,w#`#a<a#a#o,w#o;'S#{;'S;=`$d<%lO#{U<f^tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#g,w#g#h=b#h#o,w#o;'S#{;'S;=`$d<%lO#{U=g^tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#X,w#X#Y>c#Y#o,w#o;'S#{;'S;=`$d<%lO#{U>j[wQtSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^?g[#VWtSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^@d[#XWtSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^Aa^#WWtSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#f,w#f#gB]#g#o,w#o;'S#{;'S;=`$d<%lO#{UBb^tSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#i,w#i#j=b#j#o,w#o;'S#{;'S;=`$d<%lO#{UCeU!aQtSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{~C|O#a~",
+  tokenData: "C|~R|OX#{XY$jYZ%TZp#{pq$jqs#{st%ntu'tuw#{wx'yxy(Oyz(iz{#{{|)S|}#{}!O+v!O!P#{!P!Q.]!Q![)q![!]6x!]!^%T!^!}#{!}#O7c#O#P9X#P#Q9^#Q#R#{#R#S9w#S#T#{#T#Y,w#Y#Z:b#Z#b,w#b#c?`#c#f,w#f#g@]#g#h,w#h#iAY#i#o,w#o#p#{#p#qC^#q;'S#{;'S;=`$d<%l~#{~O#{~~CwS$QUuSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{S$gP;=`<%l#{^$qUuS!yYOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U%[UuS#]QOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{^%sWuSOp#{pq&]qt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{^&dZjYuSOY&]YZ#{Zt&]tu'Vuw&]wx'Vx#O&]#O#P'V#P;'S&];'S;=`'n<%lO&]Y'[SjYOY'VZ;'S'V;'S;=`'h<%lO'VY'kP;=`<%l'V^'qP;=`<%l&]~'yO#U~~(OO#S~U(VUuS#OQOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U(pUuS#`QOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U)XWuSOt#{uw#{x!Q#{!Q![)q![#O#{#P;'S#{;'S;=`$d<%lO#{U)xYuSoQOt#{uw#{x!O#{!O!P*h!P!Q#{!Q![)q![#O#{#P;'S#{;'S;=`$d<%lO#{U*mWuSOt#{uw#{x!Q#{!Q![+V![#O#{#P;'S#{;'S;=`$d<%lO#{U+^WuSoQOt#{uw#{x!Q#{!Q![+V![#O#{#P;'S#{;'S;=`$d<%lO#{U+{^uSOt#{uw#{x}#{}!O,w!O!Q#{!Q![)q![!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{U,|[uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{U-yU|QuSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U.bWuSOt#{uw#{x!P#{!P!Q.z!Q#O#{#P;'S#{;'S;=`$d<%lO#{U/P^uSOY/{YZ#{Zt/{tu1Ouw/{wx1Ox!P/{!P!Q#{!Q!}/{!}#O5q#O#P3^#P;'S/{;'S;=`6r<%lO/{U0S^uSyQOY/{YZ#{Zt/{tu1Ouw/{wx1Ox!P/{!P!Q3s!Q!}/{!}#O5q#O#P3^#P;'S/{;'S;=`6r<%lO/{Q1TXyQOY1OZ!P1O!P!Q1p!Q!}1O!}#O2_#O#P3^#P;'S1O;'S;=`3m<%lO1OQ1sP!P!Q1vQ1{UyQ#Z#[1v#]#^1v#a#b1v#g#h1v#i#j1v#m#n1vQ2bVOY2_Z#O2_#O#P2w#P#Q1O#Q;'S2_;'S;=`3W<%lO2_Q2zSOY2_Z;'S2_;'S;=`3W<%lO2_Q3ZP;=`<%l2_Q3aSOY1OZ;'S1O;'S;=`3m<%lO1OQ3pP;=`<%l1OU3xWuSOt#{uw#{x!P#{!P!Q4b!Q#O#{#P;'S#{;'S;=`$d<%lO#{U4ibuSyQOt#{uw#{x#O#{#P#Z#{#Z#[4b#[#]#{#]#^4b#^#a#{#a#b4b#b#g#{#g#h4b#h#i#{#i#j4b#j#m#{#m#n4b#n;'S#{;'S;=`$d<%lO#{U5v[uSOY5qYZ#{Zt5qtu2_uw5qwx2_x#O5q#O#P2w#P#Q/{#Q;'S5q;'S;=`6l<%lO5qU6oP;=`<%l5qU6uP;=`<%l/{U7PUuS!RQOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U7jW#[QuSOt#{uw#{x!_#{!_!`8S!`#O#{#P;'S#{;'S;=`$d<%lO#{U8XVuSOt#{uw#{x#O#{#P#Q8n#Q;'S#{;'S;=`$d<%lO#{U8uU#ZQuSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{~9^O#V~U9eU#_QuSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U:OUuS!YQOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{U:g]uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#U;`#U#o,w#o;'S#{;'S;=`$d<%lO#{U;e^uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#`,w#`#a<a#a#o,w#o;'S#{;'S;=`$d<%lO#{U<f^uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#g,w#g#h=b#h#o,w#o;'S#{;'S;=`$d<%lO#{U=g^uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#X,w#X#Y>c#Y#o,w#o;'S#{;'S;=`$d<%lO#{U>j[xQuSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^?g[#WWuSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^@d[#YWuSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#o,w#o;'S#{;'S;=`$d<%lO#{^Aa^#XWuSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#f,w#f#gB]#g#o,w#o;'S#{;'S;=`$d<%lO#{UBb^uSOt#{uw#{x}#{}!O,w!O!_#{!_!`-r!`#O#{#P#T#{#T#i,w#i#j=b#j#o,w#o;'S#{;'S;=`$d<%lO#{UCeU!bQuSOt#{uw#{x#O#{#P;'S#{;'S;=`$d<%lO#{~C|O#b~",
-  tokenizers: [operatorTokenizer, 1, 2, 3, tokenizer, new LocalTokenGroup("[~RP!O!PU~ZO!|~~", 11)],
+  tokenizers: [operatorTokenizer, 1, 2, 3, tokenizer, new LocalTokenGroup("[~RP!O!PU~ZO!}~~", 11)],
-  topRules: {"Program":[0,26]},
+  topRules: {"Program":[0,27]},
   specialized: [{term: 20, get: (value: any, stack: any) => (specializeKeyword(value, stack) << 1), external: specializeKeyword},{term: 20, get: (value: keyof typeof spec_Identifier) => spec_Identifier[value] || -1}],
-  tokenPrec: 1634
+  tokenPrec: 1658
 })
@@ -127,3 +127,34 @@ describe('string escape sequences', () => {
   `)
   })
 })
+
+describe('curly strings', () => {
+  test('work on one line', () => {
+    expect('{ one two three }').toMatchTree(`
+      String
+        CurlyString { one two three }
+    `)
+  })
+
+  test('work on multiple lines', () => {
+    expect(`{
+ one
+ two
+ three }`).toMatchTree(`
+      String
+        CurlyString {
+ one
+ two
+ three }`)
+  })
+
+  test('can contain other curlies', () => {
+    expect(`{ { one }
+ two
+ { three } }`).toMatchTree(`
+      String
+        CurlyString { { one }
+ two
+ { three } }`)
+  })
+})
@@ -1,5 +1,5 @@
 import { ExternalTokenizer, InputStream, Stack } from '@lezer/lr'
-import { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot, Do } from './shrimp.terms'
+import { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot, Do, CurlyString } from './shrimp.terms'

 // doobie doobie do (we need the `do` keyword to know when we're defining params)
 export function specializeKeyword(ident: string) {
@@ -18,6 +18,10 @@ export const setGlobals = (newGlobals: string[]) => {
 export const tokenizer = new ExternalTokenizer(
   (input: InputStream, stack: Stack) => {
     const ch = getFullCodePoint(input, 0)

+    // Handle curly strings
+    if (ch === 123 /* { */) return consumeCurlyString(input, stack)
+
     if (!isWordChar(ch)) return

     // Don't consume things that start with digits - let Number token handle it
@@ -26,7 +30,7 @@ export const tokenizer = new ExternalTokenizer(
     // Don't consume things that start with - or + followed by a digit (negative/positive numbers)
     if ((ch === 45 /* - */ || ch === 43) /* + */ && isDigit(input.peek(1))) return

-    const isValidStart = isLowercaseLetter(ch) || isEmojiOrUnicode(ch)
+    const isValidStart = isIdentStart(ch)
     const canBeWord = stack.canShift(Word)

     // Consume all word characters, tracking if it remains a valid identifier
@@ -119,7 +123,7 @@ const consumeWordToken = (
     }

     // Track identifier validity: must be lowercase, digit, dash, or emoji/unicode
-    if (!isLowercaseLetter(ch) && !isDigit(ch) && ch !== 45 /* - */ && ch !== 63 /* ? */ && !isEmojiOrUnicode(ch)) {
+    if (!isIdentChar(ch)) {
       if (!canBeWord) break
       isValidIdentifier = false
     }
@@ -151,6 +155,32 @@ const consumeRestOfWord = (input: InputStream, startPos: number, canBeWord: bool
   return pos
 }

+// Consumes { curly strings } and tracks braces so you can { have { braces { inside { braces } } }
+const consumeCurlyString = (input: InputStream, stack: Stack) => {
+  if (!stack.canShift(CurlyString)) return
+
+  let depth = 0
+  let pos = 0
+
+  while (true) {
+    const ch = input.peek(pos)
+    if (ch < 0) return // EOF - invalid
+
+    if (ch === 123) depth++ // {
+    else if (ch === 125) { // }
+      depth--
+      if (depth === 0) {
+        pos++ // consume final }
+        break
+      }
+    }
+
+    pos++
+  }
+
+  input.acceptToken(CurlyString, pos)
+}
+
 // Check if this identifier is in scope (for property access detection)
 // Returns IdentifierBeforeDot token if in scope, null otherwise
 const checkForDotGet = (input: InputStream, stack: Stack, pos: number): number | null => {
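Two details above are easy to miss: the `canShift` guard keeps `{` available for grammar positions that aren't strings, and the depth counter is what makes nesting work. An illustrative trace (not from the test suite) for a nested input:

```ts
// input:  { a { b } c }
//         ^   ^   ^   ^
// depth:  1   2   1   0   <- loop exits once depth returns to 0
//
// Each iteration advances pos by one; when the final '}' brings depth
// back to 0, pos is bumped past it, and acceptToken(CurlyString, 13)
// emits the whole 13-character braced region as a single token.
```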
@@ -159,7 +189,9 @@ const checkForDotGet = (input: InputStream, stack: Stack, pos: number): number |

   // If identifier is in scope, this is property access (e.g., obj.prop)
   // If not in scope, it should be consumed as a Word (e.g., file.txt)
-  return context?.scope.has(identifierText) || globals.includes(identifierText) ? IdentifierBeforeDot : null
+  return context?.scope.has(identifierText) || globals.includes(identifierText)
+    ? IdentifierBeforeDot
+    : null
 }

 // Decide between AssignableIdentifier and Identifier using grammar state + peek-ahead
@@ -187,7 +219,10 @@ const chooseIdentifierToken = (input: InputStream, stack: Stack): number => {
     const nextCh2 = getFullCodePoint(input, peekPos + 1)

     // Check for compound assignment operators: +=, -=, *=, /=, %=
-    if ([43/* + */, 45/* - */, 42/* * */, 47/* / */, 37/* % */].includes(nextCh) && nextCh2 === 61/* = */) {
+    if (
+      [43 /* + */, 45 /* - */, 42 /* * */, 47 /* / */, 37 /* % */].includes(nextCh) &&
+      nextCh2 === 61 /* = */
+    ) {
       // Found compound operator, check if it's followed by whitespace
       const charAfterOp = getFullCodePoint(input, peekPos + 2)
       if (isWhiteSpace(charAfterOp) || charAfterOp === -1 /* EOF */) {
@@ -208,6 +243,14 @@ const chooseIdentifierToken = (input: InputStream, stack: Stack): number => {
 }

 // Character classification helpers
+export const isIdentStart = (ch: number): boolean => {
+  return isLowercaseLetter(ch) || isEmojiOrUnicode(ch)
+}
+
+export const isIdentChar = (ch: number): boolean => {
+  return isLowercaseLetter(ch) || isDigit(ch) || ch === 45 /* - */ || ch === 63 /* ? */ || isEmojiOrUnicode(ch)
+}
+
 const isWhiteSpace = (ch: number): boolean => {
   return ch === 32 /* space */ || ch === 9 /* tab */ || ch === 13 /* \r */
 }
|
||||||
import { expect } from 'bun:test'
|
import { expect } from 'bun:test'
|
||||||
import { parser } from '#parser/shrimp'
|
import { parser } from '#parser/shrimp'
|
||||||
import { setGlobals } from '#parser/tokenizer'
|
import { setGlobals } from '#parser/tokenizer'
|
||||||
|
import { globals as prelude } from '#prelude'
|
||||||
import { $ } from 'bun'
|
import { $ } from 'bun'
|
||||||
import { assert, errorMessage } from '#utils/utils'
|
import { assert, errorMessage } from '#utils/utils'
|
||||||
import { Compiler } from '#compiler/compiler'
|
import { Compiler } from '#compiler/compiler'
|
||||||
|
|
@ -43,7 +44,8 @@ expect.extend({
|
||||||
toMatchTree(received: unknown, expected: string, globals?: Record<string, any>) {
|
toMatchTree(received: unknown, expected: string, globals?: Record<string, any>) {
|
||||||
assert(typeof received === 'string', 'toMatchTree can only be used with string values')
|
assert(typeof received === 'string', 'toMatchTree can only be used with string values')
|
||||||
|
|
||||||
if (globals) setGlobals(Object.keys(globals))
|
const allGlobals = { ...prelude, ...(globals || {}) }
|
||||||
|
setGlobals(Object.keys(allGlobals))
|
||||||
const tree = parser.parse(received)
|
const tree = parser.parse(received)
|
||||||
const actual = treeToString(tree, received)
|
const actual = treeToString(tree, received)
|
||||||
const normalizedExpected = trimWhitespace(expected)
|
const normalizedExpected = trimWhitespace(expected)
|
||||||
|
|
@@ -99,9 +101,10 @@ expect.extend({
     assert(typeof received === 'string', 'toEvaluateTo can only be used with string values')

     try {
-      if (globals) setGlobals(Object.keys(globals))
+      const allGlobals = { ...prelude, ...(globals || {}) }
+      setGlobals(Object.keys(allGlobals))
       const compiler = new Compiler(received)
-      const result = await run(compiler.bytecode, globals)
+      const result = await run(compiler.bytecode, allGlobals)
       let value = VMResultToValue(result)

       // Just treat regex as strings for comparison purposes
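Note the spread order in both matchers: the prelude goes first, so anything a test passes in `globals` can shadow a prelude name of the same key - plain object-spread semantics, sketched here with illustrative values:

```ts
// Illustrative values, not the real prelude.
const prelude = { echo: 'prelude-echo', length: 'prelude-length' }
const globals = { length: 'test-length' }

const allGlobals = { ...prelude, ...(globals || {}) }
// => { echo: 'prelude-echo', length: 'test-length' }
```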
@@ -50,4 +50,4 @@ describe('Shrimp', () => {
     await shrimp.run('abc = nothing')
     expect(shrimp.get('abc')).toEqual('nothing')
   })
 })
@@ -1,10 +1,10 @@
 import { test, expect, describe } from 'bun:test'
-import { ScopeTracker } from './scopeTracker'
+import { EditorScopeAnalyzer } from './editorScopeAnalyzer'
 import { TextDocument } from 'vscode-languageserver-textdocument'
 import { parser } from '../../../src/parser/shrimp'
 import * as Terms from '../../../src/parser/shrimp.terms'

-describe('ScopeTracker', () => {
+describe('EditorScopeAnalyzer', () => {
   test('top-level assignment is in scope', () => {
     const code = 'x = 5\necho x'
     const { tree, tracker } = parseAndGetScope(code)

@@ -135,11 +135,17 @@ end
     const xInEcho = identifiers[identifiers.length - 1]
     expect(tracker.isInScope('x', xInEcho)).toBe(true)
   })
+
+  test('the prelude functions are always in scope', () => {
+    const code = `echo "Hello, World!"`
+    const { tree, tracker } = parseAndGetScope(code)
+    expect(tracker.isInScope('echo', tree.topNode)).toBe(true)
+  })
 })

 const parseAndGetScope = (code: string) => {
   const document = TextDocument.create('test://test.sh', 'shrimp', 1, code)
   const tree = parser.parse(code)
-  const tracker = new ScopeTracker(document)
+  const tracker = new EditorScopeAnalyzer(document)
   return { document, tree, tracker }
 }
@@ -1,17 +1,20 @@
 import { SyntaxNode } from '@lezer/common'
 import { TextDocument } from 'vscode-languageserver-textdocument'
 import * as Terms from '../../../src/parser/shrimp.terms'
+import { globals } from '../../../src/prelude'

 /**
  * Tracks variables in scope at a given position in the parse tree.
  * Used to distinguish identifiers (in scope) from words (not in scope).
  */
-export class ScopeTracker {
+export class EditorScopeAnalyzer {
   private document: TextDocument
   private scopeCache = new Map<number, Set<string>>()

   constructor(document: TextDocument) {
     this.document = document
+    const preludeKeys = Object.keys(globals)
+    this.scopeCache.set(0, new Set(preludeKeys))
   }

   /**
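Seeding `scopeCache` at offset 0 is what makes the "prelude functions are always in scope" test added earlier in this diff pass: lookups that walk back to the document root now find the prelude names. Usage, following that test:

```ts
// Imports as in the test file above (parser, TextDocument, EditorScopeAnalyzer).
const document = TextDocument.create('test://test.sh', 'shrimp', 1, 'echo hi')
const tree = parser.parse('echo hi')

const tracker = new EditorScopeAnalyzer(document)
tracker.isInScope('echo', tree.topNode) // true - seeded from the prelude
```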
@@ -7,7 +7,7 @@ import {
   SemanticTokenTypes,
   SemanticTokenModifiers,
 } from 'vscode-languageserver/node'
-import { ScopeTracker } from './scopeTracker'
+import { EditorScopeAnalyzer } from './editorScopeAnalyzer'

 export const TOKEN_TYPES = [
   SemanticTokenTypes.function,
@@ -32,7 +32,7 @@ export function buildSemanticTokens(document: TextDocument): number[] {
   const text = document.getText()
   const tree = parser.parse(text)
   const builder = new SemanticTokensBuilder()
-  const scopeTracker = new ScopeTracker(document)
+  const scopeTracker = new EditorScopeAnalyzer(document)

   walkTree(tree.topNode, document, builder, scopeTracker)
@@ -77,7 +77,7 @@ function walkTree(
   node: SyntaxNode,
   document: TextDocument,
   builder: SemanticTokensBuilder,
-  scopeTracker: ScopeTracker
+  scopeTracker: EditorScopeAnalyzer
 ) {
   // Special handling for NamedArgPrefix to split "name=" into two tokens
   if (node.type.id === Terms.NamedArgPrefix) {
@@ -104,7 +104,7 @@ type TokenInfo = { type: number; modifiers: number } | undefined
 function getTokenType(
   node: SyntaxNode,
   document: TextDocument,
-  scopeTracker: ScopeTracker
+  scopeTracker: EditorScopeAnalyzer
 ): TokenInfo {
   const nodeTypeId = node.type.id
   const parentTypeId = node.parent?.type.id