Compare commits


1 Commit

Author           SHA1        Message        Date
Chris Wanstrath  23642d3da1  import -> use  2025-12-03 15:48:49 -08:00
52 changed files with 1506 additions and 1011 deletions

.gitignore (vendored; 1 line changed)
View File

@ -34,7 +34,6 @@ report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
.DS_Store
/tmp
vscode-extension/tmp
/docs
*.vsix

View File

@ -1,192 +0,0 @@
#!/usr/bin/env bun
// WARNING: [[ No human has been anywhere near this file. It's pure Claude slop.
// Enter at your own risk. ]]
import { readFileSync } from 'fs'
type CallInfo = {
method: string
line: number
calls: Set<string>
isRecursive?: boolean
}
// Parse the parser file and extract method calls
function analyzeParser(filePath: string): Map<string, CallInfo> {
const content = readFileSync(filePath, 'utf-8')
const lines = content.split('\n')
const methods = new Map<string, CallInfo>()
// Find all method definitions
const methodRegex = /^\s*(\w+)\s*\([^)]*\):\s*/
let currentMethod: string | null = null
let braceDepth = 0
let classDepth = 0
for (let i = 0; i < lines.length; i++) {
const line = lines[i] || ''
// Track if we're inside the Parser class
if (line.includes('class Parser')) {
classDepth = braceDepth + 1 // Will be the depth after we process this line's brace
}
// Check for method definition (only inside class, at class level)
// Check BEFORE incrementing braceDepth
if (classDepth > 0 && braceDepth === classDepth) {
const methodMatch = line.match(methodRegex)
if (methodMatch && !line.includes('class ')) {
currentMethod = methodMatch[1]!
methods.set(currentMethod, {
method: currentMethod,
line: i + 1,
calls: new Set()
})
}
}
// Track brace depth
braceDepth += (line.match(/{/g) || []).length
braceDepth -= (line.match(/}/g) || []).length
// Find method calls within current method
if (currentMethod && braceDepth > 0) {
// Match this.methodName() calls
const callRegex = /this\.(\w+)\s*\(/g
let match
while ((match = callRegex.exec(line)) !== null) {
const calledMethod = match[1]!
const info = methods.get(currentMethod)!
info.calls.add(calledMethod)
// Mark recursive calls
if (calledMethod === currentMethod) {
info.isRecursive = true
}
}
}
// Reset when method ends
if (braceDepth === 0) {
currentMethod = null
}
}
return methods
}
// Build tree structure starting from a root method
function buildTree(
method: string,
callGraph: Map<string, CallInfo>,
visited: Set<string>,
indent = '',
isLast = true,
depth = 0,
maxDepth = 3
): string[] {
const lines: string[] = []
const info = callGraph.get(method)
if (!info) return lines
// Add current method
const prefix = depth === 0 ? '' : (isLast ? '└─> ' : '├─> ')
const suffix = info.isRecursive ? ' (recursive)' : ''
const lineNum = `[line ${info.line}]`
lines.push(`${indent}${prefix}${method}() ${lineNum}${suffix}`)
// Stop if we've reached max depth
if (depth >= maxDepth) {
return lines
}
// Prevent infinite recursion in tree display
if (visited.has(method)) {
return lines
}
const newVisited = new Set(visited)
newVisited.add(method)
// Helper methods to filter out (low-level utilities)
const helperPatterns = /^(is|next|peek|expect|current|op)/i
// Get sorted unique calls (filter out recursive self-calls for display)
const calls = Array.from(info.calls)
.filter(c => callGraph.has(c)) // Only show parser methods
.filter(c => c !== method) // Don't show immediate self-recursion
.filter(c => !helperPatterns.test(c)) // Filter out helpers
.sort()
// Add children
const newIndent = indent + (isLast ? ' ' : '│ ')
calls.forEach((call, idx) => {
const childLines = buildTree(
call,
callGraph,
newVisited,
newIndent,
idx === calls.length - 1,
depth + 1,
maxDepth
)
lines.push(...childLines)
})
return lines
}
// Main
const parserPath = './src/parser/parser2.ts'
const maxDepth = parseInt(process.argv[2] || '5')
console.log('Parser Call Tree for', parserPath)
console.log(`Max depth: ${maxDepth}`)
console.log('═'.repeat(60))
console.log()
const callGraph = analyzeParser(parserPath)
// Start from parse() method
const tree = buildTree('parse', callGraph, new Set(), '', true, 0, maxDepth)
console.log(tree.join('\n'))
// Show some stats
console.log('\n' + '═'.repeat(60))
console.log('Stats:')
console.log(` Total methods: ${callGraph.size}`)
console.log(` Entry point: parse()`)
// Find methods that are never called (potential dead code or entry points)
const allCalled = new Set<string>()
for (const info of callGraph.values()) {
info.calls.forEach(c => allCalled.add(c))
}
const uncalled = Array.from(callGraph.keys())
.filter(m => !allCalled.has(m) && m !== 'parse')
.sort()
if (uncalled.length > 0) {
console.log(`\n Uncalled methods: ${uncalled.join(', ')}`)
}
// Find most-called methods
const callCount = new Map<string, number>()
for (const info of callGraph.values()) {
for (const called of info.calls) {
callCount.set(called, (callCount.get(called) || 0) + 1)
}
}
const topCalled = Array.from(callCount.entries())
.sort((a, b) => b[1] - a[1])
.slice(0, 5)
console.log(`\n Most-called methods:`)
for (const [method, count] of topCalled) {
console.log(` ${method}() - called ${count} times`)
}
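
The deleted script's core trick is worth keeping in mind: it never builds a real AST, it just counts braces to know when it is inside a class-level method, then regex-scans each line for `this.foo(` calls. A minimal stand-alone sketch of the idea (hypothetical `source` input, not from this repo):

const source = `class Parser {
  parse() { this.statement() }
  statement() { this.expression() }
}`
let depth = 0
let current: string | null = null
for (const line of source.split('\n')) {
  // method definitions live at depth 1, directly inside the class body
  const def = line.match(/^\s*(\w+)\s*\(/)
  if (depth === 1 && def) current = def[1]!
  depth += (line.match(/{/g) || []).length
  depth -= (line.match(/}/g) || []).length
  if (current) {
    for (const m of line.matchAll(/this\.(\w+)\s*\(/g)) console.log(`${current} -> ${m[1]}`)
  }
  if (depth === 0) current = null // the class (and any open method) has closed
}
// prints: parse -> statement, statement -> expression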

View File

@ -25,9 +25,9 @@ ${colors.bright}Commands:${colors.reset}
${colors.cyan}version${colors.reset} Print version
${colors.bright}Options:${colors.reset}
${colors.cyan}eval -I${colors.reset} ${colors.yellow}<module>${colors.reset} Import module (can be repeated)
Example: shrimp -I math -e 'random | echo'
Example: shrimp -Imath -Istr -e 'random | echo'`)
${colors.cyan}eval -U${colors.reset} ${colors.yellow}<module>${colors.reset} Use module (can be repeated)
Example: shrimp -U math -e 'random | echo'
Example: shrimp -Umath -Ustr -e 'random | echo'`)
}
function showVersion() {
@ -51,22 +51,22 @@ async function main() {
return
}
// Parse -I flags for imports (supports both "-I math" and "-Imath")
// Parse -U flags for use (supports both "-U math" and "-Umath")
const imports: string[] = []
while (args.length > 0) {
const arg = args[0]
if (arg === '-I') {
// "-I math" format
if (arg === '-U') {
// "-U math" format
if (args.length < 2) {
console.log(`${colors.bright}error: -I requires a module name${colors.reset}`)
process.exit(1)
}
imports.push(args[1])
args = args.slice(2)
} else if (arg.startsWith('-I')) {
// "-Imath" format
} else if (arg.startsWith('-U')) {
// "-Umath" format
const moduleName = arg.slice(2)
if (!moduleName) {
console.log(`${colors.bright}error: -I requires a module name${colors.reset}`)
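
Both accepted spellings can be exercised in isolation; a minimal sketch of the flag loop above (the `parseUseFlags` name is hypothetical, with error handling reduced to a throw):

function parseUseFlags(args: string[]): string[] {
  const modules: string[] = []
  while (args.length > 0) {
    const arg = args[0]!
    if (arg === '-U') {
      // "-U math": the module name is the next argument
      if (args.length < 2) throw new Error('-U requires a module name')
      modules.push(args[1]!)
      args = args.slice(2)
    } else if (arg.startsWith('-U')) {
      // "-Umath": the module name is attached to the flag
      modules.push(arg.slice(2))
      args = args.slice(1)
    } else {
      args = args.slice(1) // not a -U flag; skip
    }
  }
  return modules
}
// parseUseFlags(['-U', 'math', '-Ustr']) => ['math', 'str']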

View File

@ -1,18 +0,0 @@
#!/usr/bin/env shrimp
# usage: dice <sides>
import math only=random
import list only=first
import str only=[replace starts-with?]
sides = $.args | first
sides ??= 20
if sides | starts-with? d:
sides = replace sides //\D// ''
end
sides = number sides
echo 'Rolling d$sides...'
random 1 sides | echo

View File

@ -1,31 +0,0 @@
#!/usr/bin/env shrimp
year = date.now | date.year
project = fs.pwd | fs.basename | str.titlecase
{
Copyright $year $project Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
}
| str.trim
| echo

View File

@ -1,39 +0,0 @@
#!/usr/bin/env shrimp
# usage: password <length> [!spaced] [!symbols]
if ($.args | list.contains? -h):
echo 'usage: password <length> [!spaced] [!symbols]'
exit
end
password = do n=22 symbols=true spaced=true:
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
if symbols: chars += '!@#%^&*-=()[]<>' end
out = []
i = 0
max = length chars
while i < n:
idx = math.floor ((math.random) * max)
ch = chars | at idx
list.push out ch
i += 1
end
if spaced:
pos1 = math.floor((n - 2) / 3)
pos2 = math.floor((n - 2) * 2 / 3)
list.insert out pos2 ' '
list.insert out pos1 ' '
end
str.join out ''
end
missing-arg? = do x: $.args | list.contains? x | not end
num = $.args | list.reject (do x: x | str.starts-with? ! end) | list.first
password num symbols=(missing-arg? !symbols) spaced=(missing-arg? !spaced) | echo

View File

@ -1,9 +0,0 @@
#!/usr/bin/env shrimp
if not fs.exists? 'package.json':
echo '🦐 package.json not found'
exit 1
end
package = fs.read 'package.json' | json.decode
package.scripts | dict.keys | list.sort | each do x: echo x end

View File

@ -5,8 +5,9 @@
"private": true,
"type": "module",
"scripts": {
"dev": "bun --hot src/server/server.tsx",
"repl": "bun bin/repl",
"dev": "bun generate-parser && bun --hot src/server/server.tsx",
"generate-parser": "lezer-generator src/parser/shrimp.grammar --typeScript -o src/parser/shrimp.ts",
"repl": "bun generate-parser && bun bin/repl",
"update-reef": "rm -rf ~/.bun/install/cache/ && rm bun.lock && bun update reefvm",
"cli:install": "ln -s \"$(pwd)/bin/shrimp\" ~/.bun/bin/shrimp",
"cli:remove": "rm ~/.bun/bin/shrimp",

View File

@ -1,6 +1,9 @@
import { CompilerError } from '#compiler/compilerError.ts'
import { parse, setGlobals } from '#parser/parser2'
import { parse } from '#parser/parser2'
import { SyntaxNode, Tree } from '#parser/node'
import { parser } from '#parser/shrimp.ts'
import * as terms from '#parser/shrimp.terms'
import { setGlobals } from '#parser/tokenizer'
import { tokenizeCurlyString } from '#parser/curlyTokenizer'
import { assert, errorMessage } from '#utils/utils'
import { toBytecode, type Bytecode, type ProgramItem, bytecodeToString } from 'reefvm'
@ -88,7 +91,7 @@ export class Compiler {
}
#compileCst(cst: Tree, input: string) {
const isProgram = cst.topNode.type.is('Program')
const isProgram = cst.topNode.type.id === terms.Program
assert(isProgram, `Expected Program node, got ${cst.topNode.type.name}`)
let child = cst.topNode.firstChild
@ -104,8 +107,8 @@ export class Compiler {
const value = input.slice(node.from, node.to)
if (DEBUG) console.log(`🫦 ${node.name}: ${value}`)
switch (node.type.name) {
case 'Number':
switch (node.type.id) {
case terms.Number:
// Handle sign prefix for hex, binary, and octal literals
// Number() doesn't parse '-0xFF', '+0xFF', '-0o77', etc. correctly
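// A quick illustration of why (a sketch, not repo code): Number('0xFF') === 255 and
// Number('0o77') === 63, but Number('-0xFF') and Number('+0o77') are NaN, so the sign
// has to be split off, the remainder parsed, and the sign reapplied.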
let numberValue: number
@ -122,8 +125,8 @@ export class Compiler {
return [[`PUSH`, numberValue]]
case 'String': {
if (node.firstChild?.type.is('CurlyString'))
case terms.String: {
if (node.firstChild?.type.id === terms.CurlyString)
return this.#compileCurlyString(value, input)
const { parts, hasInterpolation } = getStringParts(node, input)
@ -140,19 +143,19 @@ export class Compiler {
parts.forEach((part) => {
const partValue = input.slice(part.from, part.to)
switch (part.type.name) {
case 'StringFragment':
switch (part.type.id) {
case terms.StringFragment:
// Plain text fragment - just push as-is
instructions.push(['PUSH', partValue])
break
case 'EscapeSeq':
case terms.EscapeSeq:
// Process escape sequence and push the result
const processed = processEscapeSeq(partValue)
instructions.push(['PUSH', processed])
break
case 'Interpolation':
case terms.Interpolation:
// Interpolation contains either Identifier or ParenExpr (the $ is anonymous)
const child = part.firstChild
if (!child) {
@ -176,15 +179,15 @@ export class Compiler {
return instructions
}
case 'Boolean': {
case terms.Boolean: {
return [[`PUSH`, value === 'true']]
}
case 'Null': {
case terms.Null: {
return [[`PUSH`, null]]
}
case 'Regex': {
case terms.Regex: {
// remove the surrounding slashes and any flags
const [_, pattern, flags] = value.match(/^\/\/(.*)\/\/([gimsuy]*)$/) || []
if (!pattern) {
@ -201,15 +204,15 @@ export class Compiler {
return [['PUSH', regex]]
}
case 'Identifier': {
case terms.Identifier: {
return [[`TRY_LOAD`, value]]
}
case 'Word': {
case terms.Word: {
return [['PUSH', value]]
}
case 'DotGet': {
case terms.DotGet: {
// DotGet is parsed into a nested tree because it's hard to parse it into a flat one.
// However, we want a flat tree - so we're going to pretend like we are getting one from the parser.
//
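// For example, `a.b.c` arrives as DotGet(a, DotGet(b, c)); flattenProperty below walks
// the nested DotGets so instructions come out left to right: load `a`, then `.b`, then `.c`.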
@ -221,7 +224,7 @@ export class Compiler {
instructions.push(['TRY_LOAD', objectName])
const flattenProperty = (prop: SyntaxNode): void => {
if (prop.type.is('DotGet')) {
if (prop.type.id === terms.DotGet) {
const nestedParts = getDotGetParts(prop, input)
const nestedObjectValue = input.slice(nestedParts.object.from, nestedParts.object.to)
@ -230,7 +233,7 @@ export class Compiler {
flattenProperty(nestedParts.property)
} else {
if (prop.type.is('ParenExpr')) {
if (prop.type.id === terms.ParenExpr) {
instructions.push(...this.#compileNode(prop, input))
} else {
const propertyValue = input.slice(prop.from, prop.to)
@ -244,7 +247,7 @@ export class Compiler {
return instructions
}
case 'BinOp': {
case terms.BinOp: {
const { left, op, right } = getBinaryParts(node)
const instructions: ProgramItem[] = []
instructions.push(...this.#compileNode(left, input))
@ -292,7 +295,7 @@ export class Compiler {
return instructions
}
case 'Assign': {
case terms.Assign: {
const assignParts = getAssignmentParts(node)
const instructions: ProgramItem[] = []
@ -323,7 +326,7 @@ export class Compiler {
return instructions
}
case 'CompoundAssign': {
case terms.CompoundAssign: {
const { identifier, operator, right } = getCompoundAssignmentParts(node)
const identifierName = input.slice(identifier.from, identifier.to)
const instructions: ProgramItem[] = []
@ -385,14 +388,14 @@ export class Compiler {
return instructions
}
case 'ParenExpr': {
case terms.ParenExpr: {
const child = node.firstChild
if (!child) return [] // I guess it is empty parentheses?
return this.#compileNode(child, input)
}
case 'FunctionDef': {
case terms.FunctionDef: {
const { paramNames, bodyNodes, catchVariable, catchBody, finallyBody } =
getFunctionDefParts(node, input)
const instructions: ProgramItem[] = []
@ -438,8 +441,8 @@ export class Compiler {
return instructions
}
case 'FunctionCallOrIdentifier': {
if (node.firstChild?.type.is('DotGet')) {
case terms.FunctionCallOrIdentifier: {
if (node.firstChild?.type.id === terms.DotGet) {
const instructions: ProgramItem[] = []
const callLabel: Label = `.call_dotget_${++this.labelCount}`
const afterLabel: Label = `.after_dotget_${++this.labelCount}`
@ -481,8 +484,8 @@ export class Compiler {
PUSH 1 ; Named count
CALL
*/
case 'FunctionCall': {
case terms.FunctionCallWithNewlines:
case terms.FunctionCall: {
const { identifierNode, namedArgs, positionalArgs } = getFunctionCallParts(node, input)
const instructions: ProgramItem[] = []
instructions.push(...this.#compileNode(identifierNode, input))
@ -504,7 +507,7 @@ export class Compiler {
return instructions
}
case 'Block': {
case terms.Block: {
const children = getAllChildren(node)
const instructions: ProgramItem[] = []
@ -519,7 +522,7 @@ export class Compiler {
return instructions
}
case 'FunctionCallWithBlock': {
case terms.FunctionCallWithBlock: {
const [fn, _colon, ...block] = getAllChildren(node)
let instructions: ProgramItem[] = []
@ -537,13 +540,13 @@ export class Compiler {
instructions.push(['RETURN'])
instructions.push([`${afterLabel}:`])
if (fn?.type.is('FunctionCallOrIdentifier')) {
if (fn?.type.id === terms.FunctionCallOrIdentifier) {
instructions.push(['LOAD', input.slice(fn!.from, fn!.to)])
instructions.push(['MAKE_FUNCTION', [], fnLabel])
instructions.push(['PUSH', 1])
instructions.push(['PUSH', 0])
instructions.push(['CALL'])
} else if (fn?.type.is('FunctionCall')) {
} else if (fn?.type.id === terms.FunctionCall) {
let body = this.#compileNode(fn!, input)
const namedArgCount = (body[body.length - 2]![1] as number) * 2
const startSlice = body.length - namedArgCount - 3
@ -566,7 +569,7 @@ export class Compiler {
return instructions
}
case 'TryExpr': {
case terms.TryExpr: {
const { tryBlock, catchVariable, catchBody, finallyBody } = getTryExprParts(node, input)
return this.#compileTryCatchFinally(
@ -578,14 +581,12 @@ export class Compiler {
)
}
case 'Throw':
case 'Not': {
const keyword = node.type.is('Throw') ? 'Throw' : 'Not'
case terms.Throw: {
const children = getAllChildren(node)
const [_throwKeyword, expression] = children
if (!expression) {
throw new CompilerError(
`${keyword} expected expression, got ${children.length} children`,
`Throw expected expression, got ${children.length} children`,
node.from,
node.to
)
@ -593,12 +594,12 @@ export class Compiler {
const instructions: ProgramItem[] = []
instructions.push(...this.#compileNode(expression, input))
instructions.push([keyword.toUpperCase()]) // THROW or NOT
instructions.push(['THROW'])
return instructions
}
case 'IfExpr': {
case terms.IfExpr: {
const { conditionNode, thenBlock, elseIfBlocks, elseThenBlock } = getIfExprParts(
node,
input
@ -641,7 +642,7 @@ export class Compiler {
}
// - `EQ`, `NEQ`, `LT`, `GT`, `LTE`, `GTE` - Pop 2, push boolean
case 'ConditionalOp': {
case terms.ConditionalOp: {
const instructions: ProgramItem[] = []
const { left, op, right } = getBinaryParts(node)
const leftInstructions: ProgramItem[] = this.#compileNode(left, input)
@ -716,7 +717,7 @@ export class Compiler {
return instructions
}
case 'PipeExpr': {
case terms.PipeExpr: {
const { pipedFunctionCall, pipeReceivers } = getPipeExprParts(node)
if (!pipedFunctionCall || pipeReceivers.length === 0) {
throw new CompilerError('PipeExpr must have at least two operands', node.from, node.to)
@ -738,11 +739,11 @@ export class Compiler {
instructions.push(...this.#compileNode(identifierNode, input))
const isUnderscoreInPositionalArgs = positionalArgs.some(
(arg) => arg.type.is('Underscore')
(arg) => arg.type.id === terms.Underscore
)
const isUnderscoreInNamedArgs = namedArgs.some((arg) => {
const { valueNode } = getNamedArgParts(arg, input)
return valueNode.type.is('Underscore')
return valueNode.type.id === terms.Underscore
})
const shouldPushPositionalArg = !isUnderscoreInPositionalArgs && !isUnderscoreInNamedArgs
@ -753,7 +754,7 @@ export class Compiler {
}
positionalArgs.forEach((arg) => {
if (arg.type.is('Underscore')) {
if (arg.type.id === terms.Underscore) {
instructions.push(['LOAD', pipeValName])
} else {
instructions.push(...this.#compileNode(arg, input))
@ -763,7 +764,7 @@ export class Compiler {
namedArgs.forEach((arg) => {
const { name, valueNode } = getNamedArgParts(arg, input)
instructions.push(['PUSH', name])
if (valueNode.type.is('Underscore')) {
if (valueNode.type.id === terms.Underscore) {
instructions.push(['LOAD', pipeValName])
} else {
instructions.push(...this.#compileNode(valueNode, input))
@ -778,14 +779,14 @@ export class Compiler {
return instructions
}
case 'Array': {
case terms.Array: {
const children = getAllChildren(node)
// We can easily parse [=] as an empty dict, but `[ = ]` is tougher.
// = can be a valid word, and is also valid inside words, so for now we cheat
// and check for arrays that look like `[ = ]` to interpret them as
// empty dicts
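// In other words, both `[=]` (a Dict token in the grammar) and `[ = ]` (an Array whose
// only child is the Word `=`) compile to the same empty dict.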
if (children.length === 1 && children[0]!.type.is('Word')) {
if (children.length === 1 && children[0]!.type.id === terms.Word) {
const child = children[0]!
if (input.slice(child.from, child.to) === '=') {
return [['MAKE_DICT', 0]]
@ -797,7 +798,7 @@ export class Compiler {
return instructions
}
case 'Dict': {
case terms.Dict: {
const children = getAllChildren(node)
const instructions: ProgramItem[] = []
@ -806,7 +807,7 @@ export class Compiler {
const valueNode = node.firstChild!.nextSibling
// name= -> name
const key = input.slice(keyNode!.from, keyNode!.to).replace(/\s*=$/, '')
const key = input.slice(keyNode!.from, keyNode!.to).slice(0, -1)
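// slice(0, -1) is safe here: a NamedArgPrefix is a single token like `name=` with no
// whitespace allowed before the `=`, so dropping the last character yields the key.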
instructions.push(['PUSH', key])
instructions.push(...this.#compileNode(valueNode!, input))
@ -816,7 +817,7 @@ export class Compiler {
return instructions
}
case 'WhileExpr': {
case terms.WhileExpr: {
const [_while, test, _colon, block] = getAllChildren(node)
const instructions: ProgramItem[] = []
@ -834,11 +835,11 @@ export class Compiler {
return instructions
}
case 'Import': {
case terms.Import: {
const instructions: ProgramItem[] = []
const [_import, ...nodes] = getAllChildren(node)
const args = nodes.filter(node => node.type.is('Identifier'))
const namedArgs = nodes.filter(node => node.type.is('NamedArg'))
const args = nodes.filter(node => node.type.id === terms.Identifier)
const namedArgs = nodes.filter(node => node.type.id === terms.NamedArg)
instructions.push(['LOAD', 'import'])
@ -859,13 +860,13 @@ export class Compiler {
return instructions
}
case 'Comment': {
case terms.Comment: {
return [] // ignore comments
}
default:
throw new CompilerError(
`Compiler doesn't know how to handle a "${node.type.name}" node.`,
`Compiler doesn't know how to handle a "${node.type.name}" (${node.type.id}) node.`,
node.from,
node.to
)

View File

@ -496,29 +496,29 @@ describe('Compound assignment operators', () => {
})
})
describe('import', () => {
test('imports single dict', () => {
expect(`import str; starts-with? abc a`).toEvaluateTo(true)
describe('use', () => {
test('uses single dict', () => {
expect(`use str; starts-with? abc a`).toEvaluateTo(true)
})
test('imports multiple dicts', () => {
expect(`import str math list; map [1 2 3] do x: x * 2 end`).toEvaluateTo([2, 4, 6])
test('uses multiple dicts', () => {
expect(`use str math list; map [1 2 3] do x: x * 2 end`).toEvaluateTo([2, 4, 6])
})
test('imports non-prelude dicts', () => {
test('uses non-prelude dicts', () => {
expect(`
abc = [a=true b=yes c=si]
import abc
use abc
abc.b
`).toEvaluateTo('yes')
})
test('can specify imports', () => {
expect(`import str only=ends-with?; ref ends-with? | function?`).toEvaluateTo(true)
expect(`import str only=ends-with?; ref starts-with? | function?`).toEvaluateTo(false)
test('can specify uses', () => {
expect(`use str only=ends-with?; ref ends-with? | function?`).toEvaluateTo(true)
expect(`use str only=ends-with?; ref starts-with? | function?`).toEvaluateTo(false)
expect(`
abc = [a=true b=yes c=si]
import abc only=[a c]
use abc only=[a c]
[a c]
`).toEvaluateTo([true, 'si'])
})

View File

@ -151,22 +151,18 @@ describe('array literals', () => {
describe('dict literals', () => {
test('work with numbers', () => {
expect('[a=1 b=2 c=3]').toEvaluateTo({ a: 1, b: 2, c: 3 })
expect('[a = 1 b = 2 c = 3]').toEvaluateTo({ a: 1, b: 2, c: 3 })
})
test('work with strings', () => {
expect("[a='one' b='two' c='three']").toEvaluateTo({ a: 'one', b: 'two', c: 'three' })
expect("[a = 'one' b = 'two' c = 'three']").toEvaluateTo({ a: 'one', b: 'two', c: 'three' })
})
test('work with identifiers', () => {
expect('[a=one b=two c=three]').toEvaluateTo({ a: 'one', b: 'two', c: 'three' })
expect('[a = one b = two c = three]').toEvaluateTo({ a: 'one', b: 'two', c: 'three' })
})
test('can be nested', () => {
expect('[a=one b=[two [c=three]]]').toEvaluateTo({ a: 'one', b: ['two', { c: 'three' }] })
expect('[a = one b = [two [c = three]]]').toEvaluateTo({ a: 'one', b: ['two', { c: 'three' }] })
})
test('can span multiple lines', () => {
@ -175,12 +171,6 @@ describe('dict literals', () => {
b=2
c=3
]`).toEvaluateTo({ a: 1, b: 2, c: 3 })
expect(`[
a = 1
b = 2
c = 3
]`).toEvaluateTo({ a: 1, b: 2, c: 3 })
})
test('empty dict', () => {
@ -200,12 +190,10 @@ describe('dict literals', () => {
test('semicolons as separators', () => {
expect('[a=1; b=2; c=3]').toEvaluateTo({ a: 1, b: 2, c: 3 })
expect('[a = 1; b = 2; c = 3]').toEvaluateTo({ a: 1, b: 2, c: 3 })
})
test('expressions in dicts', () => {
expect('[a=(1 + 2) b=(3 * 4)]').toEvaluateTo({ a: 3, b: 12 })
expect('[a = (1 + 2) b = (3 * 4)]').toEvaluateTo({ a: 3, b: 12 })
})
test('empty lines within dicts', () => {
@ -258,7 +246,7 @@ describe('curly strings', () => {
test('interpolation edge cases', () => {
expect(`{[a=1 b=2 c={wild}]}`).toEvaluateTo(`[a=1 b=2 c={wild}]`)
expect(`a = 1;b = 2;c = 3;{$a $b $c}`).toEvaluateTo(`1 2 3`)
expect(`a = 1;b = 2;c = 3;{$(a)$(b)$(c)}`).toEvaluateTo(`123`)
expect(`a = 1;b = 2;c = 3;{$a$b$c}`).toEvaluateTo(`123`)
})
})

View File

@ -1,4 +1,5 @@
import { CompilerError } from '#compiler/compilerError.ts'
import * as terms from '#parser/shrimp.terms'
import type { SyntaxNode, Tree } from '#parser/node'
export const checkTreeForErrors = (tree: Tree): CompilerError[] => {
@ -23,7 +24,7 @@ export const getAllChildren = (node: SyntaxNode): SyntaxNode[] => {
child = child.nextSibling
}
return children.filter((n) => !n.type.is('Comment'))
return children.filter((n) => n.type.id !== terms.Comment)
}
export const getBinaryParts = (node: SyntaxNode) => {
@ -50,12 +51,12 @@ export const getAssignmentParts = (node: SyntaxNode) => {
}
// array destructuring
if (left && left.type.is('Array')) {
const identifiers = getAllChildren(left).filter((child) => child.type.is('Identifier'))
if (left && left.type.id === terms.Array) {
const identifiers = getAllChildren(left).filter((child) => child.type.id === terms.Identifier)
return { arrayPattern: identifiers, right }
}
if (!left || !left.type.is('AssignableIdentifier')) {
if (!left || left.type.id !== terms.AssignableIdentifier) {
throw new CompilerError(
`Assign left child must be an AssignableIdentifier or Array, got ${left ? left.type.name : 'none'
}`,
@ -71,7 +72,7 @@ export const getCompoundAssignmentParts = (node: SyntaxNode) => {
const children = getAllChildren(node)
const [left, operator, right] = children
if (!left || !left.type.is('AssignableIdentifier')) {
if (!left || left.type.id !== terms.AssignableIdentifier) {
throw new CompilerError(
`CompoundAssign left child must be an AssignableIdentifier, got ${left ? left.type.name : 'none'
}`,
@ -102,7 +103,7 @@ export const getFunctionDefParts = (node: SyntaxNode, input: string) => {
}
const paramNames = getAllChildren(paramsNode).map((param) => {
if (!param.type.is('Identifier') && !param.type.is('NamedParam')) {
if (param.type.id !== terms.Identifier && param.type.id !== terms.NamedParam) {
throw new CompilerError(
`FunctionDef params must be Identifier or NamedParam, got ${param.type.name}`,
param.from,
@ -121,7 +122,7 @@ export const getFunctionDefParts = (node: SyntaxNode, input: string) => {
let finallyBody: SyntaxNode | undefined
for (const child of rest) {
if (child.type.is('CatchExpr')) {
if (child.type.id === terms.CatchExpr) {
catchExpr = child
const catchChildren = getAllChildren(child)
const [_catchKeyword, identifierNode, _colon, body] = catchChildren
@ -134,7 +135,7 @@ export const getFunctionDefParts = (node: SyntaxNode, input: string) => {
}
catchVariable = input.slice(identifierNode.from, identifierNode.to)
catchBody = body
} else if (child.type.is('FinallyExpr')) {
} else if (child.type.id === terms.FinallyExpr) {
finallyExpr = child
const finallyChildren = getAllChildren(child)
const [_finallyKeyword, _colon, body] = finallyChildren
@ -163,9 +164,9 @@ export const getFunctionCallParts = (node: SyntaxNode, input: string) => {
throw new CompilerError(`FunctionCall expected at least 1 child, got 0`, node.from, node.to)
}
const namedArgs = args.filter((arg) => arg.type.is('NamedArg'))
const namedArgs = args.filter((arg) => arg.type.id === terms.NamedArg)
const positionalArgs = args
.filter((arg) => arg.type.is('PositionalArg'))
.filter((arg) => arg.type.id === terms.PositionalArg)
.map((arg) => {
const child = arg.firstChild
if (!child) throw new CompilerError(`PositionalArg has no child`, arg.from, arg.to)
@ -206,13 +207,13 @@ export const getIfExprParts = (node: SyntaxNode, input: string) => {
rest.forEach((child) => {
const parts = getAllChildren(child)
if (child.type.is('ElseExpr')) {
if (child.type.id === terms.ElseExpr) {
if (parts.length !== 3) {
const message = `ElseExpr expected 1 child, got ${parts.length}`
throw new CompilerError(message, child.from, child.to)
}
elseThenBlock = parts.at(-1)
} else if (child.type.is('ElseIfExpr')) {
} else if (child.type.id === terms.ElseIfExpr) {
const [_else, _if, conditional, _colon, thenBlock] = parts
if (!conditional || !thenBlock) {
const names = parts.map((p) => p.type.name).join(', ')
@ -247,10 +248,10 @@ export const getStringParts = (node: SyntaxNode, input: string) => {
// The text is just between the quotes
const parts = children.filter((child) => {
return (
child.type.is('StringFragment') ||
child.type.is('Interpolation') ||
child.type.is('EscapeSeq') ||
child.type.is('CurlyString')
child.type.id === terms.StringFragment ||
child.type.id === terms.Interpolation ||
child.type.id === terms.EscapeSeq ||
child.type.id === terms.CurlyString
)
})
@ -258,10 +259,10 @@ export const getStringParts = (node: SyntaxNode, input: string) => {
// Validate each part is the expected type
parts.forEach((part) => {
if (
part.type.is('StringFragment') &&
part.type.is('Interpolation') &&
part.type.is('EscapeSeq') &&
part.type.is('CurlyString')
part.type.id !== terms.StringFragment &&
part.type.id !== terms.Interpolation &&
part.type.id !== terms.EscapeSeq &&
part.type.id !== terms.CurlyString
) {
throw new CompilerError(
`String child must be StringFragment, Interpolation, or EscapeSeq, got ${part.type.name}`,
@ -274,7 +275,7 @@ export const getStringParts = (node: SyntaxNode, input: string) => {
// hasInterpolation means the string has interpolation ($var) or escape sequences (\n)
// A simple string like 'hello' has one StringFragment but no interpolation
const hasInterpolation = parts.some(
(p) => p.type.is('Interpolation') || p.type.is('EscapeSeq')
(p) => p.type.id === terms.Interpolation || p.type.id === terms.EscapeSeq
)
return { parts, hasInterpolation }
}
@ -291,7 +292,7 @@ export const getDotGetParts = (node: SyntaxNode, input: string) => {
)
}
if (!object.type.is('IdentifierBeforeDot')) {
if (object.type.id !== terms.IdentifierBeforeDot && object.type.id !== terms.Dollar) {
throw new CompilerError(
`DotGet object must be an IdentifierBeforeDot, got ${object.type.name}`,
object.from,
@ -299,7 +300,7 @@ export const getDotGetParts = (node: SyntaxNode, input: string) => {
)
}
if (!['Identifier', 'Number', 'ParenExpr', 'DotGet'].includes(property.type.name)) {
if (![terms.Identifier, terms.Number, terms.ParenExpr, terms.DotGet].includes(property.type.id)) {
throw new CompilerError(
`DotGet property must be an Identifier, Number, ParenExpr, or DotGet, got ${property.type.name}`,
property.from,
@ -333,7 +334,7 @@ export const getTryExprParts = (node: SyntaxNode, input: string) => {
let finallyBody: SyntaxNode | undefined
rest.forEach((child) => {
if (child.type.is('CatchExpr')) {
if (child.type.id === terms.CatchExpr) {
catchExpr = child
const catchChildren = getAllChildren(child)
const [_catchKeyword, identifierNode, _colon, body] = catchChildren
@ -346,7 +347,7 @@ export const getTryExprParts = (node: SyntaxNode, input: string) => {
}
catchVariable = input.slice(identifierNode.from, identifierNode.to)
catchBody = body
} else if (child.type.is('FinallyExpr')) {
} else if (child.type.id === terms.FinallyExpr) {
finallyExpr = child
const finallyChildren = getAllChildren(child)
const [_finallyKeyword, _colon, body] = finallyChildren

View File

@ -3,7 +3,7 @@ import { VM, fromValue, toValue, isValue, type Bytecode } from 'reefvm'
import { Compiler } from '#compiler/compiler'
import { parse } from '#parser/parser2'
import { Tree } from '#parser/node'
import { globals as parserGlobals, setGlobals as setParserGlobals } from '#parser/parser2'
import { globals as parserGlobals, setGlobals as setParserGlobals } from '#parser/tokenizer'
import { globals as prelude } from '#prelude'
export { Compiler } from '#compiler/compiler'

View File

@ -1,6 +1,7 @@
import { parser } from '#parser/shrimp.ts'
import { parse } from '#parser/parser2'
import type { SyntaxNode } from '#parser/node'
import { isIdentStart, isIdentChar } from './tokenizer2'
import { isIdentStart, isIdentChar } from './tokenizer'
// Turns a { curly string } into strings and nodes for interpolation
export const tokenizeCurlyString = (value: string): (string | [string, SyntaxNode])[] => {
@ -38,7 +39,7 @@ export const tokenizeCurlyString = (value: string): (string | [string, SyntaxNod
const input = value.slice(start + 2, pos) // skip '$('
tokens.push([input, parse(input)])
start = pos + 1 // start after ')'
start = ++pos // skip ')'
} else {
char = value[++pos]
if (!char) break
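// Rough shape of the result (a sketch from the signature, not repo output):
//   tokenizeCurlyString('hi $(name)!')  ~>  ['hi ', ['name', <parsed node>], '!']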

View File

@ -1,4 +1,5 @@
import { type Token, TokenType } from './tokenizer2'
import * as term from './shrimp.terms'
export type NodeType =
| 'Program'
@ -50,14 +51,13 @@ export type NodeType =
| 'FinallyExpr'
| 'Throw'
| 'Not'
| 'Eq'
| 'Modulo'
| 'Plus'
| 'Star'
| 'Slash'
| 'Import'
| 'Use'
| 'Do'
| 'Underscore'
| 'colon'
@ -139,6 +139,180 @@ export class Tree {
}
}
// TODO: TEMPORARY SHIM
class SyntaxNodeType {
constructor(public nodeType: NodeType) { }
is(other: string) {
return this.nodeType === other
}
get id(): number {
switch (this.nodeType) {
case 'Program':
return term.Program
case 'Block':
return term.Block
case 'FunctionCall':
return term.FunctionCall
case 'FunctionCallOrIdentifier':
return term.FunctionCallOrIdentifier
case 'FunctionCallWithBlock':
return term.FunctionCallWithBlock
case 'PositionalArg':
return term.PositionalArg
case 'NamedArg':
return term.NamedArg
case 'FunctionDef':
return term.FunctionDef
case 'Params':
return term.Params
case 'NamedParam':
return term.NamedParam
case 'Null':
return term.Null
case 'Boolean':
return term.Boolean
case 'Number':
return term.Number
case 'String':
return term.String
case 'StringFragment':
return term.StringFragment
case 'CurlyString':
return term.CurlyString
case 'DoubleQuote':
return term.DoubleQuote
case 'EscapeSeq':
return term.EscapeSeq
case 'Interpolation':
return term.Interpolation
case 'Regex':
return term.Regex
case 'Identifier':
return term.Identifier
case 'AssignableIdentifier':
return term.AssignableIdentifier
case 'IdentifierBeforeDot':
return term.IdentifierBeforeDot
case 'Word':
return term.Word
case 'Array':
return term.Array
case 'Dict':
return term.Dict
case 'Comment':
return term.Comment
case 'BinOp':
return term.BinOp
case 'ConditionalOp':
return term.ConditionalOp
case 'ParenExpr':
return term.ParenExpr
case 'Assign':
return term.Assign
case 'CompoundAssign':
return term.CompoundAssign
case 'DotGet':
return term.DotGet
case 'PipeExpr':
return term.PipeExpr
case 'IfExpr':
return term.IfExpr
case 'ElseIfExpr':
return term.ElseIfExpr
case 'ElseExpr':
return term.ElseExpr
case 'WhileExpr':
return term.WhileExpr
case 'TryExpr':
return term.TryExpr
case 'CatchExpr':
return term.CatchExpr
case 'FinallyExpr':
return term.FinallyExpr
case 'Throw':
return term.Throw
case 'Eq':
return term.Eq
case 'Modulo':
return term.Modulo
case 'Plus':
return term.Plus
case 'Star':
return term.Star
case 'Slash':
return term.Slash
case 'Use':
return term.Import
case 'Do':
return term.Do
case 'Underscore':
return term.Underscore
case 'colon':
return term.colon
case 'keyword':
return term.keyword
}
return 0
}
get name(): string {
return this.nodeType
}
}
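// With the shim, both spellings are equivalent during the migration, e.g.:
//   node.type.is('Number')            // old string-based check
//   node.type.id === terms.Number     // new numeric check
// (with the one wrinkle that the hand parser's 'Use' maps onto the grammar's Import term)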
export class SyntaxNode {
#type: NodeType
#isError = false
@ -158,13 +332,8 @@ export class SyntaxNode {
return new SyntaxNode(TokenType[token.type] as NodeType, token.from, token.to, parent ?? null)
}
get type(): { type: NodeType, name: NodeType, isError: boolean, is: (other: NodeType) => boolean } {
return {
type: this.#type,
name: this.#type,
isError: this.#isError,
is: (other: NodeType) => other === this.#type
}
get type(): SyntaxNodeType {
return new SyntaxNodeType(this.#type)
}
set type(name: NodeType) {

View File

@ -0,0 +1,99 @@
import { ExternalTokenizer, InputStream } from '@lezer/lr'
import * as terms from './shrimp.terms'
type Operator = { str: string; tokenName: keyof typeof terms }
const operators: Array<Operator> = [
{ str: 'and', tokenName: 'And' },
{ str: 'or', tokenName: 'Or' },
{ str: 'band', tokenName: 'Band' },
{ str: 'bor', tokenName: 'Bor' },
{ str: 'bxor', tokenName: 'Bxor' },
{ str: '>>>', tokenName: 'Ushr' }, // Must come before >>
{ str: '>>', tokenName: 'Shr' },
{ str: '<<', tokenName: 'Shl' },
{ str: '>=', tokenName: 'Gte' },
{ str: '<=', tokenName: 'Lte' },
{ str: '!=', tokenName: 'Neq' },
{ str: '==', tokenName: 'EqEq' },
// Compound assignment operators (must come before single-char operators)
{ str: '??=', tokenName: 'NullishEq' },
{ str: '+=', tokenName: 'PlusEq' },
{ str: '-=', tokenName: 'MinusEq' },
{ str: '*=', tokenName: 'StarEq' },
{ str: '/=', tokenName: 'SlashEq' },
{ str: '%=', tokenName: 'ModuloEq' },
// Nullish coalescing (listed before the single-char operators below)
{ str: '??', tokenName: 'NullishCoalesce' },
// Single-char operators
{ str: '*', tokenName: 'Star' },
{ str: '=', tokenName: 'Eq' },
{ str: '/', tokenName: 'Slash' },
{ str: '+', tokenName: 'Plus' },
{ str: '-', tokenName: 'Minus' },
{ str: '>', tokenName: 'Gt' },
{ str: '<', tokenName: 'Lt' },
{ str: '%', tokenName: 'Modulo' },
]
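// The order encodes longest-match-first: for input `>=`, `>>>` and `>>` fail before
// `>=` matches, and the single-char `>` is never reached. Note the tokenizer below
// also requires whitespace after the operator before it will accept the token.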
export const operatorTokenizer = new ExternalTokenizer((input: InputStream) => {
for (let operator of operators) {
if (!matchesString(input, 0, operator.str)) continue
const afterOpPos = operator.str.length
const charAfterOp = input.peek(afterOpPos)
if (!isWhitespace(charAfterOp)) continue
// Accept the operator token
const token = terms[operator.tokenName]
if (token === undefined) {
throw new Error(`Unknown token name: ${operator.tokenName}`)
}
input.advance(afterOpPos)
input.acceptToken(token)
return
}
})
const isWhitespace = (ch: number): boolean => {
return matchesChar(ch, [' ', '\t', '\n'])
}
const matchesChar = (ch: number, chars: (string | number)[]): boolean => {
for (const c of chars) {
if (typeof c === 'number') {
if (ch === c) {
return true
}
} else if (ch === c.charCodeAt(0)) {
return true
}
}
return false
}
const matchesString = (input: InputStream, pos: number, str: string): boolean => {
for (let i = 0; i < str.length; i++) {
if (input.peek(pos + i) !== str.charCodeAt(i)) {
return false
}
}
return true
}
const peek = (numChars: number, input: InputStream): string => {
let result = ''
for (let i = 0; i < numChars; i++) {
const ch = input.peek(i)
if (ch === -1) {
result += 'EOF'
break
} else {
result += String.fromCharCode(ch)
}
}
return result
}

View File

@ -1,17 +1,11 @@
import { CompilerError } from '#compiler/compilerError'
import { Scanner, type Token, TokenType } from './tokenizer2'
import { SyntaxNode, operators, precedence, conditionals, compounds } from './node'
import { globals } from './tokenizer'
import { parseString } from './stringParser'
const $T = TokenType
// tell the dotGet searcher about builtin globals
export const globals: string[] = []
export const setGlobals = (newGlobals: string[] | Record<string, any>) => {
globals.length = 0
globals.push(...(Array.isArray(newGlobals) ? newGlobals : Object.keys(newGlobals)))
}
export const parse = (input: string): SyntaxNode => {
const parser = new Parser()
return parser.parse(input)
@ -66,7 +60,7 @@ export class Parser {
if (stmt) node.add(stmt)
if (this.pos === prevPos && !this.isEOF())
throw `parser didn't advance - you need to call next()\n\n ${this.input}\n`
throw "parser didn't advance - you need to call next()\n\n ${this.input}\n"
}
return node
@ -222,11 +216,8 @@ export class Parser {
if (this.is($T.Keyword, 'throw'))
return this.throw()
if (this.is($T.Keyword, 'not'))
return this.not()
if (this.is($T.Keyword, 'import'))
return this.import()
if (this.is($T.Keyword, 'use'))
return this.use()
return this.expect($T.Keyword, 'if/while/do/import') as never
}
@ -526,7 +517,7 @@ export class Parser {
if (this.is($T.Keyword, 'finally'))
finalNode = this.finally()
const end = this.keyword('end')
let end = this.keyword('end')
let last = block.at(-1)
if (finalNode) last = finalNode.children.at(-1)!
@ -696,7 +687,7 @@ export class Parser {
const ifWord = this.keyword('if')
const elseIfTest = this.testExpr()
const elseIfBlock = this.block()
const elseIfNode = new SyntaxNode('ElseIfExpr', elseWord.from, elseIfBlock.at(-1)!.to)
const elseIfNode = new SyntaxNode('ElseIfExpr', ifBlock.at(-1)!.from, elseIfBlock.at(-1)!.to)
elseIfNode.push(elseWord, ifWord, elseIfTest)
elseIfNode.push(...elseIfBlock)
node.push(elseIfNode)
@ -705,7 +696,7 @@ export class Parser {
if (this.is($T.Keyword, 'else') && this.nextIs($T.Colon)) {
const elseWord = this.keyword('else')
const elseBlock = this.block()
const elseNode = new SyntaxNode('ElseExpr', elseWord.from, elseBlock.at(-1)!.to)
const elseNode = new SyntaxNode('ElseExpr', ifBlock.at(-1)!.from, elseBlock.at(-1)!.to)
elseNode.push(elseWord)
elseNode.push(...elseBlock)
node.push(elseNode)
@ -714,27 +705,6 @@ export class Parser {
return node.push(this.keyword('end'))
}
import(): SyntaxNode {
const keyword = this.keyword('import')
const args: SyntaxNode[] = []
while (!this.isExprEnd()) {
if (this.is($T.NamedArgPrefix)) {
const prefix = SyntaxNode.from(this.next())
const val = this.value()
const arg = new SyntaxNode('NamedArg', prefix.from, val.to)
arg.push(prefix, val)
args.push(arg)
} else {
args.push(this.identifier())
}
}
const node = new SyntaxNode('Import', keyword.from, args.at(-1)!.to)
node.add(keyword)
return node.push(...args)
}
// if, while, do, etc
keyword(name: string): SyntaxNode {
const node = SyntaxNode.from(this.expect($T.Keyword, name))
@ -769,14 +739,6 @@ export class Parser {
return node.push(prefix, val)
}
// not blah
not(): SyntaxNode {
const keyword = this.keyword('not')
const val = this.expression()
const node = new SyntaxNode('Not', keyword.from, val.to)
return node.push(keyword, val)
}
// operators like + - =
op(op?: string): SyntaxNode {
const token = op ? this.expect($T.Operator, op) : this.expect($T.Operator)
@ -851,6 +813,27 @@ export class Parser {
return node.push(end)
}
use(): SyntaxNode {
const keyword = this.keyword('use')
const args: SyntaxNode[] = []
while (!this.isExprEnd()) {
if (this.is($T.NamedArgPrefix)) {
const prefix = SyntaxNode.from(this.next())
const val = this.value()
const arg = new SyntaxNode('NamedArg', prefix.from, val.to)
arg.push(prefix, val)
args.push(arg)
} else {
args.push(this.identifier())
}
}
const node = new SyntaxNode('Use', keyword.from, args.at(-1)!.to)
node.add(keyword)
return node.push(...args)
}
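// e.g. `use str math only=trim` becomes Use(keyword, Identifier, Identifier, NamedArg),
// mirroring the removed import() above except for the keyword and node name.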
// while test: blah end
while(): SyntaxNode {
const keyword = this.keyword('while')

View File

@ -0,0 +1,129 @@
import { ContextTracker, InputStream } from '@lezer/lr'
import * as terms from './shrimp.terms'
export class Scope {
constructor(public parent: Scope | null, public vars = new Set<string>()) { }
has(name: string): boolean {
return this.vars.has(name) || (this.parent?.has(name) ?? false)
}
hash(): number {
let h = 0
for (const name of this.vars) {
for (let i = 0; i < name.length; i++) {
h = (h << 5) - h + name.charCodeAt(i)
h |= 0
}
}
if (this.parent) {
h = (h << 5) - h + this.parent.hash()
h |= 0
}
return h
}
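// This is the classic `h = h * 31 + charCode` string hash (the shifted form
// (h << 5) - h equals h * 31), folded over every var name and then the parent chain;
// Lezer compares context hashes to decide when cached parse fragments can be reused.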
// Static methods that return new Scopes (immutable operations)
static add(scope: Scope, ...names: string[]): Scope {
const newVars = new Set(scope.vars)
names.forEach((name) => newVars.add(name))
return new Scope(scope.parent, newVars)
}
push(): Scope {
return new Scope(this, new Set())
}
pop(): Scope {
return this.parent ?? this
}
}
// Tracker context that combines Scope with temporary pending identifiers
class TrackerContext {
constructor(public scope: Scope, public pendingIds: string[] = []) { }
}
// Extract identifier text from input stream
const readIdentifierText = (input: InputStream, start: number, end: number): string => {
let text = ''
for (let i = start; i < end; i++) {
const offset = i - input.pos
const ch = input.peek(offset)
if (ch === -1) break
text += String.fromCharCode(ch)
}
return text
}
let inParams = false
export const trackScope = new ContextTracker<TrackerContext>({
start: new TrackerContext(new Scope(null, new Set())),
shift(context, term, stack, input) {
if (term == terms.Do) inParams = true
if (term === terms.AssignableIdentifier) {
const text = readIdentifierText(input, input.pos, stack.pos)
return new TrackerContext(Scope.add(context.scope, text), context.pendingIds)
}
if (inParams && term === terms.Identifier) {
const text = readIdentifierText(input, input.pos, stack.pos)
return new TrackerContext(context.scope, [...context.pendingIds, text])
}
// Track identifiers in array destructuring: [ a b ] = ...
if (!inParams && term === terms.Identifier && isArrayDestructuring(input)) {
const text = readIdentifierText(input, input.pos, stack.pos)
return new TrackerContext(Scope.add(context.scope, text), context.pendingIds)
}
return context
},
reduce(context, term) {
if (term === terms.Params) {
inParams = false
let newScope = context.scope.push()
if (context.pendingIds.length > 0) {
newScope = Scope.add(newScope, ...context.pendingIds)
}
return new TrackerContext(newScope, [])
}
// Pop scope when exiting function
if (term === terms.FunctionDef) {
return new TrackerContext(context.scope.pop(), [])
}
return context
},
hash: (context) => context.scope.hash(),
})
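// Worked example, for `do x y: x + y end`: shifting Do sets inParams, the Identifiers
// x and y are collected into pendingIds, and reducing Params pushes a fresh child scope
// seeded with both names; reducing FunctionDef pops back to the enclosing scope.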
// Check if we're parsing array destructuring: [ a b ] = ...
const isArrayDestructuring = (input: InputStream): boolean => {
let pos = 0
// Find closing bracket
while (pos < 200 && input.peek(pos) !== 93 /* ] */) {
if (input.peek(pos) === -1) return false // EOF
pos++
}
if (input.peek(pos) !== 93 /* ] */) return false
pos++
// Skip whitespace
while (input.peek(pos) === 32 /* space */ ||
input.peek(pos) === 9 /* tab */ ||
input.peek(pos) === 10 /* \n */) {
pos++
}
return input.peek(pos) === 61 /* = */
}

src/parser/shrimp.grammar (new file; 293 lines)
View File

@ -0,0 +1,293 @@
@external propSource highlighting from "./highlight"
@context trackScope from "./parserScopeContext"
@skip { space | Comment }
@top Program { item* }
@external tokens operatorTokenizer from "./operatorTokenizer" { Star, Slash, Plus, Minus, And, Or, Eq, EqEq, Neq, Lt, Lte, Gt, Gte, Modulo, PlusEq, MinusEq, StarEq, SlashEq, ModuloEq, Band, Bor, Bxor, Shl, Shr, Ushr, NullishCoalesce, NullishEq }
@tokens {
@precedence { Number Regex }
StringFragment { !['\\$]+ }
DoubleQuote { '"' !["]* '"' }
NamedArgPrefix { $[a-z] $[a-z0-9-]* "=" }
Number {
("-" | "+")? "0x" $[0-9a-fA-F]+ |
("-" | "+")? "0b" $[01]+ |
("-" | "+")? "0o" $[0-7]+ |
("-" | "+")? $[0-9]+ ("_"? $[0-9]+)* ('.' $[0-9]+ ("_"? $[0-9]+)*)?
}
Boolean { "true" | "false" }
semicolon { ";" }
eof { @eof }
space { " " | "\t" }
Comment { "#" ![\n]* }
leftParen { "(" }
rightParen { ")" }
colon[closedBy="end", @name="colon"] { ":" }
Underscore { "_" }
Dollar { "$" }
Regex { "//" (![/\\\n[] | "\\" ![\n] | "[" (![\n\\\]] | "\\" ![\n])* "]")+ ("//" $[gimsuy]*)? } // Stolen from the lezer JavaScript grammar
"|"[@name=operator]
}
newlineOrSemicolon { newline | semicolon }
end { @specialize[@name=keyword]<Identifier, "end"> }
while { @specialize[@name=keyword]<Identifier, "while"> }
if { @specialize[@name=keyword]<Identifier, "if"> }
else { @specialize[@name=keyword]<Identifier, "else"> }
try { @specialize[@name=keyword]<Identifier, "try"> }
catch { @specialize[@name=keyword]<Identifier, "catch"> }
finally { @specialize[@name=keyword]<Identifier, "finally"> }
throw { @specialize[@name=keyword]<Identifier, "throw"> }
import { @specialize[@name=keyword]<Identifier, "import"> }
null { @specialize[@name=Null]<Identifier, "null"> }
@external tokens tokenizer from "./tokenizer" { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot, CurlyString }
@external tokens pipeStartsLineTokenizer from "./tokenizer" { newline, pipeStartsLine }
@external specialize {Identifier} specializeKeyword from "./tokenizer" { Do }
@precedence {
pipe @left,
or @left,
and @left,
nullish @left,
comparison @left,
multiplicative @left,
additive @left,
bitwise @left,
call,
functionWithNewlines
}
item {
consumeToTerminator newlineOrSemicolon |
consumeToTerminator eof |
newlineOrSemicolon // allow blank lines
}
consumeToTerminator {
PipeExpr |
WhileExpr |
FunctionCallWithBlock |
ambiguousFunctionCall |
TryExpr |
Throw |
Import |
IfExpr |
FunctionDef |
CompoundAssign |
Assign |
BinOp |
ConditionalOp |
expressionWithoutIdentifier
}
PipeExpr {
pipeOperand (!pipe (pipeStartsLine? "|") newlineOrSemicolon* pipeOperand)+
}
pipeOperand {
consumeToTerminator
}
WhileExpr {
while (ConditionalOp | expression) colon Block end
}
Block {
consumeToTerminator | newlineOrSemicolon block
}
FunctionCallWithBlock {
ambiguousFunctionCall colon Block CatchExpr? FinallyExpr? end
}
FunctionCallOrIdentifier {
DotGet | Identifier
}
ambiguousFunctionCall {
FunctionCall | FunctionCallOrIdentifier
}
FunctionCall {
(DotGet | Identifier | ParenExpr) arg+
}
arg {
PositionalArg | NamedArg
}
PositionalArg {
expression | FunctionDef | Underscore
}
NamedArg {
NamedArgPrefix (expression | FunctionDef | Underscore)
}
FunctionDef {
Do Params colon (consumeToTerminator | newlineOrSemicolon block) CatchExpr? FinallyExpr? end
}
ifTest {
ConditionalOp | expression | FunctionCall
}
IfExpr {
if ifTest colon Block ElseIfExpr* ElseExpr? end
}
ElseIfExpr {
else if ifTest colon Block
}
ElseExpr {
else colon Block
}
TryExpr {
try colon Block CatchExpr? FinallyExpr? end
}
CatchExpr {
catch Identifier colon Block
}
FinallyExpr {
finally colon Block
}
Throw {
throw (BinOp | ConditionalOp | expression)
}
// this has to be in the parse tree so the scope tracker can use it
Import {
import NamedArg* Identifier+ NamedArg*
}
ConditionalOp {
expression !comparison EqEq expression |
expression !comparison Neq expression |
expression !comparison Lt expression |
expression !comparison Lte expression |
expression !comparison Gt expression |
expression !comparison Gte expression |
(expression | ConditionalOp) !and And (expression | ConditionalOp) |
(expression | ConditionalOp) !or Or (expression | ConditionalOp) |
(expression | ConditionalOp) !nullish NullishCoalesce (expression | ConditionalOp)
}
Params {
Identifier* NamedParam*
}
NamedParam {
NamedArgPrefix (String | Number | Boolean | null)
}
Assign {
(AssignableIdentifier | Array) Eq consumeToTerminator
}
CompoundAssign {
AssignableIdentifier (PlusEq | MinusEq | StarEq | SlashEq | ModuloEq | NullishEq) consumeToTerminator
}
BinOp {
expression !multiplicative Modulo expression |
(expression | BinOp) !multiplicative Star (expression | BinOp) |
(expression | BinOp) !multiplicative Slash (expression | BinOp) |
(expression | BinOp) !additive Plus (expression | BinOp) |
(expression | BinOp) !additive Minus (expression | BinOp) |
(expression | BinOp) !bitwise Band (expression | BinOp) |
(expression | BinOp) !bitwise Bor (expression | BinOp) |
(expression | BinOp) !bitwise Bxor (expression | BinOp) |
(expression | BinOp) !bitwise Shl (expression | BinOp) |
(expression | BinOp) !bitwise Shr (expression | BinOp) |
(expression | BinOp) !bitwise Ushr (expression | BinOp)
}
ParenExpr {
leftParen newlineOrSemicolon* (
FunctionCallWithNewlines |
IfExpr |
ambiguousFunctionCall |
BinOp newlineOrSemicolon* |
expressionWithoutIdentifier |
ConditionalOp newlineOrSemicolon* |
PipeExpr |
FunctionDef
)
rightParen
}
FunctionCallWithNewlines[@name=FunctionCall] {
(DotGet | Identifier | ParenExpr) newlineOrSemicolon+ arg !functionWithNewlines (newlineOrSemicolon+ arg)* newlineOrSemicolon*
}
expression {
expressionWithoutIdentifier | DotGet | Identifier
}
@local tokens {
dot { "." }
}
@skip {} {
DotGet {
IdentifierBeforeDot dot (DotGet | Number | Identifier | ParenExpr) |
Dollar dot (DotGet | Number | Identifier | ParenExpr)
}
String {
"'" stringContent* "'" | CurlyString | DoubleQuote
}
}
stringContent {
StringFragment |
Interpolation |
EscapeSeq
}
Interpolation {
"$" FunctionCallOrIdentifier |
"$" ParenExpr
}
EscapeSeq {
"\\" ("$" | "n" | "t" | "r" | "\\" | "'")
}
Dict {
"[=]" |
"[" newlineOrSemicolon* NamedArg (newlineOrSemicolon | NamedArg)* "]"
}
Array {
"[" newlineOrSemicolon* (expression (newlineOrSemicolon | expression)*)? "]"
}
// We need expressionWithoutIdentifier to avoid conflicts in consumeToTerminator.
// Without this, when parsing "my-var" at statement level, the parser can't decide:
// - ambiguousFunctionCall → FunctionCallOrIdentifier → Identifier
// - expression → Identifier
// Both want the same Identifier token! So we use expressionWithoutIdentifier
// to remove Identifier from the second path, forcing standalone identifiers
// to go through ambiguousFunctionCall (which is what we want semantically).
// Yes, it is annoying and I gave up trying to use GLR to fix it.
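// Concretely: a lone `my-var` statement parses as FunctionCallOrIdentifier wrapping the
// Identifier, while `my-var 1` parses as FunctionCall with a PositionalArg.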
expressionWithoutIdentifier {
ParenExpr | Word | String | Number | Boolean | Regex | Dict | Array | null
}
block {
(consumeToTerminator? newlineOrSemicolon)*
}

src/parser/shrimp.grammar.d.ts (new vendored file; 4 lines)
View File

@ -0,0 +1,4 @@
declare module '*.grammar' {
const content: string
export default content
}

View File

@ -0,0 +1,81 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
export const
Star = 1,
Slash = 2,
Plus = 3,
Minus = 4,
And = 5,
Or = 6,
Eq = 7,
EqEq = 8,
Neq = 9,
Lt = 10,
Lte = 11,
Gt = 12,
Gte = 13,
Modulo = 14,
PlusEq = 15,
MinusEq = 16,
StarEq = 17,
SlashEq = 18,
ModuloEq = 19,
Band = 20,
Bor = 21,
Bxor = 22,
Shl = 23,
Shr = 24,
Ushr = 25,
NullishCoalesce = 26,
NullishEq = 27,
Identifier = 28,
AssignableIdentifier = 29,
Word = 30,
IdentifierBeforeDot = 31,
CurlyString = 32,
newline = 101,
pipeStartsLine = 102,
Do = 33,
Comment = 34,
Program = 35,
PipeExpr = 36,
WhileExpr = 38,
keyword = 84,
ConditionalOp = 40,
ParenExpr = 41,
FunctionCallWithNewlines = 42,
DotGet = 43,
Number = 44,
Dollar = 45,
PositionalArg = 46,
FunctionDef = 47,
Params = 48,
NamedParam = 49,
NamedArgPrefix = 50,
String = 51,
StringFragment = 52,
Interpolation = 53,
FunctionCallOrIdentifier = 54,
EscapeSeq = 55,
DoubleQuote = 56,
Boolean = 57,
Null = 58,
colon = 59,
CatchExpr = 60,
Block = 62,
FinallyExpr = 63,
Underscore = 66,
NamedArg = 67,
IfExpr = 68,
FunctionCall = 70,
ElseIfExpr = 71,
ElseExpr = 73,
BinOp = 74,
Regex = 75,
Dict = 76,
Array = 77,
FunctionCallWithBlock = 78,
TryExpr = 79,
Throw = 81,
Import = 83,
CompoundAssign = 85,
Assign = 86

src/parser/shrimp.ts (new file; 27 lines)
View File

@ -0,0 +1,27 @@
// This file was generated by lezer-generator. You probably shouldn't edit it.
import {LRParser, LocalTokenGroup} from "@lezer/lr"
import {operatorTokenizer} from "./operatorTokenizer"
import {tokenizer, pipeStartsLineTokenizer, specializeKeyword} from "./tokenizer"
import {trackScope} from "./parserScopeContext"
import {highlighting} from "./highlight"
const spec_Identifier = {__proto__:null,while:78, null:116, catch:122, finally:128, end:130, if:138, else:144, try:160, throw:164, import:168}
export const parser = LRParser.deserialize({
version: 14,
states: "?[QYQ!SOOOOQ!Q'#Ek'#EkO!sO!bO'#DXO%kQ!TO'#DdO&UOSO'#DaOOQ!R'#Da'#DaO)SQ!TO'#EnOOQ!Q'#E{'#E{O)pQRO'#DxO+xQ!TO'#EjO,fQ!SO'#DVOOQ!R'#Dz'#DzO/WQ!SO'#D{OOQ!R'#En'#EnO/_Q!TO'#EnO1cQ!TO'#EmO2qQ!TO'#EjO3OQRO'#ETOOQ!Q'#Ej'#EjO3gQ!SO'#EjO3nQrO'#EiOOQ!Q'#Ei'#EiOOQ!Q'#EV'#EVQYQ!SOOO4PQbO'#D]O4[QbO'#DrO5YQbO'#DSO6WQQO'#D}O5YQbO'#EPO6]QbO'#ERO6eObO,59sOOQ!Q'#D['#D[O6vQbO'#DqOOQ!Q'#Eq'#EqOOQ!Q'#E_'#E_O7QQ!SO,5:`OOQ!R'#Em'#EmO8QQbO'#DcO8`QWO'#DeOOOO'#Es'#EsOOOO'#E['#E[O8tOSO,59{OOQ!R,59{,59{O5YQbO,5:dO5YQbO,5:dO5YQbO,5:dO5YQbO,5:dO5YQbO,59pO5YQbO,59pO5YQbO,59pO5YQbO,59pOOQ!Q'#EX'#EXO,fQ!SO,59qO9SQ!TO'#DdO9^Q!TO'#EnO9hQsO,59qO9uQQO,59qO9zQrO,59qO:VQrO,59qO:eQsO,59qO;TQsO,59qO;[QrO'#DQO;dQ!SO,5:gO;kQrO,5:fOOQ!R,5:g,5:gO;yQ!SO,5:gO<WQbO,5:pO<WQbO,5:oOYQ!SO,5:hO=kQ!SO,59lOOQ!Q,5;T,5;TOYQ!SO'#EWO>]QQO'#EWOOQ!Q-E8T-E8TOOQ!Q'#EY'#EYO>bQbO'#D^O>mQbO'#D_OOQO'#EZ'#EZO>eQQO'#D^O?RQQO,59wO?WQcO'#EmO@TQRO'#EzOAQQRO'#EzOOQO'#Ez'#EzOAXQQO,5:^OA^QRO,59nOAeQRO,59nOYQ!SO,5:iOAsQ!TO,5:kOCXQ!TO,5:kOC{Q!TO,5:kODYQ!SO,5:mOOQ!Q'#Ec'#EcO6]QbO,5:mOOQ!R1G/_1G/_OOQ!Q,5:],5:]OOQ!Q-E8]-E8]OOOO'#Dd'#DdOOOO,59},59}OOOO,5:P,5:POOOO-E8Y-E8YOOQ!R1G/g1G/gOOQ!R1G0O1G0OOF_Q!TO1G0OOFiQ!TO1G0OOG}Q!TO1G0OOHXQ!TO1G0OOHfQ!TO1G0OOOQ!R1G/[1G/[OI}Q!TO1G/[OJUQ!TO1G/[OJ]Q!TO1G/[OKbQ!TO1G/[OJdQ!TO1G/[OOQ!Q-E8V-E8VOKxQsO1G/]OLVQQO1G/]OL[QrO1G/]OLgQrO1G/]OLuQsO1G/]OL|QsO1G/]OMTQ!SO,59rOM_QrO1G/]OOQ!R1G/]1G/]OMjQrO1G0QOOQ!R1G0R1G0ROMxQ!SO1G0ROOQp'#Ea'#EaOMjQrO1G0QOOQ!R1G0Q1G0QOOQ!Q'#Eb'#EbOMxQ!SO1G0RONVQ!SO1G0[ONwQ!SO1G0ZO! iQ!SO'#DlO! }Q!SO'#DlO!!_QbO1G0SOOQ!Q-E8U-E8UOYQ!SO,5:rOOQ!Q,5:r,5:rOYQ!SO,5:rOOQ!Q-E8W-E8WO!!jQQO,59xOOQO,59y,59yOOQO-E8X-E8XOYQ!SO1G/cOYQ!SO1G/xOYQ!SO1G/YO!!rQbO1G0TO!!}Q!SO1G0XO!#rQ!SO1G0XOOQ!Q-E8a-E8aO!#yQrO7+$wOOQ!R7+$w7+$wO!$UQrO1G/^O!$aQrO7+%lOOQ!R7+%l7+%lO!$oQ!SO7+%mOOQ!R7+%m7+%mOOQp-E8_-E8_OOQ!Q-E8`-E8`OOQ!Q'#E]'#E]O!$|QrO'#E]O!%[Q!SO'#EyOOQ`,5:W,5:WO!%lQbO'#DjO!%qQQO'#DmOOQ!Q7+%n7+%nO!%vQbO7+%nO!%{QbO7+%nOOQ!Q1G0^1G0^OYQ!SO1G0^O!&TQ!SO7+$}O!&fQ!SO7+$}O!&sQbO7+%dO!&{QbO7+$tOOQ!Q7+%o7+%oO!'QQbO7+%oO!'VQbO7+%oO!'_Q!SO7+%sOOQ!R<<Hc<<HcO!(SQ!SO7+$xO!(aQrO7+$xOOQ!R<<IW<<IWOOQ!R<<IX<<IXOOQ!Q,5:w,5:wOOQ!Q-E8Z-E8ZO!(lQQO,5:UOYQ!SO,5:XOOQ!Q<<IY<<IYO!(qQbO<<IYOOQ!Q7+%x7+%xOOQ!Q<<Hi<<HiO!(vQbO<<HiO!({QbO<<HiO!)TQbO<<HiOOQ`'#E`'#E`O!)`QbO<<IOO!)hQbO'#DwOOQ!Q<<IO<<IOO!)pQbO<<IOOOQ!Q<<H`<<H`OOQ!Q<<IZ<<IZO!)uQbO<<IZOOQp,5:x,5:xO!)zQ!SO<<HdOOQp-E8[-E8[OYQ!SO1G/pOOQ`1G/s1G/sOOQ!QAN>tAN>tOOQ!QAN>TAN>TO!*XQbOAN>TO!*^QbOAN>TOOQ`-E8^-E8^OOQ!QAN>jAN>jO!*fQbOAN>jO4[QbO,5:aOYQ!SO,5:cOOQ!QAN>uAN>uPMTQ!SO'#EXOOQ`7+%[7+%[OOQ!QG23oG23oO!*kQbOG23oP!)kQbO'#DuOOQ!QG24UG24UO!*pQQO1G/{OOQ`1G/}1G/}OOQ!QLD)ZLD)ZOYQ!SO7+%gOOQ`<<IR<<IRO!*uObO,59sO!+WO!bO'#DX",
stateData: "!+`~O#[OSrOS~OlROmaOn]OoQOpTOqhOwjO|]O}QO!YTO!Z]O![]O!giO!m]O!rkO!tlO!vmO#XPO#`PO#cYO#fSO#qZO#r[O~O#dnO~OltOn]OoQOpTOqhO|]O}QO!SpO!YTO!Z]O![]O!doO!m]O#cYO#fSO#qZO#r[OP#aXQ#aXR#aXS#aXT#aXU#aXW#aXX#aXY#aXZ#aX[#aX]#aX^#aXd#aXe#aXf#aXg#aXh#aXi#aXj#aXu!WX!]!WX#Y!WX#p!WX~O#X!WX#`!WX#t!WX!_!WX!b!WX!c!WX!j!WX~P!xO!UwO#fzO#huO#ivO~OltOn]OoQOpTOqhO|]O}QO!SpO!YTO!Z]O![]O!doO!m]O#cYO#fSO#qZO#r[OP#bXQ#bXR#bXS#bXT#bXU#bXW#bXX#bXY#bXZ#bX[#bX]#bX^#bXd#bXe#bXf#bXg#bXh#bXi#bXj#bXu#bX#Y#bX#p#bX~O#X#bX#`#bX#t#bX!]#bX!_#bX!b#bX!c#bX!j#bX~P&dOP|OQ|OR}OS}OT!QOU!ROW!POX!POY!POZ!PO[!PO]!PO^{Od!OOe!OOf!OOg!OOh!OOi!OOj!SO~OP|OQ|OR}OS}Od!OOe!OOf!OOg!OOh!OOi!OOu#^X#Y#^X~O#X#^X#`#^X#t#^X!_#^X!b#^X!c#^X#p#^X!j#^X~P+QOl!VOmaOn]OoQOpTOqhOwjO|]O}QO!YTO!Z]O![]O!giO!m]O!rkO!tlO!vmO#XPO#`PO#cYO#fSO#qZO#r[O~OltOn]OoQOpTO|]O}QO!SpO!YTO!Z]O![]O!m]O#XPO#`PO#cYO#fSO#qZO#r[O~O#s!bO~P.POV!dO#X#bX#`#bX#t#bX!_#bX!b#bX!c#bX!j#bX~P'iOP#aXQ#aXR#aXS#aXT#aXU#aXW#aXX#aXY#aXZ#aX[#aX]#aX^#aXd#aXe#aXf#aXg#aXh#aXi#aXj#aXu#^X#Y#^X~O#X#^X#`#^X#t#^X!_#^X!b#^X!c#^X#p#^X!j#^X~P/{Ou#^X#X#^X#Y#^X#`#^X#t#^X!_#^X!b#^X!c#^X#p#^X!j#^X~OT!QOU!ROj!SO~P2POV!dO_!eO`!eOa!eOb!eOc!eOk!eO~O!]!fO~P2POu!iO#XPO#Y!jO#`PO#t!hO~Ol!lO!S!nO!]!QP~Ol!rOn]OoQOpTO|]O}QO!YTO!Z]O![]O!m]O#cYO#fSO#qZO#r[O~OltOn]OoQOpTO|]O}QO!YTO!Z]O![]O!m]O#cYO#fSO#qZO#r[O~O!]!yO~Ol!lO!SpO~Ol#QOoQO|#QO}QO#cYO~OqhO!d#RO~P5YOqhO!SpO!doOu!ha!]!ha#X!ha#Y!ha#`!ha#t!ha#p!ha!_!ha!b!ha!c!ha!j!ha~P5YOl#TOo&PO}&PO#cYO~O#f#VO#h#VO#i#VO#j#VO#k#VO#l#VO~O!UwO#f#XO#huO#ivO~O#XPO#`PO~P!xO#XPO#`PO~P&dO#XPO#`PO#p#oO~P+QO#p#oO~O#p#oOu#^X#Y#^X~O!]!fO#p#oOu#^X#Y#^X~O#p#oO~P/{OT!QOU!ROj!SO#XPO#`POu#^X#Y#^X~O#p#oO~P:lOu!iO#Y!jO~O#s#qO~P.PO!SpO#XPO#`PO#s#uO~O#XPO#`PO#s#qO~P5YOlROmaOn]OoQOpTOqhOwjO|]O}QO!YTO!Z]O![]O!giO!m]O!rkO!tlO!vmO#cYO#fSO#qZO#r[O~Ou!iO#Y!jO#Xta#`ta#tta#pta!_ta!bta!cta!jta~Ou$QO~Ol!lO!S!nO!]!QX~OpTO|$TO!YTO!Z$TO![$TO#fSO~O!]$VO~OqhO!SpO!doOT#aXU#aXW#aXX#aXY#aXZ#aX[#aX]#aXj#aX!]#aX~P5YOT!QOU!ROj!SO!]#nX~OT!QOU!ROW!POX!POY!POZ!PO[!PO]!POj!SO~O!]#nX~P@cO!]$WO~O!]$XO~P@cOT!QOU!ROj!SO!]$XO~Ou!sa#X!sa#Y!sa#`!sa#t!sa!_!sa!b!sa!c!sa#p!sa!j!sa~P)pOu!sa#X!sa#Y!sa#`!sa#t!sa!_!sa!b!sa!c!sa#p!sa!j!sa~OP|OQ|OR}OS}Od!OOe!OOf!OOg!OOh!OOi!OO~PBgOT!QOU!ROj!SO~PBgOl!lO!SpOu!ua#X!ua#Y!ua#`!ua#t!ua!_!ua!b!ua!c!ua#p!ua!j!ua~O^{OR!liS!lid!lie!lif!lig!lih!lii!liu!li#X!li#Y!li#`!li#t!li#p!li!_!li!b!li!c!li!j!li~OP!liQ!li~PEQOP|OQ|O~PEQOP|OQ|Od!lie!lif!lig!lih!lii!liu!li#X!li#Y!li#`!li#t!li#p!li!_!li!b!li!c!li!j!li~OR!liS!li~PFsOR}OS}O^{O~PFsOR}OS}O~PFsOW!POX!POY!POZ!PO[!PO]!POTxijxiuxi#Xxi#Yxi#`xi#txi#pxi!]xi!_xi!bxi!cxi!jxi~OU!RO~PHpOU!RO~PISOUxi~PHpOT!QOU!ROjxiuxi#Xxi#Yxi#`xi#txi#pxi!]xi!_xi!bxi!cxi!jxi~OW!POX!POY!POZ!PO[!PO]!PO~PJdO#XPO#`PO#p$_O~P+QO#p$_O~O#p$_Ou#^X#Y#^X~O!]!fO#p$_Ou#^X#Y#^X~O#p$_O~P/{O#p$_O~P:lOqhO!doO~P.PO#XPO#`PO#p$_O~O!SpO#XPO#`PO#s$bO~O#XPO#`PO#s$dO~P5YOu!iO#Y!jO#X!xi#`!xi#t!xi!_!xi!b!xi!c!xi#p!xi!j!xi~Ou!iO#Y!jO#X!wi#`!wi#t!wi!_!wi!b!wi!c!wi#p!wi!j!wi~Ou!iO#Y!jO!_!`X!b!`X!c!`X!j!`X~O!_#mP!b#mP!c#mP!j#mP~PYO!_$kO!b$lO!c$mO~O!S!nO!]!Qa~O!_$kO!b$lO!c$vO~O!SpOu!ui#X!ui#Y!ui#`!ui#t!ui!_!ui!b!ui!c!ui#p!ui!j!ui~Ol!lO~P!!}O#XPO#`PO#p$zO~O#XPO#`PO#pzi~O!SpO#XPO#`PO#s$}O~O#XPO#`PO#s%OO~P5YOu!iO#XPO#Y!jO#`PO~O!_#mX!b#mX!c#mX!j#mX~PYOl%RO~O!]%SO~O!c%TO~O!b$lO!c%TO~Ou!iO!_$kO!b$lO!c%WO#Y!jO~O!_#mP!b#mP!c#mP~PYO!c%_O!j%^O~O!c%aO~O!c%bO~O!b$lO!c%bO~O!SpOu!uq#X!uq#Y!uq#`!uq#t!uq!_!uq!b!uq!c!uq#p!uq!j!uq~OqhO!doO#pzq~P.PO#XPO#`PO#pzq~O!]%gO~O!c%iO~O!c%jO~O!b$lO!c%jO~O!_$kO!b$lO!c%jO~O!c%nO!j%^O~O!]%qO!g%pO~O!c%nO~O!c%rO~OqhO!doO#pzy~P.PO!c%uO~O!b$lO!c%uO~O!c%xO~O!c%{O~O!]%|O~Ol#QOo&PO|#QO}&PO#cYO~O#d&OO~
O|!m~",
goto: "<[#pPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP#qP$_P$w%x'['bPP(v)S*P*SP*YP+d+h+dPPPP,TP,a,yPPP-a#qP.R.oP.s.yP/s0z$_$_P$_P$_P$_$_2T2Z2g3c3q3{4R4Y4`4j4p4z5UPPPPP5d5h6dP7v9oPP:|P;^PPPPP;b;h;nxbOg!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|Q!ZYR#i!U}bOYg!U!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|x`Og!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|Q!^YS!si%pQ!xjQ!|lQ#`!RQ#b!QQ#e!SR#l!U|UOgi!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%p%q%|!W]RU[jlps{|}!O!P!Q!R!S!V!W!`!c!r#m#r#w$c${%e%sS!WY!US#Qn&OR#UuQ!YYR#h!UxROg!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|!WtRU[jlps{|}!O!P!Q!R!S!V!W!`!c!r#m#r#w$c${%e%sS!VY!US!ri%pS#Qn&OR#TueqRUs!V!W!r#m${%e%sxbOg!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|doRUs!V!W!r#m${%e%sQ!ZYQ#RpR#i!UR!qhX!oh!m!p$S#Y]ORUY[gijlps{|}!O!P!Q!R!S!U!V!W!`!c!d!e!f!i!r!y#m#r#w#{$O$Q$V$W$X$c$i$q$s${%S%e%g%p%q%s%|R$T!nTwSy|VOYg!U!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|R#UuQ$o#|Q$x$YQ%Y$rR%l%ZQ#|!fQ$Y!yQ$t$WQ$u$XQ%h%SQ%t%gQ%z%qR%}%|Q$n#|Q$w$YQ%U$oQ%X$rQ%c$xS%k%Y%ZR%v%ldqRUs!V!W!r#m${%e%sQ!a[[#Om!}#P$Z$[$yQ#p!`X#s!a#p#t$a|VOYg!U!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|T!ui%pT%[$t%]Q%`$tR%o%]xXOg!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|Q!XYQ!{lQ#Y|Q#]}Q#_!OR#g!U#Z]ORUY[gijlps{|}!O!P!Q!R!S!U!V!W!`!c!d!e!f!i!r!y#m#r#w#{$O$Q$V$W$X$c$i$q$s${%S%e%g%p%q%s%|![]RU[ijlps{|}!O!P!Q!R!S!V!W!`!c!r#m#r#w$c${%e%p%s}^OYg!U!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|QgOR!kg^!gd!_#x#y#z$h$rR#}!gQ!UYQ!`[d#f!U!`#m#n$O$^$q${%e%sS#m!V!WS#n!X!^Q$O!iS$^#g#lQ$q$QQ${$`R%e$|Q!mhQ!}mU$R!m!}$[R$[#PQ!phQ$S!mT$U!p$SQySR#WyS$i#{$sR%Q$iQ$|$`R%f$|YsRU!V!W!rR#SsQ%]$tR%m%]Q#t!aQ$a#pT$e#t$aQ#w!cQ$c#rT$f#w$cQ#PmQ$Z!}U$]#P$Z$yR$y$[TfOgSdOgS!_Y!UQ#x!dQ#y!e`#z!f!y$W$X%S%g%q%|Q$P!iU$h#{$i$sS$p$O$QQ$r$VR%V$qSeOg|!TY[!U!V!W!X!^!`!i#g#l#m#n$O$Q$^$`$q${$|%e%sQ!hdW#s!a#p#t$aW#v!c#r#w$c`#{!f!y$W$X%S%g%q%|U$g#{$i$sQ$s$VR%P$h|WOYg!U!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|doRUs!V!W!r#m${%e%sQ!c[S!ti%pQ!wjQ!zlQ#RpQ#Y{Q#Z|Q#[}Q#^!OQ#`!PQ#a!QQ#c!RQ#d!SQ#r!`X#v!c#r#w$cx_Og!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|![tRU[ijlps{|}!O!P!Q!R!S!V!W!`!c!r#m#r#w$c${%e%p%sQ!]YR#k!U[rRUs!V!W!rQ$`#mV%d${%e%sTxSyQ$j#{R%Z$sQ!viR%y%pxcOg!d!e!f!i!y#{$O$Q$V$W$X$i$q$s%S%g%q%|Q![YR#j!U",
nodeNames: "⚠ Star Slash Plus Minus And Or Eq EqEq Neq Lt Lte Gt Gte Modulo PlusEq MinusEq StarEq SlashEq ModuloEq Band Bor Bxor Shl Shr Ushr NullishCoalesce NullishEq Identifier AssignableIdentifier Word IdentifierBeforeDot CurlyString Do Comment Program PipeExpr operator WhileExpr keyword ConditionalOp ParenExpr FunctionCall DotGet Number Dollar PositionalArg FunctionDef Params NamedParam NamedArgPrefix String StringFragment Interpolation FunctionCallOrIdentifier EscapeSeq DoubleQuote Boolean Null colon CatchExpr keyword Block FinallyExpr keyword keyword Underscore NamedArg IfExpr keyword FunctionCall ElseIfExpr keyword ElseExpr BinOp Regex Dict Array FunctionCallWithBlock TryExpr keyword Throw keyword Import keyword CompoundAssign Assign",
maxTerm: 128,
context: trackScope,
nodeProps: [
["closedBy", 59,"end"]
],
propSources: [highlighting],
skippedNodes: [0,34],
repeatNodeCount: 13,
tokenData: "Lp~R}OX$OXY$mYp$Opq$mqr$Ors%Wst'^tu(uuw$Owx(|xy)Ryz)lz{$O{|*V|}$O}!O*V!O!P$O!P!Q3r!Q!R*w!R![-l![!]<_!]!^<x!^!}$O!}#O=c#O#P?X#P#Q?^#Q#R$O#R#S?w#S#T$O#T#Y@b#Y#ZA|#Z#b@b#b#cGj#c#f@b#f#gHm#g#h@b#h#iIp#i#o@b#o#p$O#p#qLQ#q;'S$O;'S;=`$g<%l~$O~O$O~~LkS$TU!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OS$jP;=`<%l$O^$tU!US#[YOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU%]Z!USOr%Wrs&Ost%Wtu&iuw%Wwx&ix#O%W#O#P&i#P;'S%W;'S;=`'W<%lO%WU&VU!YQ!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OQ&lTOr&irs&{s;'S&i;'S;=`'Q<%lO&iQ'QO!YQQ'TP;=`<%l&iU'ZP;=`<%l%W^'eZrY!USOY'^YZ$OZt'^tu(Wuw'^wx(Wx#O'^#O#P(W#P;'S'^;'S;=`(o<%lO'^Y(]SrYOY(WZ;'S(W;'S;=`(i<%lO(WY(lP;=`<%l(W^(rP;=`<%l'^^(|O#h[}Q~)RO#f~U)YU!US#cQOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU)sU!US#pQOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU*[X!USOt$Ouw$Ox!Q$O!Q!R*w!R![-l![#O$O#P;'S$O;'S;=`$g<%lO$OU+Ob!US|QOt$Ouw$Ox!O$O!O!P,W!P!Q$O!Q![-l![#O$O#P#R$O#R#S.i#S#U$O#U#V/W#V#c$O#c#d0l#d#l$O#l#m1z#m;'S$O;'S;=`$g<%lO$OU,]W!USOt$Ouw$Ox!Q$O!Q![,u![#O$O#P;'S$O;'S;=`$g<%lO$OU,|Y!US|QOt$Ouw$Ox!Q$O!Q![,u![#O$O#P#R$O#R#S,W#S;'S$O;'S;=`$g<%lO$OU-s[!US|QOt$Ouw$Ox!O$O!O!P,W!P!Q$O!Q![-l![#O$O#P#R$O#R#S.i#S;'S$O;'S;=`$g<%lO$OU.nW!USOt$Ouw$Ox!Q$O!Q![-l![#O$O#P;'S$O;'S;=`$g<%lO$OU/]X!USOt$Ouw$Ox!Q$O!Q!R/x!R!S/x!S#O$O#P;'S$O;'S;=`$g<%lO$OU0PX!US|QOt$Ouw$Ox!Q$O!Q!R/x!R!S/x!S#O$O#P;'S$O;'S;=`$g<%lO$OU0qW!USOt$Ouw$Ox!Q$O!Q!Y1Z!Y#O$O#P;'S$O;'S;=`$g<%lO$OU1bW!US|QOt$Ouw$Ox!Q$O!Q!Y1Z!Y#O$O#P;'S$O;'S;=`$g<%lO$OU2P[!USOt$Ouw$Ox!Q$O!Q![2u![!c$O!c!i2u!i#O$O#P#T$O#T#Z2u#Z;'S$O;'S;=`$g<%lO$OU2|[!US|QOt$Ouw$Ox!Q$O!Q![2u![!c$O!c!i2u!i#O$O#P#T$O#T#Z2u#Z;'S$O;'S;=`$g<%lO$OU3wW!USOt$Ouw$Ox!P$O!P!Q4a!Q#O$O#P;'S$O;'S;=`$g<%lO$OU4f^!USOY5bYZ$OZt5btu6euw5bwx6ex!P5b!P!Q$O!Q!}5b!}#O;W#O#P8s#P;'S5b;'S;=`<X<%lO5bU5i^!US!mQOY5bYZ$OZt5btu6euw5bwx6ex!P5b!P!Q9Y!Q!}5b!}#O;W#O#P8s#P;'S5b;'S;=`<X<%lO5bQ6jX!mQOY6eZ!P6e!P!Q7V!Q!}6e!}#O7t#O#P8s#P;'S6e;'S;=`9S<%lO6eQ7YP!P!Q7]Q7bU!mQ#Z#[7]#]#^7]#a#b7]#g#h7]#i#j7]#m#n7]Q7wVOY7tZ#O7t#O#P8^#P#Q6e#Q;'S7t;'S;=`8m<%lO7tQ8aSOY7tZ;'S7t;'S;=`8m<%lO7tQ8pP;=`<%l7tQ8vSOY6eZ;'S6e;'S;=`9S<%lO6eQ9VP;=`<%l6eU9_W!USOt$Ouw$Ox!P$O!P!Q9w!Q#O$O#P;'S$O;'S;=`$g<%lO$OU:Ob!US!mQOt$Ouw$Ox#O$O#P#Z$O#Z#[9w#[#]$O#]#^9w#^#a$O#a#b9w#b#g$O#g#h9w#h#i$O#i#j9w#j#m$O#m#n9w#n;'S$O;'S;=`$g<%lO$OU;][!USOY;WYZ$OZt;Wtu7tuw;Wwx7tx#O;W#O#P8^#P#Q5b#Q;'S;W;'S;=`<R<%lO;WU<UP;=`<%l;WU<[P;=`<%l5bU<fU!US!]QOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU=PU!US#`QOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU=jW#rQ!USOt$Ouw$Ox!_$O!_!`>S!`#O$O#P;'S$O;'S;=`$g<%lO$OU>XV!USOt$Ouw$Ox#O$O#P#Q>n#Q;'S$O;'S;=`$g<%lO$OU>uU#qQ!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$O~?^O#i~U?eU#sQ!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU@OU!US!dQOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OU@g^!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#o@b#o;'S$O;'S;=`$g<%lO$OUAjU!SQ!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$OUBR_!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#UCQ#U#o@b#o;'S$O;'S;=`$g<%lO$OUCV`!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#`@b#`#aDX#a#o@b#o;'S$O;'S;=`$g<%lO$OUD^`!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#g@b#g#hE`#h#o@b#o;'S$O;'S;=`$g<%lO$OUEe`!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#X@b#X#YFg#Y#o@b#o;'S$O;'S;=`$g<%lO$OUFn^!ZQ!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#o@b#o;'S$O;'S;=`$g<%lO$O^Gq^#jW!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#o@b#o;'S$O;'S;=`$g<%lO$O^Ht^#lW!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#o@b#o;'S$O;'S;=`$g<%lO$O^Iw`#kW!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#T$O#T#f@b#f#gJy#g#o@b#o;'S$O;'S;=`$g<%lO$OUKO`!USOt$Ouw$Ox}$O}!O@b!O!Q$O!Q![@b![!_$O!_!`Ac!`#O$O#P#
T$O#T#i@b#i#jE`#j#o@b#o;'S$O;'S;=`$g<%lO$OULXUuQ!USOt$Ouw$Ox#O$O#P;'S$O;'S;=`$g<%lO$O~LpO#t~",
tokenizers: [operatorTokenizer, 1, 2, 3, tokenizer, pipeStartsLineTokenizer, new LocalTokenGroup("[~RP!O!PU~ZO#d~~", 11)],
topRules: {"Program":[0,35]},
specialized: [{term: 28, get: (value: any, stack: any) => (specializeKeyword(value, stack) << 1), external: specializeKeyword},{term: 28, get: (value: keyof typeof spec_Identifier) => spec_Identifier[value] || -1}],
tokenPrec: 2589
})

View File

@ -244,7 +244,7 @@ const findIdentifierEnd = (input: string, pos: number, maxPos: number): number =
let end = pos
while (end < maxPos) {
const char = input[end]!
const char = input[end]
// Stop at non-identifier characters
if (!/[a-z0-9\-?]/.test(char)) {

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('null', () => {
test('parses null', () => {
expect('null').toMatchTree(`Null null`)

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('bitwise operators - grammar', () => {
test('parses band (bitwise AND)', () => {
expect('5 band 3').toMatchTree(`

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('if/else if/else', () => {
test('parses single line if', () => {
expect(`if y == 1: 'cool' end`).toMatchTree(`

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('Array destructuring', () => {
test('parses array pattern with two variables', () => {
expect('[ a b ] = [ 1 2 3 4]').toMatchTree(`

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('try/catch/finally/throw', () => {
test('parses try with catch', () => {
expect(`try:

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('single line function blocks', () => {
test('work with no args', () => {
expect(`trap: echo bye bye end`).toMatchTree(`

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('calling functions', () => {
test('call with no args', () => {
expect('tail').toMatchTree(`

View File

@ -1,32 +0,0 @@
import { expect, describe, test } from 'bun:test'
describe('import', () => {
test('parses single import', () => {
expect(`import str`).toMatchTree(`
Import
keyword import
Identifier str
`)
})
test('parses multiple imports', () => {
expect(`import str math list`).toMatchTree(`
Import
keyword import
Identifier str
Identifier math
Identifier list
`)
})
test('parses named args', () => {
expect(`import str only=ends-with?`).toMatchTree(`
Import
keyword import
Identifier str
NamedArg
NamedArgPrefix only=
Identifier ends-with?
`)
})
})

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('number literals', () => {
test('binary numbers', () => {
expect('0b110').toMatchTree(`

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('multiline', () => {
test('parses multiline strings', () => {
expect(`'first'\n'second'`).toMatchTree(`

View File

@ -1,4 +1,7 @@
import { expect, describe, test } from 'bun:test'
import { parser } from '../shrimp'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('pipe expressions', () => {
test('simple pipe expression', () => {

View File

@ -1,5 +1,7 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('string interpolation', () => {
test('string with variable interpolation', () => {
expect("'hello $name'").toMatchTree(`

View File

@ -290,7 +290,7 @@ describe('operators', () => {
describe('keywords', () => {
test('keywords', () => {
expect(`import`).toMatchToken('Keyword', 'import')
expect(`use`).toMatchToken('Keyword', 'use')
expect(`end`).toMatchToken('Keyword', 'end')
expect(`do`).toMatchToken('Keyword', 'do')
@ -304,7 +304,6 @@ describe('keywords', () => {
expect(`catch`).toMatchToken('Keyword', 'catch')
expect(`finally`).toMatchToken('Keyword', 'finally')
expect(`throw`).toMatchToken('Keyword', 'throw')
expect(`not`).toMatchToken('Keyword', 'not')
})
})
@ -426,26 +425,6 @@ f
)
})
test('function call w/ parens', () => {
expect('echo(bold hello world)').toMatchTokens(
{ type: 'Identifier', value: 'echo' },
{ type: 'OpenParen' },
{ type: 'Identifier', value: 'bold' },
{ type: 'Identifier', value: 'hello' },
{ type: 'Identifier', value: 'world' },
{ type: 'CloseParen' },
)
expect('echo (bold hello world)').toMatchTokens(
{ type: 'Identifier', value: 'echo' },
{ type: 'OpenParen' },
{ type: 'Identifier', value: 'bold' },
{ type: 'Identifier', value: 'hello' },
{ type: 'Identifier', value: 'world' },
{ type: 'CloseParen' },
)
})
test('assignment', () => {
expect('x = 5').toMatchTokens(
{ type: 'Identifier', value: 'x' },

View File

@ -0,0 +1,34 @@
import { expect, describe, test } from 'bun:test'
import '../shrimp.grammar' // Importing this so changes cause it to retest!
describe('use', () => {
test('parses single use', () => {
expect(`use str`).toMatchTree(`
Use
keyword use
Identifier str
`)
})
test('parses multiple uses', () => {
expect(`use str math list`).toMatchTree(`
Use
keyword use
Identifier str
Identifier math
Identifier list
`)
})
test('parses named args', () => {
expect(`use str only=ends-with?`).toMatchTree(`
Use
keyword use
Identifier str
NamedArg
NamedArgPrefix only=
Identifier ends-with?
`)
})
})

389
src/parser/tokenizer.ts Normal file
View File

@ -0,0 +1,389 @@
import { ExternalTokenizer, InputStream, Stack } from '@lezer/lr'
import { Identifier, AssignableIdentifier, Word, IdentifierBeforeDot, Do, CurlyString, DotGet, newline, pipeStartsLine } from './shrimp.terms'
// doobie doobie do (we need the `do` keyword to know when we're defining params)
export function specializeKeyword(ident: string) {
return ident === 'do' ? Do : -1
}
// tell the dotGet searcher about builtin globals
export const globals: string[] = []
export const setGlobals = (newGlobals: string[] | Record<string, any>) => {
globals.length = 0
globals.push(...(Array.isArray(newGlobals) ? newGlobals : Object.keys(newGlobals)))
}
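
For context, a hedged usage sketch (module names are illustrative; the LSP and debug scripts later in this diff use PRELUDE_NAMES the same way):

import { setGlobals } from './tokenizer'

// Either form works: an array of names, or a record whose keys become the names.
setGlobals(['dict', 'list', 'str', 'math'])
setGlobals({ dict: {}, str: {} }) // same as setGlobals(['dict', 'str'])
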
// The only chars that can't be words are whitespace, newlines, closing parens and brackets, and EOF.
export const tokenizer = new ExternalTokenizer(
(input: InputStream, stack: Stack) => {
const ch = getFullCodePoint(input, 0)
// Handle curly strings
if (ch === 123 /* { */) return consumeCurlyString(input, stack)
if (!isWordChar(ch)) return
// Don't consume things that start with digits - let Number token handle it
if (isDigit(ch)) return
// Don't consume things that start with - or + followed by a digit (negative/positive numbers)
if ((ch === 45 /* - */ || ch === 43 /* + */) && isDigit(input.peek(1))) return
const isValidStart = isIdentStart(ch)
const canBeWord = stack.canShift(Word)
// Consume all word characters, tracking if it remains a valid identifier
const { pos, isValidIdentifier, stoppedAtDot } = consumeWordToken(
input,
isValidStart,
canBeWord
)
// Check if we should emit IdentifierBeforeDot for property access
if (stoppedAtDot) {
const dotGetToken = checkForDotGet(input, stack, pos)
if (dotGetToken) {
input.advance(pos)
input.acceptToken(dotGetToken)
} else {
// Not in scope - continue consuming the dot as part of the word
const afterDot = consumeRestOfWord(input, pos + 1, canBeWord)
input.advance(afterDot)
input.acceptToken(Word)
}
return
}
// Advance past the token we consumed
input.advance(pos)
// Choose which token to emit
if (isValidIdentifier) {
const token = chooseIdentifierToken(input, stack)
input.acceptToken(token)
} else {
input.acceptToken(Word)
}
},
{ contextual: true }
)
// Build identifier text from input stream, handling surrogate pairs for emoji
const buildIdentifierText = (input: InputStream, length: number): string => {
let text = ''
for (let i = 0; i < length; i++) {
const charCode = input.peek(i)
if (charCode === -1) break
// Handle surrogate pairs for emoji (UTF-16 encoding)
if (charCode >= 0xd800 && charCode <= 0xdbff && i + 1 < length) {
const low = input.peek(i + 1)
if (low >= 0xdc00 && low <= 0xdfff) {
text += String.fromCharCode(charCode, low)
i++ // Skip the low surrogate
continue
}
}
text += String.fromCharCode(charCode)
}
return text
}
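
As a hedged aside on why this matters: an emoji identifier occupies two UTF-16 code units in the input stream, so the loop above re-joins high/low surrogate pairs instead of emitting two garbage characters.

// Illustration using plain string semantics (not part of the diff):
'🦐'.length                          // 2
'🦐'.charCodeAt(0)                   // 0xd83e (high surrogate)
'🦐'.charCodeAt(1)                   // 0xdd90 (low surrogate)
String.fromCharCode(0xd83e, 0xdd90)  // '🦐' again
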
// Consume word characters, tracking if it remains a valid identifier
// Returns the position after consuming, whether it's a valid identifier, and if we stopped at a dot
const consumeWordToken = (
input: InputStream,
isValidStart: boolean,
canBeWord: boolean
): { pos: number; isValidIdentifier: boolean; stoppedAtDot: boolean } => {
let pos = getCharSize(getFullCodePoint(input, 0))
let isValidIdentifier = isValidStart
let stoppedAtDot = false
while (true) {
const ch = getFullCodePoint(input, pos)
// Stop at dot if we have a valid identifier (might be property access)
if (ch === 46 /* . */ && isValidIdentifier) {
stoppedAtDot = true
break
}
// Stop if we hit a non-word character
if (!isWordChar(ch)) break
// Context-aware termination: semicolon/colon can end a word if followed by whitespace
// This allows `hello; 2` to parse correctly while `hello;world` stays as one word
if (canBeWord && (ch === 59 /* ; */ || ch === 58 /* : */)) {
const nextCh = getFullCodePoint(input, pos + 1)
if (!isWordChar(nextCh)) break
}
// Track identifier validity: must be lowercase, digit, dash, or emoji/unicode
if (!isIdentChar(ch)) {
if (!canBeWord) break
isValidIdentifier = false
}
pos += getCharSize(ch)
}
return { pos, isValidIdentifier, stoppedAtDot }
}
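
Some hedged examples of the classification this produces (inputs illustrative, assuming canBeWord is true):

// 'hello-world?' -> isValidIdentifier stays true (lowercase, '-', '?')
// 'Hello'        -> consumed as a Word: 'H' is not a valid identifier char
// 'file.txt'     -> stops at the dot with isValidIdentifier=true, so the
//                   caller can choose between DotGet and a plain Word
// 'hello; 2'     -> stops before ';' because the next char is whitespace
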
// Consume the rest of a word after we've decided not to treat a dot as DotGet
// Used when we have "file.txt" - we already consumed "file", now consume ".txt"
const consumeRestOfWord = (input: InputStream, startPos: number, canBeWord: boolean): number => {
let pos = startPos
while (true) {
const ch = getFullCodePoint(input, pos)
// Stop if we hit a non-word character
if (!isWordChar(ch)) break
// Context-aware termination for semicolon/colon
if (canBeWord && (ch === 59 /* ; */ || ch === 58 /* : */)) {
const nextCh = getFullCodePoint(input, pos + 1)
if (!isWordChar(nextCh)) break
}
pos += getCharSize(ch)
}
return pos
}
// Consumes { curly strings } and tracks braces so you can { have { braces { inside { braces } } } }
const consumeCurlyString = (input: InputStream, stack: Stack) => {
if (!stack.canShift(CurlyString)) return
let depth = 0
let pos = 0
while (true) {
const ch = input.peek(pos)
if (ch < 0) return // EOF - invalid
if (ch === 123) depth++ // {
else if (ch === 125) { // }
depth--
if (depth === 0) {
pos++ // consume final }
break
}
}
pos++
}
input.acceptToken(CurlyString, pos)
}
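
A minimal sketch of the same depth-tracking idea over a plain string, for illustration only (the real tokenizer reads an InputStream and bails out via canShift):

const curlyLength = (src: string): number | null => {
  let depth = 0
  for (let i = 0; i < src.length; i++) {
    if (src[i] === '{') depth++
    else if (src[i] === '}' && --depth === 0) return i + 1 // include final }
  }
  return null // unbalanced: no token
}
curlyLength('{ a { b } c }') // 13, the whole string is one CurlyString
curlyLength('{ oops')        // null
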
// Check if this identifier is in scope (for property access detection)
// Returns IdentifierBeforeDot token if in scope, null otherwise
const checkForDotGet = (input: InputStream, stack: Stack, pos: number): number | null => {
const identifierText = buildIdentifierText(input, pos)
const context = stack.context as { scope: { has(name: string): boolean } } | undefined
// Check if identifier is in scope (lexical scope or globals)
const inScope = context?.scope.has(identifierText) || globals.includes(identifierText)
// property access
if (inScope) return IdentifierBeforeDot
// Not in scope - check if we're inside a DotGet chain
// Inside the @skip {} block where DotGet is defined, Word cannot be shifted
// but Identifier can be. This tells us we're at the RHS of a DotGet.
const canShiftIdentifier = stack.canShift(Identifier)
const canShiftWord = stack.canShift(Word)
const inDotGetChain = canShiftIdentifier && !canShiftWord
// continue if we're inside a DotGet
return inDotGetChain ? IdentifierBeforeDot : null
}
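
Hedged examples of the resulting split, assuming `str` was registered via setGlobals and `file` was not:

// 'str.upper x' -> 'str' is in scope: emit IdentifierBeforeDot, parse a DotGet
// 'file.txt'    -> 'file' is not in scope: the dot gets consumed and the
//                  whole thing tokenizes as a single Word
// after 'str.' the RHS can shift Identifier but not Word, so nested chains
// like 'str.trim.x' keep emitting IdentifierBeforeDot at each dot
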
// Decide between AssignableIdentifier and Identifier using grammar state + peek-ahead
const chooseIdentifierToken = (input: InputStream, stack: Stack): number => {
const canAssignable = stack.canShift(AssignableIdentifier)
const canRegular = stack.canShift(Identifier)
// Only one option is valid - use it
if (canAssignable && !canRegular) return AssignableIdentifier
if (canRegular && !canAssignable) return Identifier
// Both possible (ambiguous context) - peek ahead for '=' to disambiguate
// This happens at statement start where both `x = 5` (assign) and `echo x` (call) are valid
let peekPos = 0
while (true) {
const ch = getFullCodePoint(input, peekPos)
if (isWhiteSpace(ch)) {
peekPos += getCharSize(ch)
} else {
break
}
}
const nextCh = getFullCodePoint(input, peekPos)
const nextCh2 = getFullCodePoint(input, peekPos + 1)
const nextCh3 = getFullCodePoint(input, peekPos + 2)
// Check for ??= (three-character compound operator)
if (nextCh === 63 /* ? */ && nextCh2 === 63 /* ? */ && nextCh3 === 61 /* = */) {
const charAfterOp = getFullCodePoint(input, peekPos + 3)
if (isWhiteSpace(charAfterOp) || charAfterOp === -1 /* EOF */) {
return AssignableIdentifier
}
}
// Check for compound assignment operators: +=, -=, *=, /=, %=
if (
[43 /* + */, 45 /* - */, 42 /* * */, 47 /* / */, 37 /* % */].includes(nextCh) &&
nextCh2 === 61 /* = */
) {
// Found compound operator, check if it's followed by whitespace
const charAfterOp = getFullCodePoint(input, peekPos + 2)
if (isWhiteSpace(charAfterOp) || charAfterOp === -1 /* EOF */) {
return AssignableIdentifier
}
}
if (nextCh === 61 /* = */) {
// Found '=', but check if it's followed by whitespace
// If '=' is followed by non-whitespace (like '=cool*'), it won't be tokenized as Eq
// In that case, this should be Identifier (for function call), not AssignableIdentifier
const charAfterEquals = getFullCodePoint(input, peekPos + 1)
if (isWhiteSpace(charAfterEquals) || charAfterEquals === -1 /* EOF */) {
return AssignableIdentifier
}
}
return Identifier
}
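
Hedged examples of the peek-ahead (the tokenizer sits just past the identifier; inputs illustrative):

// 'x = 5'    -> '=' then whitespace            -> AssignableIdentifier
// 'x += 1'   -> compound op then whitespace    -> AssignableIdentifier
// 'x ??= y'  -> three-char compound op         -> AssignableIdentifier
// 'echo x'   -> no assignment operator ahead   -> Identifier
// 'x =cool'  -> '=' not followed by whitespace -> Identifier (function call)
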
// Character classification helpers
export const isIdentStart = (ch: number): boolean => {
return isLowercaseLetter(ch) || isEmojiOrUnicode(ch)
}
export const isIdentChar = (ch: number): boolean => {
return isLowercaseLetter(ch) || isDigit(ch) || ch === 45 /* - */ || ch === 63 /* ? */ || isEmojiOrUnicode(ch)
}
const isWhiteSpace = (ch: number): boolean => {
return ch === 32 /* space */ || ch === 9 /* tab */ || ch === 13 /* \r */
}
const isWordChar = (ch: number): boolean => {
return (
!isWhiteSpace(ch) &&
ch !== 10 /* \n */ &&
ch !== 41 /* ) */ &&
ch !== 93 /* ] */ &&
ch !== -1 /* EOF */
)
}
const isLowercaseLetter = (ch: number): boolean => {
return ch >= 97 && ch <= 122 // a-z
}
const isDigit = (ch: number): boolean => {
return ch >= 48 && ch <= 57 // 0-9
}
const getFullCodePoint = (input: InputStream, pos: number): number => {
const ch = input.peek(pos)
// Check if this is a high surrogate (0xD800-0xDBFF)
if (ch >= 0xd800 && ch <= 0xdbff) {
const low = input.peek(pos + 1)
// Check if next is low surrogate (0xDC00-0xDFFF)
if (low >= 0xdc00 && low <= 0xdfff) {
// Combine surrogate pair into full code point
return 0x10000 + ((ch & 0x3ff) << 10) + (low & 0x3ff)
}
}
return ch
}
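
A worked example of that reassembly: U+1F600 is stored as the surrogate pair 0xD83D 0xDE00, and the math above rebuilds the code point.

// 0x10000 + ((0xd83d & 0x3ff) << 10) + (0xde00 & 0x3ff)
//   = 0x10000 + (0x3d << 10) + 0x200
//   = 0x10000 + 0xf400 + 0x200
//   = 0x1f600
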
const isEmojiOrUnicode = (ch: number): boolean => {
return (
// Basic Emoticons
(ch >= 0x1f600 && ch <= 0x1f64f) ||
// Miscellaneous Symbols and Pictographs
(ch >= 0x1f300 && ch <= 0x1f5ff) ||
// Transport and Map Symbols
(ch >= 0x1f680 && ch <= 0x1f6ff) ||
// Regional Indicator Symbols (flags)
(ch >= 0x1f1e6 && ch <= 0x1f1ff) ||
// Miscellaneous Symbols (hearts, stars, weather)
(ch >= 0x2600 && ch <= 0x26ff) ||
// Dingbats (scissors, pencils, etc)
(ch >= 0x2700 && ch <= 0x27bf) ||
// Supplemental Symbols and Pictographs (newer emojis)
(ch >= 0x1f900 && ch <= 0x1f9ff) ||
// Symbols and Pictographs Extended-A (newest emojis)
(ch >= 0x1fa70 && ch <= 0x1faff) ||
// Various Asian Characters with emoji presentation
(ch >= 0x1f018 && ch <= 0x1f270) ||
// Variation Selectors (for emoji presentation)
(ch >= 0xfe00 && ch <= 0xfe0f) ||
// Additional miscellaneous items
(ch >= 0x238c && ch <= 0x2454) ||
// Combining Diacritical Marks for Symbols
(ch >= 0x20d0 && ch <= 0x20ff) ||
// Latin-1 Supplement (includes ², ³, ¹ and other special chars)
(ch >= 0x00a0 && ch <= 0x00ff) ||
// Greek and Coptic (U+0370-U+03FF)
(ch >= 0x0370 && ch <= 0x03ff) ||
// Mathematical Alphanumeric Symbols (U+1D400-U+1D7FF)
(ch >= 0x1d400 && ch <= 0x1d7ff) ||
// Mathematical Operators (U+2200-U+22FF)
(ch >= 0x2200 && ch <= 0x22ff) ||
// Superscripts and Subscripts (U+2070-U+209F)
(ch >= 0x2070 && ch <= 0x209f) ||
// Arrows (U+2190-U+21FF)
(ch >= 0x2190 && ch <= 0x21ff) ||
// Hiragana (U+3040-U+309F)
(ch >= 0x3040 && ch <= 0x309f) ||
// Katakana (U+30A0-U+30FF)
(ch >= 0x30a0 && ch <= 0x30ff) ||
// CJK Unified Ideographs (U+4E00-U+9FFF)
(ch >= 0x4e00 && ch <= 0x9fff)
)
}
const getCharSize = (ch: number) => (ch > 0xffff ? 2 : 1) // emoji takes 2 UTF-16 code units
export const pipeStartsLineTokenizer = new ExternalTokenizer((input: InputStream, stack: Stack) => {
const ch = input.peek(0)
if (ch !== 10 /* \n */) return
// ignore whitespace
let offset = 1
let lastNewlineOffset = 0
while (true) {
const ch = input.peek(offset)
if (ch === 10 /* \n */) {
lastNewlineOffset = offset
offset++
} else if (isWhiteSpace(ch)) {
offset++
} else {
break
}
}
// look for pipe after skipping empty lines
if (input.peek(offset) === 124 /* | */) {
input.advance(lastNewlineOffset + 1)
input.acceptToken(pipeStartsLine)
} else {
input.advance(1)
input.acceptToken(newline)
}
})
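
A hedged illustration of the effect, with shrimp input shown as comments (pipe syntax as in the pipe-expression tests above): a line that starts with `|` continues the previous pipeline even across blank lines.

// date.now | number?    -- one line, ordinary pipe
//
// date.now
//
//   | number?           -- same pipeline: the lookahead skips the blank line,
//                          sees '|', and emits pipeStartsLine instead of newline
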

View File

@ -93,7 +93,7 @@ const operators = new Set([
])
const keywords = new Set([
'import',
'use',
'end',
'do',
'if',
@ -104,7 +104,6 @@ const keywords = new Set([
'catch',
'finally',
'throw',
'not',
])
// helper
@ -475,12 +474,12 @@ const isStringDelim = (ch: number): boolean => {
return ch === c`'` || ch === c`"`
}
export const isIdentStart = (char: number | string): boolean => {
const isIdentStart = (char: number | string): boolean => {
let ch = typeof char === 'string' ? char.charCodeAt(0) : char
return isLowercaseLetter(ch) || isEmojiOrUnicode(ch) || ch === 36 /* $ */
}
export const isIdentChar = (char: number | string): boolean => {
const isIdentChar = (char: number | string): boolean => {
let ch = typeof char === 'string' ? char.charCodeAt(0) : char
return isIdentStart(ch) || isDigit(ch) || ch === 45 /* - */ || ch === 63 /* ? */
}
@ -508,7 +507,6 @@ const isWordChar = (ch: number): boolean => {
!isWhitespace(ch) &&
ch !== 10 /* \n */ &&
ch !== 59 /* ; */ &&
ch !== 40 /* ( */ &&
ch !== 41 /* ) */ &&
ch !== 93 /* ] */ &&
ch !== -1 /* EOF */

View File

@ -1,12 +0,0 @@
export const date = {
now: () => Date.now(),
year: (time: number) => (new Date(time)).getFullYear(),
month: (time: number) => (new Date(time)).getMonth(),
date: (time: number) => (new Date(time)).getDate(),
hour: (time: number) => (new Date(time)).getHours(),
minute: (time: number) => (new Date(time)).getMinutes(),
second: (time: number) => (new Date(time)).getSeconds(),
ms: (time: number) => (new Date(time)).getMilliseconds(),
new: (year: number, month: number, day: number, hour = 0, minute = 0, second = 0, ms = 0) =>
new Date(year, month, day, hour, minute, second, ms).getTime()
}

View File

@ -6,7 +6,6 @@ import {
extractParamInfo, isWrapped, getOriginalFunction,
} from 'reefvm'
import { date } from './date'
import { dict } from './dict'
import { fs } from './fs'
import { json } from './json'
@ -14,10 +13,8 @@ import { load } from './load'
import { list } from './list'
import { math } from './math'
import { str } from './str'
import { types } from './types'
export const globals: Record<string, any> = {
date,
export const globals = {
dict,
fs,
json,
@ -37,6 +34,7 @@ export const globals: Record<string, any> = {
name: Bun.argv[2] || '(shrimp)',
path: resolve(join('.', Bun.argv[2] ?? ''))
},
},
// hello
@ -86,9 +84,20 @@ export const globals: Record<string, any> = {
exit: (num: number) => process.exit(num ?? 0),
// type predicates
'string?': (v: any) => toValue(v).type === 'string',
'number?': (v: any) => toValue(v).type === 'number',
'boolean?': (v: any) => toValue(v).type === 'boolean',
'array?': (v: any) => toValue(v).type === 'array',
'dict?': (v: any) => toValue(v).type === 'dict',
'function?': (v: any) => {
const t = toValue(v).type
return t === 'function' || t === 'native'
},
'null?': (v: any) => toValue(v).type === 'null',
'some?': (v: any) => toValue(v).type !== 'null',
// boolean/logic
not: (v: any) => !v,
bnot: (n: number) => ~(n | 0),
// utilities
@ -204,7 +213,3 @@ export function formatValue(value: Value, inner = false): string {
return String(value)
}
}
// add types functions to top-level namespace
for (const [key, value] of Object.entries(types))
globals[key] = value

View File

@ -16,10 +16,7 @@ export const math = {
if (n < 0) throw new Error(`sqrt: cannot take square root of negative number ${n}`)
return Math.sqrt(n)
},
random: (min = 0, max = 1) => {
if (min === 0 && max === 1) return Math.random()
return Math.floor(Math.random() * (max - min + 1)) + min
},
random: () => Math.random(),
clamp: (n: number, min: number, max: number) => {
if (min > max) throw new Error(`clamp: min (${min}) must be less than or equal to max (${max})`)
return Math.min(Math.max(n, min), max)

View File

@ -28,16 +28,6 @@ export const str = {
},
'pad-start': (str: string, length: number, pad: string = ' ') => String(str ?? '').padStart(length, pad),
'pad-end': (str: string, length: number, pad: string = ' ') => String(str ?? '').padEnd(length, pad),
capitalize: (str: string) => {
const s = String(str ?? '')
return s.charAt(0).toUpperCase() + s.slice(1).toLowerCase()
},
titlecase: (s: string) => {
return String(s ?? '')
.split(' ')
.map(str.capitalize)
.join(' ')
},
lines: (str: string) => String(str ?? '').split('\n'),
chars: (str: string) => String(str ?? '').split(''),

View File

@ -1,170 +0,0 @@
import { expect, describe, test } from 'bun:test'
describe('date', () => {
test('date.now returns current timestamp', () => {
expect(`date.now | number?`).toEvaluateTo(true)
expect(`(date.now) > 1577836800000`).toEvaluateTo(true)
})
test('date.new creates timestamp from components', () => {
expect(`
t = date.new 2024 0 1 12 0 0 500
[
(date.year t)
(date.month t)
(date.date t)
(date.hour t)
(date.minute t)
(date.second t)
(date.ms t)
]
`).toEvaluateTo([2024, 0, 1, 12, 0, 0, 500])
})
test('date.new with minimal arguments', () => {
expect(`
t = date.new 2024 5 15
[
(date.year t)
(date.month t)
(date.date t)
(date.hour t)
(date.minute t)
(date.second t)
(date.ms t)
]
`).toEvaluateTo([2024, 5, 15, 0, 0, 0, 0])
})
test('date.year extracts year', () => {
expect(`
t = date.new 2024 0 1
date.year t
`).toEvaluateTo(2024)
expect(`
t = date.new 1999 11 31
date.year t
`).toEvaluateTo(1999)
})
test('date.month extracts month (0-indexed)', () => {
// January = 0, December = 11
expect(`
jan = date.new 2024 0 1
dec = date.new 2024 11 31
[(date.month jan) (date.month dec)]
`).toEvaluateTo([0, 11])
})
test('date.date extracts day of month', () => {
expect(`
t = date.new 2024 5 15
date.date t
`).toEvaluateTo(15)
expect(`
date.new 2024 0 1 | date.date
`).toEvaluateTo(1)
})
test('date.hour extracts hour', () => {
expect(`
t = date.new 2024 0 1 14 30 45
date.hour t
`).toEvaluateTo(14)
expect(`
t = date.new 2024 0 1 0 0 0
date.hour t
`).toEvaluateTo(0)
})
test('date.minute extracts minute', () => {
expect(`
t = date.new 2024 0 1 14 30 45
date.minute t
`).toEvaluateTo(30)
})
test('date.second extracts second', () => {
expect(`
t = date.new 2024 0 1 14 30 45
date.second t
`).toEvaluateTo(45)
})
test('date.ms extracts milliseconds', () => {
expect(`
t = date.new 2024 0 1 14 30 45 250
date.ms t
`).toEvaluateTo(250)
})
test('round-trip: create and extract components', () => {
expect(`
t = date.new 2024 6 4 15 30 45 123
year = date.year t
month = date.month t
day = date.date t
hour = date.hour t
min = date.minute t
sec = date.second t
ms = date.ms t
[year month day hour min sec ms]
`).toEvaluateTo([2024, 6, 4, 15, 30, 45, 123])
})
test('edge cases - midnight', () => {
expect(`
t = date.new 2024 0 1 0 0 0 0
[
(date.hour t)
(date.minute t)
(date.second t)
(date.ms t)
]
`).toEvaluateTo([0, 0, 0, 0])
})
test('edge cases - end of day', () => {
expect(`
t = date.new 2024 0 1 23 59 59 999
[
(date.hour t)
(date.minute t)
(date.second t)
(date.ms t)
]
`).toEvaluateTo([23, 59, 59, 999])
})
test('edge cases - leap year', () => {
expect(`
t = date.new 2024 1 29
[
(date.year t)
(date.month t)
(date.date t)
]
`).toEvaluateTo([2024, 1, 29])
})
test('combining date functions with arithmetic', () => {
expect(`
t = date.new 2024 5 15 10 30 0
next-hour = date.new 2024 5 15 11 30 0
(date.hour next-hour) - (date.hour t)
`).toEvaluateTo(1)
})
test('using date.now in calculations', () => {
// Check that date.now is in the past compared to a future timestamp
expect(`
now = (date.now)
future = date.new 2030 0 1
future > now
`).toEvaluateTo(true)
})
})

View File

@ -16,18 +16,6 @@ describe('string operations', () => {
await expect(`str.trim '\\n\\thello\\t\\n'`).toEvaluateTo('hello')
})
test('capitalize makes first char uppercase', async () => {
await expect(`str.capitalize 'hello'`).toEvaluateTo('Hello')
await expect(`str.capitalize 'HELLO'`).toEvaluateTo('Hello')
await expect(`str.capitalize 'hello world'`).toEvaluateTo('Hello world')
})
test('titlecase capitalizes each word', async () => {
await expect(`str.titlecase 'hello world'`).toEvaluateTo('Hello World')
await expect(`str.titlecase 'HELLO WORLD'`).toEvaluateTo('Hello World')
await expect(`str.titlecase 'the quick brown fox'`).toEvaluateTo('The Quick Brown Fox')
})
test('split divides string by separator', async () => {
await expect(`str.split 'a,b,c' ','`).toEvaluateTo(['a', 'b', 'c'])
await expect(`str.split 'hello' ''`).toEvaluateTo(['h', 'e', 'l', 'l', 'o'])
@ -117,17 +105,6 @@ describe('boolean logic', () => {
await expect(`not 42`).toEvaluateTo(false)
await expect(`not null`).toEvaluateTo(true)
})
test('not works with function calls', async () => {
await expect(`equals = do x y: x == y end; not equals 5 5`).toEvaluateTo(false)
await expect(`equals = do x y: x == y end; not equals 5 10`).toEvaluateTo(true)
})
test('not works with binary operations and comparisons', async () => {
await expect(`not 5 > 10`).toEvaluateTo(true)
await expect(`not 10 > 5`).toEvaluateTo(false)
await expect(`not true and false`).toEvaluateTo(true)
})
})
describe('utilities', () => {

View File

@ -1,143 +0,0 @@
import { expect, describe, test } from 'bun:test'
describe('type predicates', () => {
test('boolean? checks if value is boolean', async () => {
await expect(`boolean? true`).toEvaluateTo(true)
await expect(`boolean? false`).toEvaluateTo(true)
await expect(`boolean? 42`).toEvaluateTo(false)
await expect(`boolean? 'hello'`).toEvaluateTo(false)
await expect(`boolean? null`).toEvaluateTo(false)
await expect(`boolean? [1 2 3]`).toEvaluateTo(false)
})
test('number? checks if value is number', async () => {
await expect(`number? 42`).toEvaluateTo(true)
await expect(`number? 3.14`).toEvaluateTo(true)
await expect(`number? 0`).toEvaluateTo(true)
await expect(`number? -5`).toEvaluateTo(true)
await expect(`number? 'hello'`).toEvaluateTo(false)
await expect(`number? true`).toEvaluateTo(false)
await expect(`number? null`).toEvaluateTo(false)
})
test('string? checks if value is string', async () => {
await expect(`string? 'hello'`).toEvaluateTo(true)
await expect(`string? ''`).toEvaluateTo(true)
await expect(`string? world`).toEvaluateTo(true)
await expect(`string? 42`).toEvaluateTo(false)
await expect(`string? true`).toEvaluateTo(false)
await expect(`string? null`).toEvaluateTo(false)
await expect(`string? [1 2 3]`).toEvaluateTo(false)
})
test('array? checks if value is array', async () => {
await expect(`array? [1 2 3]`).toEvaluateTo(true)
await expect(`array? []`).toEvaluateTo(true)
await expect(`array? ['a' 'b']`).toEvaluateTo(true)
await expect(`array? [a=1 b=2]`).toEvaluateTo(false)
await expect(`array? 42`).toEvaluateTo(false)
await expect(`array? 'hello'`).toEvaluateTo(false)
await expect(`array? null`).toEvaluateTo(false)
})
test('list? is alias for array?', async () => {
await expect(`list? [1 2 3]`).toEvaluateTo(true)
await expect(`list? []`).toEvaluateTo(true)
await expect(`list? [a=1 b=2]`).toEvaluateTo(false)
})
test('dict? checks if value is dict', async () => {
await expect(`dict? [a=1 b=2]`).toEvaluateTo(true)
await expect(`dict? [=]`).toEvaluateTo(true)
await expect(`dict? [1 2 3]`).toEvaluateTo(false)
await expect(`dict? []`).toEvaluateTo(false)
await expect(`dict? 42`).toEvaluateTo(false)
await expect(`dict? 'hello'`).toEvaluateTo(false)
})
test('function? checks if value is function', async () => {
await expect(`
my-fn = do x: x * 2 end
function? my-fn
`).toEvaluateTo(true)
await expect(`function? inc`).toEvaluateTo(true)
await expect(`function? list.map`).toEvaluateTo(true)
await expect(`function? 42`).toEvaluateTo(false)
await expect(`function? 'hello'`).toEvaluateTo(false)
await expect(`function? [1 2 3]`).toEvaluateTo(false)
})
test('null? checks if value is null', async () => {
await expect(`null? null`).toEvaluateTo(true)
await expect(`null? 0`).toEvaluateTo(false)
await expect(`null? false`).toEvaluateTo(false)
await expect(`null? ''`).toEvaluateTo(false)
await expect(`null? []`).toEvaluateTo(false)
})
})
describe('type coercion', () => {
test('boolean coerces to boolean', async () => {
await expect(`boolean true`).toEvaluateTo(true)
await expect(`boolean false`).toEvaluateTo(false)
await expect(`boolean 1`).toEvaluateTo(true)
await expect(`boolean 0`).toEvaluateTo(false)
await expect(`boolean 'hello'`).toEvaluateTo(true)
await expect(`boolean ''`).toEvaluateTo(false)
await expect(`boolean null`).toEvaluateTo(false)
await expect(`boolean [1 2 3]`).toEvaluateTo(true)
})
test('number coerces to number', async () => {
await expect(`number 42`).toEvaluateTo(42)
await expect(`number '42'`).toEvaluateTo(42)
await expect(`number '3.14'`).toEvaluateTo(3.14)
await expect(`number true`).toEvaluateTo(1)
await expect(`number false`).toEvaluateTo(0)
})
test('string coerces to string', async () => {
await expect(`string 'hello'`).toEvaluateTo('hello')
await expect(`string 42`).toEvaluateTo('42')
await expect(`string true`).toEvaluateTo('true')
await expect(`string false`).toEvaluateTo('false')
await expect(`string null`).toEvaluateTo('null')
})
})
describe('type predicates in conditionals', () => {
test('using type predicates in if statements', async () => {
await expect(`
x = 42
if (number? x):
'is-num'
else:
'not-num'
end
`).toEvaluateTo('is-num')
})
test('filtering by type', async () => {
await expect(`
items = [1 'hello' 2 'world' 3]
list.filter items number?
`).toEvaluateTo([1, 2, 3])
})
test('filtering strings', async () => {
await expect(`
items = [1 'hello' 2 'world' 3]
list.filter items string?
`).toEvaluateTo(['hello', 'world'])
})
test('checking for functions', async () => {
await expect(`
double = do x: x * 2 end
not-fn = 42
is-fn = function? double
is-not-fn = function? not-fn
is-fn and (not is-not-fn)
`).toEvaluateTo(true)
})
})

View File

@ -1,22 +0,0 @@
import { toValue } from 'reefvm'
export const types = {
'boolean?': (v: any) => toValue(v).type === 'boolean',
boolean: (v: any) => Boolean(v),
'number?': (v: any) => toValue(v).type === 'number',
number: (v: any) => Number(v),
'string?': (v: any) => toValue(v).type === 'string',
string: (v: any) => String(v),
'array?': (v: any) => toValue(v).type === 'array',
'list?': (v: any) => toValue(v).type === 'array',
'dict?': (v: any) => toValue(v).type === 'dict',
'function?': (v: any) => ['function', 'native'].includes(toValue(v).type),
'null?': (v: any) => toValue(v).type === 'null',
}

View File

@ -2,13 +2,36 @@ import { expect } from 'bun:test'
import { diffLines } from 'diff'
import color from 'kleur'
import { Scanner, TokenType, type Token } from '#parser/tokenizer2'
import { parse, setGlobals } from '#parser/parser2'
import { Tree } from '#parser/node'
import { parser } from '#parser/shrimp'
import { setGlobals } from '#parser/tokenizer'
import { parse } from '#parser/parser2'
import { globals as prelude } from '#prelude'
import { $ } from 'bun'
import { assert, errorMessage } from '#utils/utils'
import { Compiler } from '#compiler/compiler'
import { run, VM } from 'reefvm'
import { treeToString2, VMResultToValue } from '#utils/tree'
import { treeToString2, treeToString, VMResultToValue } from '#utils/tree'
const regenerateParser = async () => {
let generate = true
try {
const grammarStat = await Bun.file('./src/parser/shrimp.grammar').stat()
const tokenizerStat = await Bun.file('./src/parser/tokenizer.ts').stat()
const parserStat = await Bun.file('./src/parser/shrimp.ts').stat()
if (grammarStat.mtime <= parserStat.mtime && tokenizerStat.mtime <= parserStat.mtime) {
generate = false
}
} catch (e) {
console.error('Error checking parser freshness:', e)
} finally {
if (generate) {
await $`bun generate-parser`
}
}
}
await regenerateParser()
// Type declaration for TypeScript
declare module 'bun:test' {
@ -50,8 +73,7 @@ expect.extend({
assert(typeof received === 'string', 'toFailParse can only be used with string values')
try {
const node = parse(received)
const tree = new Tree(node)
const tree = parser.parse(received)
let hasErrors = false
tree.iterate({
enter(n) {
@ -68,7 +90,7 @@ expect.extend({
pass: true,
}
} else {
const actual = treeToString2(node, received)
const actual = treeToString(tree, received)
return {
message: () => `Expected input to fail parsing, but it parsed successfully:\n${actual}`,
pass: false,

View File

@ -8,57 +8,6 @@ export type CompletionMetadata = {
export const completions = {
modules: {
"date": {
"now": {
"params": []
},
"year": {
"params": [
"time"
]
},
"month": {
"params": [
"time"
]
},
"date": {
"params": [
"time"
]
},
"hour": {
"params": [
"time"
]
},
"minute": {
"params": [
"time"
]
},
"second": {
"params": [
"time"
]
},
"ms": {
"params": [
"time"
]
},
"new": {
"params": [
"year",
"month",
"day",
"hour",
"minute",
"second",
"ms"
]
}
},
"dict": {
"keys": {
"params": [
@ -580,10 +529,7 @@ export const completions = {
]
},
"random": {
"params": [
"min",
"max"
]
"params": []
},
"clamp": {
"params": [
@ -739,16 +685,6 @@ export const completions = {
"pad"
]
},
"capitalize": {
"params": [
"str"
]
},
"titlecase": {
"params": [
"s"
]
},
"lines": {
"params": [
"str"

View File

@ -6,9 +6,7 @@ export const PRELUDE_NAMES = [
"array?",
"at",
"bnot",
"boolean",
"boolean?",
"date",
"dec",
"describe",
"dict",
@ -26,18 +24,15 @@ export const PRELUDE_NAMES = [
"json",
"length",
"list",
"list?",
"load",
"math",
"not",
"null?",
"number",
"number?",
"range",
"ref",
"some?",
"str",
"string",
"string?",
"type",
"var",

View File

@ -17,6 +17,7 @@ import {
CompletionItemKind,
TextDocumentChangeEvent,
} from 'vscode-languageserver/node'
import { setGlobals } from '../../../src/parser/tokenizer'
import { globals } from '../../../src/prelude'
// Initialize parser with prelude globals so it knows dict/list/str are in scope

View File

@ -0,0 +1,41 @@
import { parser } from '../../src/parser/shrimp'
import { setGlobals } from '../../src/parser/tokenizer'
import { PRELUDE_NAMES } from '../server/src/prelude-names'
// Set globals for DotGet detection
setGlobals(PRELUDE_NAMES as unknown as string[])
// Test cases - does incomplete DotGet parse correctly?
const testCases = [
'dict.',
'dict.g',
'dict.get',
'$.',
'$.e',
'$.env',
]
for (const code of testCases) {
console.log(`\nTesting: "${code}"`)
const tree = parser.parse(code)
const cursor = tree.cursor()
// Print the parse tree
const printTree = (depth = 0) => {
const indent = ' '.repeat(depth)
console.log(`${indent}${cursor.name} [${cursor.from}-${cursor.to}]`)
if (cursor.firstChild()) {
do {
printTree(depth + 1)
} while (cursor.nextSibling())
cursor.parent()
}
}
printTree()
// Check at cursor position (end of string)
const node = tree.resolveInner(code.length, -1)
console.log(`Node at end: ${node.name} (type: ${node.type.id})`)
}