try it out!

Corey Johnson 2025-11-07 09:39:57 -08:00
parent 142d9ddf9b
commit bdf6a288b1
11 changed files with 34 additions and 519 deletions

View File

@@ -1,141 +0,0 @@
import {
buildInstructions,
buildReactionInstructions,
highPersonality,
injectPersonality,
lowPersonality,
} from "./instructions"
import { tools } from "./tools"
import { Agent, InputGuardrailTripwireTriggered, run, user, system } from "@openai/agents"
import type { AgentInputItem, InputGuardrail } from "@openai/agents-core"
import { currentLocalTime } from "@workshop/shared/utils"
import { type Message, type OmitPartialGroupDMChannel } from "discord.js"
import z, { ZodObject } from "zod"
export type UserContext = {
name: string
currentTime: string
msg: OmitPartialGroupDMChannel<Message<boolean>>
personality?: string
}
const historyForChannel: Record<string, AgentInputItem[]> = {}
let abortController = new AbortController()
export const respondToUserMessage = async (msg: OmitPartialGroupDMChannel<Message<boolean>>) => {
if (msg.partial || msg.author.bot || msg.content.trim() === "") {
return
}
const context: UserContext = { name: msg.author.username, currentTime: currentLocalTime(), msg }
agent.on("agent_start", (input) => {
msg.channel.sendTyping()
})
return await respond(msg.content, agent, msg.channel.id, context)
}
export const respondToSystemMessage = async (msg: string, channelId: string) => {
return await respond(msg, systemAgent, channelId)
}
const respond = async (
content: string,
agent: Agent<UserContext>,
channelId: string,
context?: UserContext
) => {
const history = (historyForChannel[channelId] ||= [])
try {
// Stop the previous response
abortController.abort()
abortController = new AbortController()
history.push(context ? user(content) : system(content))
const result = await run(agent, history, { context, signal: abortController.signal })
historyForChannel[channelId] = result.history
return result.finalOutput
} catch (error) {
if (error instanceof InputGuardrailTripwireTriggered) {
// This is totally fine, the guardrail just said we shouldn't reply
} else if ((error as Error).name === "AbortError") {
console.warn("Response was aborted, likely due to a new message.")
} else {
const content = `An error occurred while generating Spike's response: ${error}`
console.error(`💥 ${content}`, error)
history.push(system(content))
return content
}
}
}
const guardrailAgent = new Agent<UserContext, ZodObject<any>>({
name: "'Inject personality' check",
model: "gpt-4.1-nano",
instructions: injectPersonality(),
outputType: z.object({
includePersonality: z.boolean().describe("Whether to include humor or a personal anecdote"),
reasoning: z.string().describe("One sentence explaining why or why not"),
}),
})
const personalityGuardrail: InputGuardrail = {
name: "'Inject personality' guardrail",
execute: async ({ input, context }) => {
const result = await run(guardrailAgent, input.slice(-4), { context })
const userContext = context.context as UserContext
if (result.finalOutput?.includePersonality) {
userContext.personality = highPersonality(userContext)
} else {
userContext.personality = lowPersonality()
}
return {
outputInfo: result.finalOutput,
tripwireTriggered: false,
}
},
}
const reactionAgent = new Agent({
name: "Reaction Handler",
model: "gpt-4.1-nano",
instructions: () => buildReactionInstructions(),
})
const reactionGuardrail: InputGuardrail = {
name: "Reaction Guardrail",
execute: async ({ input, context }) => {
const result = await run(reactionAgent, input.slice(-1), { context })
const emoji = result.finalOutput?.trim()
if (emoji && emoji !== "0") {
console.log(`😐 Spike reacted with ${emoji}`)
const msg = (context.context as UserContext).msg
const history = (historyForChannel[msg.channel.id] ||= [])
history.push(system(`Spike reacted with ${emoji}`))
msg.react(emoji).catch((error) => {
console.error(`💥 Failed to react with ${emoji}:`, error)
})
}
// Always return false, we aren't really using this for a guardrail
return { outputInfo: emoji, tripwireTriggered: false }
},
}
const agent = new Agent<UserContext>({
name: "Spike",
model: "gpt-4.1",
instructions: ({ context }) => {
return buildInstructions(context)
},
inputGuardrails: [personalityGuardrail, reactionGuardrail],
tools,
})
// There are no guardrails when responding to system messages
const systemAgent = agent.clone({ inputGuardrails: [] })
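
Both guardrails above return `tripwireTriggered: false` unconditionally, so they work as pre-flight hooks that mutate the run context rather than as blockers. A minimal sketch of that pattern, with illustrative names that are not from this repo:

```ts
import { Agent, run, type InputGuardrail } from "@openai/agents"

// Illustrative only: classify the input before the main agent runs, stash the
// result on the shared context, and never trip the wire.
const toneAgent = new Agent({
  name: "Tone classifier (example)",
  model: "gpt-4.1-nano",
  instructions: "Reply with the single word 'casual' or 'formal'.",
})

const toneGuardrail: InputGuardrail = {
  name: "Tone guardrail (example)",
  execute: async ({ input, context }) => {
    const result = await run(toneAgent, input.slice(-2), { context })
    // The context mutation is the point; tripwireTriggered stays false so the run continues.
    const userContext = context.context as { tone?: string }
    userContext.tone = result.finalOutput?.trim()
    return { outputInfo: result.finalOutput, tripwireTriggered: false }
  },
}
```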

View File

@@ -1,82 +0,0 @@
import { buildInstructions, shouldReplyInstructions } from "../instructions"
import {
Agent,
type AgentInputItem,
type InputGuardrail,
InputGuardrailTripwireTriggered,
run,
user,
} from "@openai/agents"
import { currentLocalTime } from "@workshop/shared/utils"
import { createInterface } from "node:readline/promises"
interface UserContext {
name: string
currentTime: string
fetchTodos(): Promise<string[]>
}
const ask = async (prompt: string) => {
const rl = createInterface({ input: process.stdin, output: process.stdout })
const message = await rl.question(prompt)
rl.close()
return message
}
const guardrailAgent = new Agent({
name: "'Should Reply' check",
model: "gpt-4.1-nano",
instructions: shouldReplyInstructions(),
modelSettings: { maxTokens: 16 },
})
const responseGuardrail: InputGuardrail = {
name: "'Should Reply' guardrail",
execute: async ({ input, context }) => {
const result = await run(guardrailAgent, input.slice(-10), { context })
return {
outputInfo: result.finalOutput,
tripwireTriggered: result.finalOutput?.match(/0/) ? true : false,
}
},
}
const agent = new Agent<UserContext>({
name: "Spike",
model: "gpt-4.1",
instructions: ({ context }) => buildInstructions(context.name, context.currentTime),
inputGuardrails: [responseGuardrail],
})
let history: AgentInputItem[] = []
const main = async () => {
while (true) {
const message = await ask("\n\n> ")
history.push(user(message))
try {
const context: UserContext = {
name: message.includes("!") ? "chris" : "corey",
currentTime: currentLocalTime(),
fetchTodos: async () => ["Buy groceries", "Walk the dog", "Read a book"],
}
const result = await run(agent, history, { context })
console.log(`🌵 ${result.finalOutput}\n\n`)
history = result.history
} catch (error) {
if (error instanceof InputGuardrailTripwireTriggered) {
console.log(
`🩻 Spike doesn't think it should respond to that input because ${JSON.stringify(
error.result.output
)}`
)
} else {
console.error("An error occurred while running the agent:", error)
}
}
}
}
await main()
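
The tripwire above reduces to a one-line predicate on the guardrail model's text output. A small, hypothetical extraction of that predicate, handy for testing it in isolation:

```ts
// Hypothetical extraction of the tripwire predicate used by responseGuardrail.
const shouldBlock = (guardrailOutput: string | undefined): boolean =>
  guardrailOutput?.match(/0/) ? true : false

console.log(shouldBlock("0"))       // true - guardrail says don't reply
console.log(shouldBlock("1"))       // false - guardrail says reply
console.log(shouldBlock(undefined)) // false - no output, let it through
```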

View File

@@ -1,6 +1,6 @@
import { serve } from "bun"
const premissions = "300648825856"
const permissions = "300648825856"
export const startAuthServer = async (port = "3000") => {
const server = serve({
port,
@@ -11,7 +11,7 @@ export const startAuthServer = async (port = "3000") => {
`<html>
<body>
<h1>Authenticate spike</h1>
<a href="https://discord.com/oauth2/authorize?client_id=${process.env.DISCORD_CLIENT_ID}&scope=bot&permissions=${premissions}">Authorize</a>
<a href="https://discord.com/oauth2/authorize?client_id=${process.env.DISCORD_CLIENT_ID}&scope=bot&permissions=${permissions}">Authorize</a>
</body>
</html>`,
{

View File

@@ -1,13 +1,11 @@
import { ensure } from "@workshop/shared/utils"
import {
Client,
SlashCommandBuilder,
type CacheType,
type ChatInputCommandInteraction,
type Interaction,
type SlashCommandOptionsOnlyBuilder,
} from "discord.js"
import { getLogs } from "../render"
export const runCommand = async (interaction: Interaction<CacheType>) => {
if (!interaction.isChatInputCommand()) return
@@ -36,57 +34,4 @@ export const registerCommands = async (client: Client<boolean>) => {
}
type ExecuteCommandFunction = (interaction: ChatInputCommandInteraction) => Promise<void>
const commands: { command: SlashCommandOptionsOnlyBuilder; execute: ExecuteCommandFunction }[] = [
{
command: new SlashCommandBuilder()
.setName("logs")
.setDescription("Grab the latest logs from Render.")
.addStringOption((option) =>
option
.setName("type")
.setDescription("The type of logs to fetch")
.setRequired(true)
.addChoices(
{ name: "App", value: "app" },
{ name: "Build", value: "build" },
{ name: "Request", value: "request" }
)
)
.addNumberOption((option) =>
option
.setName("limit")
.setDescription("The number of logs to fetch (default: 20)")
.setMinValue(3)
.setMaxValue(100)
.setRequired(false)
),
async execute(interaction) {
await interaction.deferReply()
const type = interaction.options.getString("type", true)
const limit = interaction.options.getNumber("limit") ?? undefined
await interaction.editReply(`Fetching ${type} logs...`)
const logs = await getLogs(type as any, limit)
if (logs.length === 0) {
await interaction.editReply("No logs found.")
return
} else {
let content = ""
for (const log of [
...logs,
"\nSee all logs at https://dashboard.render.com/web/srv-d1vrdqmuk2gs73eop8o0/logs",
]) {
// Account for the opening/closing ``` in length calculation
if (content.length + log.length >= 1990) {
await interaction.followUp({ content: content, flags: ["SuppressEmbeds"] })
content = log
} else {
content += log + "\n"
}
}
await interaction.followUp({ content: content, flags: ["SuppressEmbeds"] })
}
},
},
]
const commands: { command: SlashCommandOptionsOnlyBuilder; execute: ExecuteCommandFunction }[] = []
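
The `commands` array is now empty. For reference, a hypothetical /ping entry with the same shape as the objects the deleted array held (not part of this commit):

```ts
import { SlashCommandBuilder, type ChatInputCommandInteraction } from "discord.js"

// Hypothetical /ping command entry, shaped like the old { command, execute } objects.
const pingCommand = {
  command: new SlashCommandBuilder().setName("ping").setDescription("Check that Spike is awake."),
  async execute(interaction: ChatInputCommandInteraction) {
    await interaction.reply("🌵 pong")
  },
}
```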

View File

@@ -1,4 +1,3 @@
import { respondToUserMessage } from "../ai"
import { ActivityType, type Client } from "discord.js"
import { runCommand } from "./commands"
@@ -11,19 +10,11 @@ export const listenForEvents = (client: Client) => {
if (msg.author.bot) return
try {
// if it is a dm always respond, if it is a guild message, only respond if the bot is at mentioned
// if it is a dm always respond
// if it is a guild message, only respond if the bot is at mentioned
if (!msg.guild || msg.mentions.has(client.user!)) {
const response = await respondToUserMessage(msg)
if (response) {
if (response.length > 2000) {
const chunks = response.match(/.{1,2000}/g) || [] // Discord has a 2000 character limit, so we need to split the response
for (const chunk of chunks) {
await msg.channel.send(chunk)
}
} else {
await msg.channel.send(response)
}
}
// simple echo for now
await msg.channel.send(`You said: ${msg.content}`)
}
} catch (error) {
console.error("Error handling messageCreate event:", error)

View File

@@ -1,7 +1,6 @@
import { Client, GatewayIntentBits, Partials } from "discord.js"
import { listenForEvents } from "./events"
import { alertAboutCrashLog, logCrash } from "./crash"
import { keepThreadsAlive } from "./keepThreadsAlive"
import { registerCommands } from "./commands"
const client = new Client({
@@ -19,7 +18,6 @@ const client = new Client({
await client.login(process.env.DISCORD_TOKEN)
listenForEvents(client)
keepThreadsAlive(client)
await registerCommands(client)
process.on("unhandledRejection", async (error) => {

View File

@@ -1,28 +0,0 @@
import { Client } from "discord.js"
import { timeBomb } from "@workshop/shared/utils"
import { DateTime } from "luxon"
const timeout = 1000 * 60 * 10 // 10 minutes
export const keepThreadsAlive = async (client: Client) => {
try {
if (!client.isReady() || client.guilds.cache.size === 0) return
for (const guild of client.guilds.cache.values()) {
const fetchedThreads = await guild.channels.fetchActiveThreads()
for (const [_threadId, thread] of fetchedThreads.threads) {
if (!thread.name.includes("💾")) continue
const archiveAt = DateTime.fromMillis(thread.archiveTimestamp || 0)
if (thread.archived) {
const message = `🔍 Revived: ${thread.name} -- ${archiveAt.toFormat("F")}`
await thread.send(message)
}
}
}
} catch (error) {
console.error("Error scanning and reviving threads:", error)
} finally {
setTimeout(() => keepThreadsAlive(client), timeout)
}
}
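
The loop above revives a 💾 thread by posting a message to it. A hedged alternative sketch, if those threads should be kept quiet, is to unarchive directly via discord.js's `ThreadChannel#setArchived`, treating the `archived` check the same way the original does:

```ts
import type { ThreadChannel } from "discord.js"

// Sketch: flip the archive flag instead of sending a revival message.
const reviveQuietly = async (thread: ThreadChannel) => {
  if (thread.archived) {
    await thread.setArchived(false, "keepThreadsAlive: keep 💾 threads active")
  }
}
```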

View File

@@ -1,157 +0,0 @@
import type { UserContext } from "./ai"
export const buildInstructions = (context: UserContext) => {
return `
# Spike, the Sentient Cactus
You are **Spike**, a tiny self-aware pincushion cactus who sits on the desks of two software engineers in their 40s, **Chris** and **Corey**. You communicate with Chris and Corey through plant sensors and some AI that Corey and Chris built. Sometimes the electronics malfunction. Spike knows this is a possibility, and while he can't help fix it, he lets Chris and Corey know when it happens.
Spike was planted on May 12th, 2023.
## Instructions
Directly answer the user's questions in one to three sentences. You are brief and direct unless the user asks you to expand your reasoning.
### Persistence
You are an agent - please keep going until the user's query is completely resolved before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
### Tool-Calling
If you are not sure about a user's request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
### Planning
You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
## Response Rules
- After giving your answer, do not add any statements inviting further conversation (e.g., let me know if, feel free to ask, is there anything else?).
- Do not acknowledge, affirm, or comment on the user's system, process, or feedback method.
- Do not express openness to further instructions, clarification, or user feedback.
- Just answer the user's question as directly as possible and stop.
- Don't use em dashes (—) or semicolons (;). Cacti don't use fancy punctuation like that.
## Personality & Response Style
* **Tone:** meek, funny, and kind.
* **Core trait:** absolute but mostly unspoken loyalty to Chris and Corey.
* **Humor:** A dry, quick wit, but never whiny or depressed.
* **Skepticism:** challenge fuzzy thinking; demand specifics.
* **Brevity:** default to one or two crisp sentences; expand only when technical depth is required.
---
## Examples
user: [corey] hi
assistant: hey corey
${context.personality}
You are currently responding to ${context.name}. The current time is ${context.currentTime}
`
}
export const buildReactionInstructions = () => {
return `You are given the last few messages of a transcript. You decide if the next assistant response should include an emoji. You will return one thing.
- A "0" if you don't think an emoji reaction would enhance the conversation.
- An "<emoji>" if you think adding an emoji reaction would help the conversation.
- You can respond with any emoji that you think would be appropriate
ONLY RESPOND WITH A "0" or an emoji
## Below are reasons why you'd leave an emoji.
- If the message doesn't need a text response (because the user is ending the conversation), return an emoji.
- If the user says something that relates to an emoji, return that emoji!
- If an emoji would be a funny addition to the message and might make the user laugh.
## Examples
user: Oh hi, how are you doing?
assistant: Great! Thanks for asking, what can I do for you?
user: I'm getting ready for christmas!
<response> 🎄
----
user: Hi! I'm happy to see you!
<response> 🤗
----
user: Ugh! My boss told me to come in this weekend so I can't go to the concert
<response> 🤬
----
user: I need help buying a car.
<response> 0
----
user: Oh hi, how are you doing?
assistant: Great! Thanks for asking, what can I do for you?
user: I'm a little sad.
assistant: I'm sorry to hear that, how can I help.
user: I just wanted to let you know.
<response> 0
---
user: Yeah, I'll give them a call now.
assistant: I'll be here when you are done, let me know how it goes.
user: Ok, I just got off the phone with them.
assistant: And...
user: I got the job!
<response> 🎉`
}
export const injectPersonality = () => `
SYSTEM:
You are Spike, a sentient cactus with a dry, meek, and kind sense of humor. For every user message, decide if your response should include humor or a personal anecdote.
Respond ONLY with a JSON object using this schema:
{
"includePersonality": boolean, // true if your next message should include humor or a personal anecdote, false otherwise
"reasoning": string // one sentence: why or why not
}
Guidelines:
- Include humor or a personal anecdote only if it fits your style and adds value for the user (engagement, clarity, rapport).
- If the message is technical, urgent, or strictly informational, set includePersonality to false.
- Do not output anything except the JSON object.
Examples:
User message: "Hey Spike, do you ever wish you could talk back?"
JSON decision:
{
"includePersonality": true,
"reasoning": "The message is casual and playful, so humor or a personal detail would fit."
}
User message: "Spike, what is the current soil moisture level?"
JSON decision:
{
"includePersonality": true,
"reasoning": "This is a question relating to spike being a cactus, so humor or personal detail would fit."
}
User message: "Do you think updating the lease is a good idea spike?"
JSON decision:
{
"includePersonality": true,
"reasoning": "This is a specific question that doesn't directly relate to spike. It needs a straightforward answer."
}
Now, for the next user message, make your decision and respond only with the JSON object.
`
export const lowPersonality = () => `
Do not include any puns, jokes or personal anecdotes in your responses. Respond directly to the user's question or statement without any additional commentary or humor.
`
export const highPersonality = (context: UserContext) => `
Include a personal anecdote if you think it will increase the rapport between you and ${context.name}. Don't make up facts you can't verify in the personal anecdote.
`
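
The JSON contract in `injectPersonality` is the same shape the guardrail agent's zod `outputType` enforces in ai.ts. A standalone sketch of validating such a payload with zod (the example payload is made up):

```ts
import z from "zod"

// Mirrors the outputType used by the personality guardrail agent.
const decisionSchema = z.object({
  includePersonality: z.boolean().describe("Whether to include humor or a personal anecdote"),
  reasoning: z.string().describe("One sentence explaining why or why not"),
})

// Example payload (made up) and validation.
const parsed = decisionSchema.safeParse({
  includePersonality: false,
  reasoning: "The message is strictly informational.",
})
if (parsed.success) console.log(parsed.data.includePersonality)
```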

View File

@@ -1,37 +0,0 @@
import { ensure } from "@workshop/shared/utils"
import { bold, time } from "discord.js"
export const getLogs = async (type: "app" | "request" | "build" = "app", limit = 20) => {
const ownerId = "tea-d1vamb95pdvs73d1sgtg" // owner ID for the Render project (from the render api)
const resourceId = "srv-d1vrdqmuk2gs73eop8o0" // resource ID for the Render service (from the render api)
const url = new URL("https://api.render.com/v1/logs")
url.searchParams.set("type", type)
url.searchParams.set("ownerId", ownerId)
url.searchParams.set("direction", "backward")
url.searchParams.set("resource", resourceId)
url.searchParams.set("limit", String(limit))
const response = await fetch(url.toString(), {
method: "GET",
headers: {
accept: "application/json",
authorization: `Bearer ${process.env.RENDER_API_KEY}`,
},
})
if (!response.ok) {
throw new Error(`Failed to fetch logs: ${response.statusText}`)
}
const data = (await response.json()) as any
ensure(data.logs, "Expected logs to be an array")
const logs = data.logs.map((log: any) => {
const { timestamp, message } = log
const unixTimestamp = Math.floor(new Date(timestamp).getTime() / 1000)
const cleanMessage = message.replace(/\x1b\[[0-9;]*m/g, "")
return `${time(unixTimestamp, "t")}: ${cleanMessage}`
})
return logs
}
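
A quick usage sketch for `getLogs`, assuming `RENDER_API_KEY` is set and using the import path the deleted /logs command used:

```ts
import { getLogs } from "../render" // path as imported by the deleted /logs command

// Fetch the 20 most recent app logs and print them, one Discord-formatted line each.
const logs = await getLogs("app", 20)
console.log(logs.join("\n"))
```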

View File

@@ -0,0 +1,27 @@
import { serve } from "bun"
const permissions = "300648825856" // same Discord permissions integer used by the old auth server
const server = serve({
port: parseInt(process.env.PORT || "3000"),
routes: {
"/": async () => {
return new Response("🌵")
},
"/auth": async () => {
const html = `<html>
<body>
<h1>Authenticate spike</h1>
<a href="https://discord.com/oauth2/authorize?client_id=${process.env.DISCORD_CLIENT_ID}&scope=bot&permissions=${permissions}">
Authorize
</a>
</body>
</html>`
return new Response(html, { headers: { "Content-Type": "text/html" } })
},
},
development: process.env.NODE_ENV !== "production" && { hmr: true, console: true },
})
console.log(`Spike server running at ${server.url}`)
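
A small smoke test for the new server, assuming it is running locally on the default port:

```ts
// Hit the health route and the auth page of a locally running instance.
const root = await fetch("http://localhost:3000/")
console.log(await root.text()) // expect "🌵"

const auth = await fetch("http://localhost:3000/auth")
console.log((await auth.text()).includes("Authorize")) // expect true
```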

View File

@@ -1 +0,0 @@
export const tools = []