Corey Johnson 2025-07-21 12:03:31 -07:00
parent 0721f2ead1
commit 97a4c750ad
15 changed files with 402 additions and 159 deletions

View File

@@ -88,7 +88,7 @@
"@workshop/shared": "workspace:*",
"discord.js": "^14.19.3",
"luxon": "^3.6.1",
"zod": "3.25.67",
"zod": "catalog:",
},
"devDependencies": {
"@types/bun": "latest",
@@ -147,7 +147,8 @@
"hono": "catalog:",
"luxon": "^3.7.1",
"pngjs": "^7.0.0",
"zod": "3.25.67",
"tailwind": "^4.0.0",
"zod": "catalog:",
},
"devDependencies": {
"@types/bun": "latest",
@@ -156,6 +157,7 @@
},
"catalog": {
"hono": "^4.8.0",
"zod": "3.25.67",
},
"packages": {
"@babel/runtime": ["@babel/runtime@7.3.4", "", { "dependencies": { "regenerator-runtime": "^0.12.0" } }, "sha512-IvfvnMdSaLBateu0jfsYIpZTxAc2cKEXEMiezGGN75QcBcecDUKd3PgLAncT0oOgxKy8dd8hrJKj9MfzgfZd6g=="],
@@ -836,8 +838,16 @@
"@sapphire/shapeshift/lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="],
"@workshop/nano-remix/@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
"@workshop/shared/@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
"@workshop/spike/@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
"@workshop/whiteboard/@openai/agents": ["@openai/agents@0.0.11", "", { "dependencies": { "@openai/agents-core": "0.0.11", "@openai/agents-openai": "0.0.11", "@openai/agents-realtime": "0.0.11", "debug": "^4.4.0", "openai": "^5.0.1" } }, "sha512-MYSuQ0PptjryTb/BzrqoZB+cajv/p31uF42uXeqkI3s9PihqRttnQBJ1YCTJS/xQCl4f5R9cIradh/o5PpbDkA=="],
"@workshop/whiteboard/@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
"ajv/fast-deep-equal": ["fast-deep-equal@2.0.1", "", {}, "sha512-bCK/2Z4zLidyB4ReuIsvALH6w31YfAQDmXMqMx6FyfHqvBxtjC0eRumeSu4Bs3XtXwpyIywtSTrVT99BxY1f9w=="],
"amqplib/readable-stream": ["readable-stream@1.1.14", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.1", "isarray": "0.0.1", "string_decoder": "~0.10.x" } }, "sha512-+MeVjFf4L44XUkhM1eYbD8fyEsxcV81pqMSR5gblfcLCHfZvbrqy4/qYHE+/R5HoBUT11WV5O08Cr1n3YXkWVQ=="],
@@ -926,12 +936,20 @@
"@modelcontextprotocol/sdk/raw-body/iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="],
"@workshop/nano-remix/@types/bun/bun-types": ["bun-types@1.2.19", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-uAOTaZSPuYsWIXRpj7o56Let0g/wjihKCkeRqUBhlLVM/Bt+Fj9xTo+LhC1OV1XDaGkz4hNC80et5xgy+9KTHQ=="],
"@workshop/shared/@types/bun/bun-types": ["bun-types@1.2.19", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-uAOTaZSPuYsWIXRpj7o56Let0g/wjihKCkeRqUBhlLVM/Bt+Fj9xTo+LhC1OV1XDaGkz4hNC80et5xgy+9KTHQ=="],
"@workshop/spike/@types/bun/bun-types": ["bun-types@1.2.19", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-uAOTaZSPuYsWIXRpj7o56Let0g/wjihKCkeRqUBhlLVM/Bt+Fj9xTo+LhC1OV1XDaGkz4hNC80et5xgy+9KTHQ=="],
"@workshop/whiteboard/@openai/agents/@openai/agents-core": ["@openai/agents-core@0.0.11", "", { "dependencies": { "@openai/zod": "npm:zod@3.25.40 - 3.25.67", "debug": "^4.4.0", "openai": "^5.0.1" }, "optionalDependencies": { "@modelcontextprotocol/sdk": "^1.12.0" }, "peerDependencies": { "zod": "3.25.40 - 3.25.67" }, "optionalPeers": ["zod"] }, "sha512-kMG/B620fsFAwUe/ounmXty4FuAmWbMWgql4z/gCoER3S6h5tBqNTxffN0MAOFHV3EuPLiqTxA0kGiSdTpDwyA=="],
"@workshop/whiteboard/@openai/agents/@openai/agents-openai": ["@openai/agents-openai@0.0.11", "", { "dependencies": { "@openai/agents-core": "0.0.11", "@openai/zod": "npm:zod@3.25.40 - 3.25.67", "debug": "^4.4.0", "openai": "^5.0.1" } }, "sha512-gqVVDfyD0UYYBkc4kPJgbWzFzayKCKQBHMKHnbMsReZ8/nqHKGEd/hjBiqAZGqDW0BTKNaGfzGB8XAiLWWipnw=="],
"@workshop/whiteboard/@openai/agents/@openai/agents-realtime": ["@openai/agents-realtime@0.0.11", "", { "dependencies": { "@openai/agents-core": "0.0.11", "@openai/zod": "npm:zod@3.25.40 - 3.25.67", "@types/ws": "^8.18.1", "debug": "^4.4.0", "ws": "^8.18.1" } }, "sha512-gVdrKri0dPBOJfsQR6m9rdpBscRZK/efc1zLKqOA2mfmaL0RxI2/LvnyXbwrDGHQ6GEbovULkbWWQ9D4nUafow=="],
"@workshop/whiteboard/@types/bun/bun-types": ["bun-types@1.2.19", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-uAOTaZSPuYsWIXRpj7o56Let0g/wjihKCkeRqUBhlLVM/Bt+Fj9xTo+LhC1OV1XDaGkz4hNC80et5xgy+9KTHQ=="],
"amqplib/readable-stream/string_decoder": ["string_decoder@0.10.31", "", {}, "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ=="],
"body-parser/debug/ms": ["ms@2.0.0", "", {}, "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="],

View File

@@ -6,7 +6,8 @@
"packages/*"
],
"catalog": {
"hono": "^4.8.0"
"hono": "^4.8.0",
"zod": "3.25.67"
}
},
"prettier": {

View File

@@ -17,7 +17,7 @@
"@workshop/shared": "workspace:*",
"discord.js": "^14.19.3",
"luxon": "^3.6.1",
"zod": "3.25.67"
"zod": "catalog:"
},
"devDependencies": {
"@types/luxon": "^3.6.2",

View File

@@ -18,7 +18,8 @@
"hono": "catalog:",
"luxon": "^3.7.1",
"pngjs": "^7.0.0",
"zod": "3.25.67"
"tailwind": "^4.0.0",
"zod": "catalog:"
},
"devDependencies": {
"@types/bun": "latest"

Binary file not shown (size: 897 KiB before, 1007 KiB after).

View File

@@ -21,9 +21,13 @@ export const getGeminiResponse = async <T = StructuredResponse>(
]
const response = await ai.models.generateContent({
model: "gemini-2.5-pro",
model: "gemini-2.5-flash",
// model: "gemini-2.5-flash-lite-preview-06-17",
contents: contents,
config: {
thinkingConfig: {
thinkingBudget: 0,
},
responseMimeType: "application/json",
responseSchema: whiteboardSchema,
},
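For reference, switching from gemini-2.5-pro to gemini-2.5-flash and setting thinkingBudget: 0 disables the model's internal reasoning pass, trading some answer quality for lower latency and cost. A minimal standalone sketch of the same call with the @google/genai SDK might look like the following (the GEMINI_API_KEY variable and the inline placeholder schema are assumptions for illustration; the real whiteboardSchema is defined elsewhere in this file):

// Sketch only; whiteboardSchema here is a stand-in for the schema defined in this file.
import { GoogleGenAI, Type } from "@google/genai"

const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY })

const whiteboardSchema = {
  type: Type.OBJECT,
  properties: { elements: { type: Type.ARRAY, items: { type: Type.STRING } } },
}

const response = await ai.models.generateContent({
  model: "gemini-2.5-flash",
  contents: "Describe the shapes on the whiteboard",
  config: {
    thinkingConfig: { thinkingBudget: 0 }, // turn off the thinking pass for faster responses
    responseMimeType: "application/json",
    responseSchema: whiteboardSchema,
  },
})
console.log(response.text)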

View File

@@ -0,0 +1 @@
@import "tailwindcss"

View File

@@ -16,11 +16,12 @@ const categories = [
const prompts = {
default: `Detect all of the following objects: ${categories}. The box_2d should be an object with ymin, xmin, ymax, xmax properties normalized to 0-1000.`,
simple: `Detect the 2d bounding boxes of the following objects: ${categories}.`,
specific: `Detect the 2d inscribed box for the green circle.`,
}
export const action = async (req: Request, params: {}) => {
const imageBuffer = await Bun.file("public/whiteboard.png").arrayBuffer()
const response = await getGeminiResponse(imageBuffer, prompts.default)
const response = await getGeminiResponse(imageBuffer, prompts.specific)
// return { elements: response?.elements || [] }
// const response = await detectShapes(imageBuffer)

View File

@@ -1,7 +1,9 @@
import { ensure } from "@workshop/shared/utils"
import { useRef, useState, useEffect } from "hono/jsx"
import { useAction, submitAction } from "@workshop/nano-remix"
import { join } from "path"
import { useVideo } from "../useVideo"
import { VideoOverlay, type OverlayItem } from "../videoOverlay"
import "../index.css"
export const action = async (req: Request, params: {}) => {
const formData = await req.formData()
@@ -30,89 +32,35 @@ export const action = async (req: Request, params: {}) => {
export default function Camera() {
const videoRef = useRef<HTMLVideoElement>(null)
const canvasRef = useRef<HTMLCanvasElement>(null)
const [stream, setStream] = useState<MediaStream | null>(null)
const [error, setError] = useState<string | null>(null)
const [capturedImage, setCapturedImage] = useState<string | null>(null)
const { data, error: uploadError, loading } = useAction<typeof action>()
const [overlays, setOverlays] = useState<OverlayItem[]>([
{
type: "text",
x: 50,
y: 50,
text: "Camera Feed",
fontSize: 24,
color: "yellow",
strokeColor: "black",
},
{
type: "image",
x: 100,
y: 100,
src: "https://picsum.photos/seed/wow/200/300",
},
])
const captureImage = () => {
ensure(videoRef.current, "Video ref must be set before capturing image")
ensure(canvasRef.current, "Canvas ref must be set before capturing image")
const { isRecording, error, toggleRecording } = useVideo(videoRef, {
onCapture: (dataURL) => {
const formData = new FormData()
formData.append("imageData", dataURL)
submitAction(formData)
},
})
const canvas = canvasRef.current
const video = videoRef.current
const maxWidth = 1000
const maxHeight = 1000
const aspectRatio = video.videoWidth / video.videoHeight
let newWidth = maxWidth
let newHeight = maxHeight
if (aspectRatio > 1) {
newHeight = maxWidth / aspectRatio
} else {
newWidth = maxHeight * aspectRatio
}
canvas.width = newWidth
canvas.height = newHeight
const ctx = canvas.getContext("2d")
if (!ctx) return
ctx.clearRect(0, 0, canvas.width, canvas.height)
ctx.drawImage(video, 0, 0, newWidth, newHeight)
const dataURL = canvas.toDataURL("image/png")
setCapturedImage(dataURL)
// Upload the image
const formData = new FormData()
formData.append("imageData", dataURL)
submitAction(formData)
}
const startCamera = async () => {
try {
const mediaStream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: "environment" },
})
if (videoRef.current) {
videoRef.current.srcObject = mediaStream
videoRef.current.onloadedmetadata = () => {
setTimeout(captureImage, 100)
}
}
setStream(mediaStream)
setError(null)
} catch (err) {
setError("Failed to access camera")
}
}
const stopCamera = () => {
if (!stream) return
stream.getTracks().forEach((track) => track.stop())
setStream(null)
setCapturedImage(null)
if (videoRef.current) {
videoRef.current.srcObject = null
}
}
useEffect(() => {
if (!stream) return
const interval = setInterval(() => {
captureImage()
}, 1000)
return () => clearInterval(interval)
}, [stream])
// Update overlays when camera state changes
useEffect(() => {}, [isRecording])
return (
<div class="p-5">
@@ -123,33 +71,23 @@ export default function Camera() {
)}
<div class="mb-4">
{!stream ? (
<button onClick={startCamera} class="bg-green-500 text-white px-4 py-2 rounded hover:bg-green-600">
Start Camera
</button>
) : (
<button onClick={stopCamera} class="bg-red-500 text-white px-4 py-2 rounded hover:bg-red-600">
Stop Camera
</button>
)}
<button
onClick={toggleRecording}
class={`px-4 py-2 rounded text-white ${
isRecording ? "bg-red-500 hover:bg-red-600" : "bg-green-500 hover:bg-green-600"
}`}
>
{isRecording ? "Stop Camera" : "Start Camera"}
</button>
</div>
{uploadError && (
<div class="bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded mb-4">{uploadError}</div>
)}
{data?.success && (
<div class="bg-green-100 border border-green-400 text-green-700 px-4 py-3 rounded mb-4">
Upload successful! File: {data.filename}
</div>
)}
{capturedImage && (
<img src={capturedImage} alt="Captured" class="w-full max-w-lg border-2 border-gray-300 rounded" />
)}
<canvas ref={canvasRef} style={{ display: "none" }} />
<video ref={videoRef} autoPlay muted playsInline class="w-full max-w-lg object-cover" />
<VideoOverlay overlays={overlays} isRecording={isRecording}>
<video ref={videoRef} autoPlay muted playsInline class="w-full max-w-lg object-cover" />
</VideoOverlay>
<script src="https://cdn.jsdelivr.net/npm/eruda"></script>
<script>eruda.init()</script>

View File

@@ -1,62 +1,23 @@
import { useEffect, useRef, useState } from "hono/jsx"
import { StreamingResponse } from "../streamingAI"
import { useStreamingAI } from "../useStreamingAI"
import "../index.css"
export default function Voice() {
const [audioError, setAudioError] = useState<string>("")
const [transcript, setTranscript] = useState<string>("")
const [isRecording, setIsRecording] = useState(false)
const streamingResponseRef = useRef<StreamingResponse>(null)
const { audioError, transcript, isRecording, waitingForResponse } = useStreamingAI()
const startRecording = async () => {
setAudioError("")
setTranscript("")
streamingResponseRef.current = new StreamingResponse((error) => setAudioError(error))
await streamingResponseRef.current.start()
setIsRecording(true)
}
const endRecording = async () => {
setIsRecording(false)
try {
const reader = await streamingResponseRef.current!.stop()
const decoder = new TextDecoder()
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
setTranscript((prev) => prev + chunk)
}
} catch (error) {
console.error("Error during streaming:", error)
setAudioError(`Streaming failed: ${error}`)
}
}
useEffect(() => {
return () => endRecording()
}, [])
let recordingStateClass = ""
if (isRecording) recordingStateClass = "border-red-500 border-4"
else if (waitingForResponse) recordingStateClass = "border-yellow-500 border-4"
return (
<div>
{audioError && <p>Audio Error: {audioError}</p>}
<div class={`fixed inset-0 p-5 transition-all duration-300 ${recordingStateClass}`}>
{audioError && <p class="text-red-500">Audio Error: {audioError}</p>}
<div>
<h3>Audio Recording</h3>
<button onClick={isRecording ? endRecording : startRecording}>
{isRecording ? "Stop Recording" : "Start Recording"}
</button>
{isRecording && <p>🎤 Recording...</p>}
<h3 class="text-xl font-bold">Voice Control</h3>
<div class="text-gray-600">Hold Space key to record, release to transcribe</div>
</div>
{transcript && (
<div>
<h4>Transcript:</h4>
<p>{transcript}</p>
</div>
)}
{transcript && <div class="mt-5 bg-white/90 p-4 rounded-lg">{transcript}</div>}
</div>
)
}

View File

@@ -3,6 +3,7 @@ import { OpenAI } from "openai"
import { Agent, run, type AgentInputItem } from "@openai/agents"
import fs from "node:fs"
import { getErrorMessage } from "@workshop/shared/errors"
import { tools } from "./tools"
Bun.serve({
port: 3000,
@@ -37,6 +38,7 @@ const streamResponse = async (req: Request) => {
name: "Whiteboard Assistant",
model: "gpt-4o",
instructions: "You are a helpful assistant that talks about a whiteboard.",
tools,
})
const imagePath = "public/whiteboard.png"

View File

@@ -0,0 +1,14 @@
import { tool } from "@openai/agents"
import z from "zod"
export const tools = [
tool({
name: "embed video",
description: "Embed a video into the whiteboard",
parameters: z.object({ video: z.string().url() }),
execute(input, context) {
const { video } = input
return `Video embedded: ${video}`
},
}),
]
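Not part of this commit, but as a rough usage sketch: the tools array gets attached to the "Whiteboard Assistant" agent in the server file (the tools, line added above), and a run that mentions a video URL should trigger the embed tool. The construction and run call below mirror what that file appears to do; the prompt string is made up for illustration.

// Hypothetical invocation, mirroring the agent defined in the server file.
import { Agent, run } from "@openai/agents"
import { tools } from "./tools"

const agent = new Agent({
  name: "Whiteboard Assistant",
  model: "gpt-4o",
  instructions: "You are a helpful assistant that talks about a whiteboard.",
  tools,
})

const result = await run(agent, "Embed https://example.com/demo.mp4 into the whiteboard")
console.log(result.finalOutput) // the tool returns "Video embedded: <url>" for the model to report back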

View File

@@ -0,0 +1,74 @@
import { useEffect, useRef, useState } from "hono/jsx"
import { StreamingResponse } from "./streamingAI"
export function useStreamingAI() {
const [audioError, setAudioError] = useState<string>("")
const [transcript, setTranscript] = useState<string>("")
const [isRecording, setIsRecording] = useState(false)
const [waitingForResponse, setWaitingForResponse] = useState(false)
const streamingResponseRef = useRef<StreamingResponse>(null)
const startRecording = async () => {
setAudioError("")
setTranscript("")
streamingResponseRef.current = new StreamingResponse((error) => setAudioError(error))
await streamingResponseRef.current.start()
setIsRecording(true)
}
const endRecording = async () => {
setIsRecording(false)
try {
setWaitingForResponse(true)
const reader = await streamingResponseRef.current!.stop()
setWaitingForResponse(false)
const decoder = new TextDecoder()
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
setTranscript((prev) => prev + chunk)
}
} catch (error) {
console.error("Error during streaming:", error)
setAudioError(`Streaming failed: ${error}`)
} finally {
setWaitingForResponse(false)
}
}
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
if (event.code === "Space" && !event.repeat && !isRecording) {
event.preventDefault()
startRecording()
}
}
const handleKeyUp = (event: KeyboardEvent) => {
if (event.code === "Space" && isRecording) {
event.preventDefault()
endRecording()
}
}
window.addEventListener("keydown", handleKeyDown)
window.addEventListener("keyup", handleKeyUp)
return () => {
window.removeEventListener("keydown", handleKeyDown)
window.removeEventListener("keyup", handleKeyUp)
}
}, [isRecording])
return {
audioError,
transcript,
isRecording,
waitingForResponse,
startRecording,
endRecording,
}
}
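The StreamingResponse class imported from ./streamingAI is not touched by this commit and is not shown here. From the call sites above, the hook assumes roughly the following shape (an inferred sketch, not the actual file):

// Assumed interface only; the real implementation lives in streamingAI.ts.
export declare class StreamingResponse {
  // Receives a callback used to surface recording/streaming errors.
  constructor(onError: (error: string) => void)
  // Starts capturing microphone audio.
  start(): Promise<void>
  // Stops capturing, sends the audio off for transcription, and resolves with a
  // reader over the streamed text response (decoded chunk by chunk above).
  stop(): Promise<ReadableStreamDefaultReader<Uint8Array>>
}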

View File

@@ -0,0 +1,107 @@
import { useState, useEffect, type RefObject } from "hono/jsx"
import { ensure } from "@workshop/shared/utils"
interface UseVideoOptions {
captureInterval?: number
onCapture?: (dataUrl: string) => void
}
export function useVideo(videoRef: RefObject<HTMLVideoElement>, options: UseVideoOptions = {}) {
const { captureInterval = 1000, onCapture } = options
const [isRecording, setIsRecording] = useState(false)
const [error, setError] = useState<string | null>(null)
const captureImage = () => {
ensure(videoRef.current, "Video ref must be set before capturing image")
const video = videoRef.current
const canvas = document.createElement("canvas")
const maxWidth = 1000
const maxHeight = 1000
const aspectRatio = video.videoWidth / video.videoHeight
let newWidth = maxWidth
let newHeight = maxHeight
if (aspectRatio > 1) {
newHeight = maxWidth / aspectRatio
} else {
newWidth = maxHeight * aspectRatio
}
canvas.width = newWidth
canvas.height = newHeight
const ctx = canvas.getContext("2d")
if (!ctx) return
ctx.clearRect(0, 0, canvas.width, canvas.height)
ctx.drawImage(video, 0, 0, newWidth, newHeight)
const dataURL = canvas.toDataURL("image/png")
if (onCapture) {
onCapture(dataURL)
}
return dataURL
}
const startCamera = async () => {
try {
const mediaStream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: "environment" },
})
if (videoRef.current) {
videoRef.current.srcObject = mediaStream
videoRef.current.onloadedmetadata = () => {
setTimeout(captureImage, 100)
}
}
setIsRecording(true)
setError(null)
} catch (err) {
setError("Failed to access camera")
setIsRecording(false)
}
}
const stopCamera = () => {
if (!isRecording || !videoRef.current?.srcObject) return
const stream = videoRef.current.srcObject as MediaStream
stream.getTracks().forEach((track) => track.stop())
if (videoRef.current) {
videoRef.current.srcObject = null
}
setIsRecording(false)
}
const toggleRecording = () => {
if (isRecording) {
stopCamera()
} else {
startCamera()
}
}
useEffect(() => {
if (!isRecording) return
const interval = setInterval(() => {
captureImage()
}, captureInterval)
return () => clearInterval(interval)
}, [isRecording, captureInterval])
return {
isRecording,
error,
toggleRecording,
}
}

View File

@@ -0,0 +1,121 @@
import { useRef, useEffect } from "hono/jsx"
export interface TextOverlay {
type: "text"
x: number
y: number
text: string
fontSize?: number
fontFamily?: string
color?: string
strokeColor?: string
strokeWidth?: number
}
export interface ImageOverlay {
type: "image"
x: number
y: number
src: string
width?: number
height?: number
}
export type OverlayItem = TextOverlay | ImageOverlay
interface VideoOverlayProps {
overlays: OverlayItem[]
children: any // ick
isRecording?: boolean
}
export function VideoOverlay({ overlays, children, isRecording }: VideoOverlayProps) {
const canvasRef = useRef<HTMLCanvasElement>(null)
const containerRef = useRef<HTMLDivElement>(null)
const drawOverlays = () => {
const canvas = canvasRef.current
const container = containerRef.current
if (!canvas || !container) return
const video = container.querySelector("video")
if (!video) return
const ctx = canvas.getContext("2d")
if (!ctx) return
// Match canvas size to video's rendered size
const rect = video.getBoundingClientRect()
canvas.width = rect.width
canvas.height = rect.height
// Clear canvas
ctx.clearRect(0, 0, canvas.width, canvas.height)
// Draw overlays
for (const overlay of overlays) {
if (overlay.type === "text") {
drawText(ctx, overlay)
} else if (overlay.type === "image") {
drawImage(ctx, overlay)
}
}
}
// Redraw when overlay data changes or recording state changes
useEffect(() => {
setTimeout(() => {
console.log(`🌭 `, canvasRef.current?.width, canvasRef.current?.height)
drawOverlays()
}, 1000)
}, [overlays, isRecording])
// Redraw on window resize (video size might change)
useEffect(() => {
window.addEventListener("resize", drawOverlays)
return () => window.removeEventListener("resize", drawOverlays)
}, [])
const drawText = (ctx: CanvasRenderingContext2D, overlay: TextOverlay) => {
const {
x,
y,
text,
fontSize = 20,
fontFamily = "Arial",
color = "white",
strokeColor = "black",
strokeWidth = 2,
} = overlay
ctx.font = `${fontSize}px ${fontFamily}`
ctx.fillStyle = color
ctx.strokeStyle = strokeColor
ctx.lineWidth = strokeWidth
ctx.strokeText(text, x, y)
ctx.fillText(text, x, y)
}
const drawImage = (ctx: CanvasRenderingContext2D, overlay: ImageOverlay) => {
const { x, y, src, width, height } = overlay
const img = new Image()
img.crossOrigin = "anonymous"
img.onload = () => {
const drawWidth = width || img.width
const drawHeight = height || img.height
ctx.drawImage(img, x, y, drawWidth, drawHeight)
}
img.src = src
}
return (
<div ref={containerRef} class="relative inline-block">
{children}
<canvas ref={canvasRef} class="absolute top-0 left-0 pointer-events-none z-10" />
</div>
)
}