import { Hono } from "hono"; import { cors } from "hono/cors"; import { serve } from "@hono/node-server"; import { BenchmarkQueue, BenchmarkRequest } from "./queue.js"; import { BenchmarkStorage } from "./storage.js"; import { HFDatasetUploader } from "./hf-dataset.js"; import { randomUUID } from "crypto"; import { z } from "zod"; import { config as dotenvConfig } from "dotenv"; // Load environment variables dotenvConfig(); const app = new Hono(); const queue = new BenchmarkQueue(); const storage = new BenchmarkStorage(); // Initialize HF Dataset uploader if configured const hfUploader = new HFDatasetUploader( process.env.HF_DATASET_REPO && process.env.HF_TOKEN ? { repo: process.env.HF_DATASET_REPO, token: process.env.HF_TOKEN, } : undefined ); if (hfUploader.isEnabled()) { console.log(`📤 HF Dataset upload enabled: ${process.env.HF_DATASET_REPO}`); } else { console.log("📤 HF Dataset upload disabled (set HF_DATASET_REPO and HF_TOKEN to enable)"); } // Enable CORS for development app.use("/*", cors()); // Store completed benchmarks to file and upload to HF Dataset queue.on("completed", async (benchmark) => { try { await storage.appendResult(benchmark); console.log(`✓ Benchmark ${benchmark.id} saved to file`); } catch (error) { console.error(`✗ Failed to save benchmark ${benchmark.id}:`, error); } // Upload to HF Dataset if enabled if (hfUploader.isEnabled()) { try { await hfUploader.uploadResult(benchmark); } catch (error) { console.error(`✗ Failed to upload benchmark ${benchmark.id} to HF Dataset:`, error); // Don't fail the whole operation if HF upload fails } } }); queue.on("failed", async (benchmark) => { try { await storage.appendResult(benchmark); console.log(`✗ Failed benchmark ${benchmark.id} saved to file`); } catch (error) { console.error(`✗ Failed to save failed benchmark ${benchmark.id}:`, error); } // Don't upload failed benchmarks to HF Dataset // Only local storage keeps track of failures }); // Log queue events queue.on("added", (benchmark) => { 
console.log(`📥 Added to queue: ${benchmark.id} (${benchmark.platform}/${benchmark.modelId})`); }); queue.on("started", (benchmark) => { console.log(`🚀 Started: ${benchmark.id}`); }); queue.on("completed", (benchmark) => { console.log(`✅ Completed: ${benchmark.id} in ${(benchmark.completedAt! - benchmark.startedAt!) / 1000}s`); }); queue.on("failed", (benchmark) => { console.log(`❌ Failed: ${benchmark.id} - ${benchmark.error}`); }); // API Endpoints // Zod schema for benchmark request validation const BenchmarkRequestSchema = z.object({ platform: z.enum(["node", "web"]).default("node"), modelId: z.string().min(1, "modelId is required"), task: z.string().min(1, "task is required"), mode: z.enum(["warm", "cold"]).default("warm"), repeats: z.number().int().positive().default(3), dtype: z.enum(["fp32", "fp16", "q8", "int8", "uint8", "q4", "bnb4", "q4f16"]).optional(), batchSize: z.number().int().positive().default(1), device: z.string().default("webgpu"), browser: z.enum(["chromium", "firefox", "webkit"]).default("chromium"), headed: z.boolean().default(false), }); /** * POST /api/benchmark * Submit a new benchmark request */ app.post("/api/benchmark", async (c) => { try { const body = await c.req.json(); const validated = BenchmarkRequestSchema.parse(body); const request: BenchmarkRequest = { id: randomUUID(), platform: validated.platform, modelId: validated.modelId, task: validated.task, mode: validated.mode, repeats: validated.repeats, dtype: validated.dtype, batchSize: validated.batchSize, device: validated.device, browser: validated.browser, headed: validated.headed, timestamp: Date.now(), }; queue.addBenchmark(request); return c.json({ id: request.id, message: "Benchmark queued", position: queue.getQueueStatus().pending, }); } catch (error) { if (error instanceof z.ZodError) { return c.json({ error: "Validation error", details: error.format() }, 400); } return c.json({ error: "Invalid request" }, 400); } }); /** * GET /api/benchmark/:id * Get benchmark 
status/result by ID */ app.get("/api/benchmark/:id", async (c) => { const id = c.req.param("id"); // Check queue first (for pending/running benchmarks) const queued = queue.getBenchmark(id); if (queued) { return c.json(queued); } // Check storage (for completed benchmarks) const stored = await storage.getResultById(id); if (stored) { return c.json(stored); } return c.json({ error: "Benchmark not found" }, 404); }); /** * GET /api/benchmarks * Get all benchmark results from storage * Query params: * - modelId: Filter by model ID */ app.get("/api/benchmarks", async (c) => { const modelId = c.req.query("modelId"); let results; if (modelId) { results = await storage.getResultsByModel(modelId); } else { results = await storage.getAllResults(); } return c.json({ total: results.length, results, }); }); /** * GET /api/queue * Get current queue status */ app.get("/api/queue", (c) => { const status = queue.getQueueStatus(); const allBenchmarks = queue.getAllBenchmarks(); return c.json({ status, queue: allBenchmarks, }); }); /** * DELETE /api/benchmarks * Clear all stored results */ app.delete("/api/benchmarks", async (c) => { await storage.clearResults(); return c.json({ message: "All results cleared" }); }); /** * GET / * Simple status page */ app.get("/", (c) => { const status = queue.getQueueStatus(); return c.html(`
Pending: ${status.pending} | Running: ${status.running} | Completed: ${status.completed} | Failed: ${status.failed}
/api/benchmark - Submit benchmark request
/api/benchmark/:id - Get benchmark status/result
/api/benchmarks - Get all stored results
/api/queue - Get queue status
/api/benchmarks - Clear all results
curl -X POST http://localhost:3000/api/benchmark \\
-H "Content-Type: application/json" \\
-d '{
"platform": "node",
"modelId": "Xenova/all-MiniLM-L6-v2",
"task": "feature-extraction",
"mode": "warm",
"repeats": 3,
"batchSize": 1
}'
`);
});
// Resolve the listening port from the environment, defaulting to 7860
// (the Hugging Face Spaces convention). Unlike `Number(...) || 7860`,
// this also rejects fractional or out-of-range values (e.g. "3000.5",
// "99999") instead of handing them to the server; unset, empty, "0",
// and non-numeric values fall back to 7860 exactly as before.
const envPort = Number(process.env.PORT);
const PORT = Number.isInteger(envPort) && envPort > 0 && envPort <= 65535 ? envPort : 7860;

serve(
  {
    fetch: app.fetch,
    port: PORT,
  },
  (info) => {
    // Startup banner with a quick endpoint reference.
    console.log(`
🚀 Benchmark Server running on http://localhost:${info.port}
API Endpoints:
POST /api/benchmark - Submit benchmark
GET /api/benchmark/:id - Get result
GET /api/benchmarks - List all results
GET /api/queue - Queue status
DELETE /api/benchmarks - Clear results
`);
  }
);