Add environmental metrics

- bench/src/core/sysinfo.ts +37 -0
- bench/src/core/types.ts +6 -1
- bench/src/node/benchmark.ts +3 -0
- bench/src/web/benchmark.ts +106 -38
- bench/src/web/cli.ts +26 -1
- bench/src/web/envinfo.ts +68 -0
bench/src/core/sysinfo.ts
ADDED
@@ -0,0 +1,37 @@
+import os from "os";
+
+export interface SystemInfo {
+  cpu: {
+    model: string;
+    cores: number;
+    threads: number;
+  };
+  memory: {
+    total: string;
+    available: string;
+  };
+  platform: string;
+  arch: string;
+  nodeVersion: string;
+}
+
+export function getSystemInfo(): SystemInfo {
+  const cpus = os.cpus();
+  const totalMemory = os.totalmem();
+  const freeMemory = os.freemem();
+
+  return {
+    cpu: {
+      model: cpus[0]?.model || "Unknown",
+      cores: os.cpus().length,
+      threads: os.cpus().length, // In Node.js, this is the same as logical cores
+    },
+    memory: {
+      total: `${(totalMemory / 1024 / 1024 / 1024).toFixed(2)} GB`,
+      available: `${(freeMemory / 1024 / 1024 / 1024).toFixed(2)} GB`,
+    },
+    platform: os.platform(),
+    arch: os.arch(),
+    nodeVersion: process.version,
+  };
+}
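For reference, a minimal sketch of exercising the new helper on its own (not part of this commit; the import path is assumed relative to the repo root):

// Sketch: print the environment block that Node benchmark results will now carry
import { getSystemInfo } from "./bench/src/core/sysinfo.js"; // assumed path

const env = getSystemInfo();
console.log(JSON.stringify(env, null, 2));
// Expected shape: { cpu: { model, cores, threads }, memory: { total, available }, platform, arch, nodeVersion }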
bench/src/core/types.ts
CHANGED
@@ -16,10 +16,15 @@ export interface BenchmarkResult {
   repeats: number;
   batchSize: number;
   dtype?: string;
-  metrics: {
+  metrics?: {
     load_ms: { p50: number; p90: number; raw: number[] };
     first_infer_ms: { p50: number; p90: number; raw: number[] };
     subsequent_infer_ms: { p50: number; p90: number; raw: number[] };
   };
+  error?: {
+    type: string;
+    message: string;
+    stage?: "load" | "inference";
+  };
   [key: string]: any;
 }
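With metrics now optional and an error block added, a BenchmarkResult can describe either a successful run or a failed one. An illustrative sketch of the two shapes (typed loosely with Partial so only the fields touched by this commit appear; all numbers are invented for the example):

import { BenchmarkResult } from "./bench/src/core/types.js"; // assumed path

// Successful run: metrics present, no error
const ok: Partial<BenchmarkResult> = {
  metrics: {
    load_ms: { p50: 1200, p90: 1450, raw: [1180, 1200, 1450] }, // example numbers only
    first_infer_ms: { p50: 85, p90: 98, raw: [85, 82, 98] },
    subsequent_infer_ms: { p50: 41, p90: 46, raw: [41, 43, 46] },
  },
};

// Failed run: no metrics, the error block says what broke and at which stage
const failed: Partial<BenchmarkResult> = {
  error: { type: "memory_error", message: "Aborted()", stage: "load" },
};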
bench/src/node/benchmark.ts
CHANGED
@@ -4,6 +4,7 @@ import path from "node:path";
 import { BenchmarkOptions, BenchmarkResult } from "../core/types.js";
 import { BenchmarkRawResult, aggregateMetrics } from "../core/metrics.js";
 import { ensureEmptyDir } from "./cache.js";
+import { getSystemInfo } from "../core/sysinfo.js";
 
 async function benchOnce(
   modelId: string,
@@ -79,6 +80,7 @@ export async function runNodeBenchmark(options: BenchmarkOptions): Promise<Bench
   }
 
   const metrics = aggregateMetrics(results);
+  const sysInfo = getSystemInfo();
 
   const result: BenchmarkResult = {
     platform: "node",
@@ -90,6 +92,7 @@ export async function runNodeBenchmark(options: BenchmarkOptions): Promise<Bench
     batchSize,
     cacheDir,
     metrics,
+    environment: sysInfo,
   };
 
   if (dtype) result.dtype = dtype;
bench/src/web/benchmark.ts
CHANGED
@@ -2,6 +2,7 @@ import { pipeline } from "@huggingface/transformers";
 import { BenchmarkRawResult, aggregateMetrics } from "../core/metrics.js";
 import { BenchmarkResult } from "../core/types.js";
 import { clearCaches } from "./cache.js";
+import { getBrowserEnvInfo } from "./envinfo.js";
 
 function now() {
   return performance.now();
@@ -13,34 +14,55 @@
   device: string,
   dtype: string | undefined,
   batchSize: number
+): Promise<BenchmarkRawResult | { error: { type: string; message: string; stage: "load" | "inference" } }> {
+  try {
+    const t0 = now();
+    const options: any = { device };
+    if (dtype) options.dtype = dtype;
+    const pipe = await pipeline(task, modelId, options);
+    const t1 = now();
+
+    // Prepare batch input
+    const inputs = Array(batchSize).fill("The quick brown fox jumps over the lazy dog.");
+
+    const t2 = now();
     await pipe(inputs);
+    const t3 = now();
 
+    // Run additional inferences to measure subsequent performance
+    const subsequentTimes: number[] = [];
+    for (let i = 0; i < 3; i++) {
+      const t4 = now();
+      await pipe(inputs);
+      const t5 = now();
+      subsequentTimes.push(+(t5 - t4).toFixed(1));
+    }
+
+    return {
+      load_ms: +(t1 - t0).toFixed(1),
+      first_infer_ms: +(t3 - t2).toFixed(1),
+      subsequent_infer_ms: subsequentTimes,
+    };
+  } catch (error: any) {
+    // Determine error type and stage
+    const errorMessage = error?.message || String(error);
+    let errorType = "runtime_error";
+    let stage: "load" | "inference" = "load";
+
+    if (errorMessage.includes("Aborted") || errorMessage.includes("out of memory")) {
+      errorType = "memory_error";
+    } else if (errorMessage.includes("Failed to fetch") || errorMessage.includes("network")) {
+      errorType = "network_error";
+    }
+
+    return {
+      error: {
+        type: errorType,
+        message: errorMessage,
+        stage,
+      },
+    };
+  }
 }
 
 export async function runWebBenchmarkCold(
@@ -54,12 +76,18 @@
   await clearCaches();
 
   const results: BenchmarkRawResult[] = [];
+  let error: { type: string; message: string; stage: "load" | "inference" } | undefined;
+
   for (let i = 0; i < repeats; i++) {
     const r = await benchOnce(modelId, task, device, dtype, batchSize);
+    if ('error' in r) {
+      error = r.error;
+      break;
+    }
     results.push(r);
   }
 
+  const envInfo = await getBrowserEnvInfo();
 
   const result: BenchmarkResult = {
     platform: "browser",
@@ -70,9 +98,17 @@
     model: modelId,
     task,
     device,
+    environment: envInfo,
     notes: "Only the 1st iteration is strictly cold in a single page session.",
   };
+
+  if (error) {
+    result.error = error;
+  } else {
+    const metrics = aggregateMetrics(results);
+    result.metrics = metrics;
+  }
+
   if (dtype) result.dtype = dtype;
   return result;
 }
@@ -85,20 +121,44 @@
   dtype?: string,
   batchSize: number = 1
 ): Promise<BenchmarkResult> {
+  let error: { type: string; message: string; stage: "load" | "inference" } | undefined;
+
   // Prefetch/warmup
+  try {
+    const options: any = { device };
+    if (dtype) options.dtype = dtype;
+    const p = await pipeline(task, modelId, options);
+    const warmupInputs = Array(batchSize).fill("warmup");
+    await p(warmupInputs);
+  } catch (err: any) {
+    const errorMessage = err?.message || String(err);
+    let errorType = "runtime_error";
+    if (errorMessage.includes("Aborted") || errorMessage.includes("out of memory")) {
+      errorType = "memory_error";
+    } else if (errorMessage.includes("Failed to fetch") || errorMessage.includes("network")) {
+      errorType = "network_error";
+    }
+    error = {
+      type: errorType,
+      message: errorMessage,
+      stage: "load",
+    };
+  }
 
   const results: BenchmarkRawResult[] = [];
+
+  if (!error) {
+    for (let i = 0; i < repeats; i++) {
+      const r = await benchOnce(modelId, task, device, dtype, batchSize);
+      if ('error' in r) {
+        error = r.error;
+        break;
+      }
+      results.push(r);
+    }
   }
 
+  const envInfo = await getBrowserEnvInfo();
 
   const result: BenchmarkResult = {
     platform: "browser",
@@ -109,8 +169,16 @@
     model: modelId,
     task,
     device,
+    environment: envInfo,
   };
+
+  if (error) {
+    result.error = error;
+  } else {
+    const metrics = aggregateMetrics(results);
+    result.metrics = metrics;
+  }
+
   if (dtype) result.dtype = dtype;
   return result;
 }
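The same string-matching classification appears both in benchOnce and in the warm-up path above. As a rough sketch only (not part of this commit; the helper name is hypothetical), the shared rule amounts to:

// Sketch of the classification rule duplicated in the diff above
type BenchError = { type: string; message: string; stage: "load" | "inference" };

function classifyError(err: unknown, stage: "load" | "inference" = "load"): BenchError {
  const message = (err as any)?.message || String(err);
  let type = "runtime_error";
  if (message.includes("Aborted") || message.includes("out of memory")) {
    type = "memory_error"; // WASM out-of-memory aborts surface as "Aborted(...)"
  } else if (message.includes("Failed to fetch") || message.includes("network")) {
    type = "network_error"; // model download failures
  }
  return { type, message, stage };
}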
bench/src/web/cli.ts
CHANGED
@@ -48,7 +48,12 @@ async function main() {
 
   // Build args based on mode
   const args = device === "wasm"
+    ? [
+      "--disable-gpu",
+      "--disable-software-rasterizer",
+      // Increase WASM memory limits for large models
+      "--js-flags=--max-old-space-size=8192",
+    ]
     : [
       // Official WebGPU flags from Chrome team
       // https://developer.chrome.com/blog/supercharge-web-ai-testing#enable-webgpu
@@ -96,6 +101,11 @@ async function main() {
     }
   });
 
+  // Catch page errors
+  page.on("pageerror", (error) => {
+    console.error(`[browser error]`, error.message);
+  });
+
   // Navigate to the app
   await page.goto(url);
 
@@ -152,6 +162,21 @@ async function main() {
 
     console.log("\n" + JSON.stringify(result, null, 2));
 
+    // Log helpful messages if there's an error
+    if (result.error) {
+      console.error("\n❌ Benchmark completed with error:");
+      console.error(`  Type: ${result.error.type}`);
+      console.error(`  Stage: ${result.error.stage}`);
+      console.error(`  Message: ${result.error.message}`);
+
+      if (result.error.type === "memory_error" && device === "wasm") {
+        console.error("\nSuggestions:");
+        console.error("  1. Try using --device=webgpu instead of --device=wasm");
+        console.error("  2. Use a smaller model variant");
+        console.error("  3. Reduce the batch size with --batch-size=1");
+      }
+    }
+
   } finally {
     await browser.close();
     await server.close();
bench/src/web/envinfo.ts
ADDED
@@ -0,0 +1,68 @@
+export interface BrowserEnvInfo {
+  userAgent: string;
+  platform: string;
+  cpuCores: number;
+  memory?: {
+    deviceMemory?: number; // GB
+  };
+  gpu?: {
+    vendor?: string;
+    renderer?: string;
+    webgpuAdapter?: string;
+  };
+}
+
+export async function getBrowserEnvInfo(): Promise<BrowserEnvInfo> {
+  const info: BrowserEnvInfo = {
+    userAgent: navigator.userAgent,
+    platform: navigator.platform,
+    cpuCores: navigator.hardwareConcurrency || 0,
+  };
+
+  // Memory info (Chrome only)
+  if ('deviceMemory' in navigator) {
+    info.memory = {
+      deviceMemory: (navigator as any).deviceMemory,
+    };
+  }
+
+  // GPU info
+  const gpu: BrowserEnvInfo['gpu'] = {};
+
+  // Try to get WebGL renderer info
+  try {
+    const canvas = document.createElement('canvas');
+    const gl = canvas.getContext('webgl') || canvas.getContext('experimental-webgl');
+    if (gl) {
+      const debugInfo = (gl as any).getExtension('WEBGL_debug_renderer_info');
+      if (debugInfo) {
+        gpu.vendor = gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL);
+        gpu.renderer = gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL);
+      }
+    }
+  } catch (e) {
+    // WebGL not available or blocked
+  }
+
+  // Try to get WebGPU adapter info
+  if ('gpu' in navigator) {
+    try {
+      const adapter = await (navigator as any).gpu.requestAdapter();
+      if (adapter?.info) {
+        gpu.webgpuAdapter = adapter.info.description ||
+          adapter.info.vendor ||
+          'WebGPU Available';
+      } else if (adapter) {
+        gpu.webgpuAdapter = 'WebGPU Available';
+      }
+    } catch (e) {
+      // WebGPU not available
+    }
+  }
+
+  if (Object.keys(gpu).length > 0) {
+    info.gpu = gpu;
+  }
+
+  return info;
+}
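A minimal sketch of calling the new browser helper from page code (not part of this commit; it assumes a browser context where navigator and document exist, and the import path is assumed):

// Sketch: log the captured browser environment alongside a benchmark run
import { getBrowserEnvInfo } from "./bench/src/web/envinfo.js"; // assumed path

async function logEnvironment(): Promise<void> {
  const env = await getBrowserEnvInfo();
  console.log("cores:", env.cpuCores, "deviceMemory (GB):", env.memory?.deviceMemory);
  console.log("gpu:", env.gpu?.webgpuAdapter ?? env.gpu?.renderer ?? "unknown");
}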