Cell: benchmark | 38.65s
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "numpy",
# "torch",
# "kernels-benchmark-tools",
# "kernels",
# ]
#
# [tool.uv.sources]
# kernels-benchmark-tools = { git = "https://github.com/drbh/kernels-benchmark-tools.git", branch = "main" }
# ///
import torch
import sys
import os
import kernels_benchmark_tools as kbt
from kernels import get_kernel

# Download the pre-built flash-attn kernel from the Hugging Face Hub
hf_kernels_flash_attn = get_kernel("kernels-community/flash-attn", revision="v0.0.2")


def hf_flash_attention(query, key, value):
    """HuggingFace Kernels Flash Attention"""
    return hf_kernels_flash_attn.fwd(query, key, value, is_causal=False)[0]


# Register the implementation with the benchmark harness
kbt.add(
    "hf_kernels_flash_attn",
    hf_flash_attention,
    tags={"family": "hf-kernels", "backend": "flash-attn", "compile": "none"},
)

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cpu":
        print("HF Kernels Flash Attention requires CUDA - skipping benchmark")
        sys.exit(0)

    dtype = "bfloat16"

    # Flux-like workloads: sequence lengths of base + L at 24 heads, head_dim 128
    base = 1024
    flux_sizes = [128, 256, 320, 384, 448, 512]
    heads = 24
    head_dim = 128

    wl = []
    for L in flux_sizes:
        wl.append(
            {
                "name": f"flux_L{L}",
                "batch": 1,
                "seq_len": base + L,
                "heads": heads,
                "head_dim": head_dim,
                "dtype": dtype,
                "device": device,
                "seed": 0,
            }
        )

    # Run 5 timed reps (2 warmup) per workload and check against the math reference
    kbt.run(
        wl,
        jsonl="attn.jsonl",
        reps=5,
        warmup=2,
        gen=kbt.attn.gen_qkv,
        ref=kbt.attn.ref_math,
        cmp=kbt.attn.cmp_allclose,
    )
    kbt.summarize(["attn.jsonl"])
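
For comparison, the same kbt.add pattern can register additional implementations against the identical workloads. The sketch below is a hypothetical baseline using PyTorch's scaled_dot_product_attention; it assumes kbt.attn.gen_qkv yields q/k/v in the (batch, seq_len, heads, head_dim) layout that flash-attn's fwd consumes, so the tensors are transposed for SDPA and back.

import torch
import kernels_benchmark_tools as kbt


def sdpa_attention(query, key, value):
    """PyTorch SDPA baseline (hypothetical; not part of the original script)."""
    # flash-attn's fwd takes (batch, seq_len, heads, head_dim); SDPA expects
    # (batch, heads, seq_len, head_dim), so transpose in and back out.
    q, k, v = (t.transpose(1, 2) for t in (query, key, value))
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=False)
    return out.transpose(1, 2).contiguous()


kbt.add(
    "torch_sdpa",
    sdpa_attention,
    tags={"family": "torch", "backend": "sdpa", "compile": "none"},
)

With this registered before kbt.run, both implementations would appear side by side in the kbt.summarize table.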
impl                   wl         p50(ms)  ok
hf_kernels_flash_attn  flux_L128  0.35     True
hf_kernels_flash_attn  flux_L256  0.38     True
hf_kernels_flash_attn  flux_L320  0.49     True
hf_kernels_flash_attn  flux_L384  0.52     True
hf_kernels_flash_attn  flux_L448  0.54     True
hf_kernels_flash_attn  flux_L512  0.56     True
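
The p50 latencies grow with sequence length roughly as expected for attention's quadratic cost. As a rough back-of-the-envelope check (an estimate only, using the standard ~4*B*H*S^2*D FLOP approximation for the forward pass and the sizes hard-coded in the script):

# Implied throughput from the p50 values above (estimate: ignores softmax
# and per-call overheads; 4*B*H*S^2*D counts the QK^T and PV matmuls).
base, heads, head_dim, batch = 1024, 24, 128, 1
p50_ms = {128: 0.35, 256: 0.38, 320: 0.49, 384: 0.52, 448: 0.54, 512: 0.56}
for L, ms in p50_ms.items():
    s = base + L
    flops = 4 * batch * heads * s * s * head_dim
    print(f"flux_L{L}: seq_len={s}  ~{flops / (ms * 1e-3) / 1e12:.1f} TFLOP/s implied")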
UV Install Logs
Fetching 20 files: 100%|██████████| 20/20 [00:01<00:00, 15.31it/s]