{"benchmark_config_hash": "a6ae95a4ff9612efe2180b1f76599e1105627d9226e6b752fbc58778bc9b037d", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.16584114998113364], "gpu_metrics": [{"memory_used": [1.8287450075149536e-05], "monitoring_status": "success", "timestamp_0": 1760647170.1845338, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.13130117394030094, 0.14007350301835686, 0.14863318600691855, 0.15717142901849002, 0.16568909003399312, 0.16581220901571214]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:39:30.245595+00:00"}}
{"benchmark_config_hash": "c9dbfb6b2b27ac313972a368dcbc99c91035eeda27063ff87e7d84d267d60577", "config": {"attn_implementation": "eager", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-eager-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.0755244450410828], "gpu_metrics": [{"memory_used": [1.8285587430000305e-05], "monitoring_status": "success", "timestamp_0": 1760647199.7972832, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03748321393504739, 0.047307261964306235, 0.05671410297509283, 0.06605599995236844, 0.07538218493573368, 0.07550645398441702]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:39:59.797939+00:00"}}
{"benchmark_config_hash": "287b04d98eb051e4e2310293a8561a23cd754afb7d8b6fb8be6c3fc199556d9a", "config": {"attn_implementation": "flash_attention_2", "batch_size": 1, "compile_mode": null, "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flash_attention_2-uncompiled-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.1624490440590307], "gpu_metrics": [{"memory_used": [1.8283724784851074e-05], "monitoring_status": "success", "timestamp_0": 1760647208.4838204, "timestamps": [0.0], "utilization": [8]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03385908110067248, 0.06587888801004738, 0.0979673300171271, 0.1301550380885601, 0.1622668420895934, 0.16238892008550465]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:40:08.543734+00:00"}}