{"benchmark_config_hash": "a6ae95a4ff9612efe2180b1f76599e1105627d9226e6b752fbc58778bc9b037d", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.1680745700141415], "gpu_metrics": [{"memory_used": [1.8287450075149536e-05], "monitoring_status": "success", "timestamp_0": 1760645527.3762913, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.13334437110461295, 0.1421145290369168, 0.1507436370011419, 0.15935761504806578, 0.1679225090192631, 0.16804650810081512]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "ed0ce751ce264343d65432bd98c77983203fa592", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:12:07.439822+00:00"}}
{"benchmark_config_hash": "c9dbfb6b2b27ac313972a368dcbc99c91035eeda27063ff87e7d84d267d60577", "config": {"attn_implementation": "eager", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-eager-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.07497831690125167], "gpu_metrics": [{"memory_used": [1.8285587430000305e-05], "monitoring_status": "success", "timestamp_0": 1760645557.6526833, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03745339100714773, 0.047011121991090477, 0.05629513401072472, 0.06557491701096296, 0.0748380379518494, 0.07496014598291367]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "ed0ce751ce264343d65432bd98c77983203fa592", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:12:37.653404+00:00"}}
{"benchmark_config_hash": "287b04d98eb051e4e2310293a8561a23cd754afb7d8b6fb8be6c3fc199556d9a", "config": {"attn_implementation": "flash_attention_2", "batch_size": 1, "compile_mode": null, "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flash_attention_2-uncompiled-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.1649869829416275], "gpu_metrics": [{"memory_used": [1.8283724784851074e-05], "monitoring_status": "success", "timestamp_0": 1760645566.5239754, "timestamps": [0.0], "utilization": [3]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03463126195129007, 0.06736567697953433, 0.09985240490641445, 0.13243451993912458, 0.1648113209521398, 0.16492876899428666]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "ed0ce751ce264343d65432bd98c77983203fa592", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-16T20:12:46.587142+00:00"}}