| {"benchmark_config_hash": "7270d2d09828b77f58bf99e97af10c4e2c7e76e077cfe9f2df4e7a9745446502", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": false, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-unmonitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.3041414029999032], "gpu_metrics": null, "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.15716620399985004, 0.1941495969999778, 0.23144788599984167, 0.2677782139999181, 0.30392154899982415, 0.3040946219998659]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "9d8071617aa14610e3905bf792c954aa2d0ce7f9", "commit_message": "refactor: extract push to hub in fn", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:15:34.973009+00:00"}} | |
| {"benchmark_config_hash": "a6ae95a4ff9612efe2180b1f76599e1105627d9226e6b752fbc58778bc9b037d", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.30297322699993856], "gpu_metrics": [{"memory_used": [1.5011988580226898e-05, 1.501571387052536e-05], "monitoring_status": "success", "timestamp_0": 1760717781.2524352, "timestamps": [0.0, 0.20119452476501465], "utilization": [65, 34]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.15646053399996163, 0.19354905900013364, 0.22991954999997688, 0.2664893240000765, 0.3027854730000854, 0.3029307560000234]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "9d8071617aa14610e3905bf792c954aa2d0ce7f9", "commit_message": "refactor: extract push to hub in fn", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:16:21.454185+00:00"}} | |
| {"benchmark_config_hash": "c9dbfb6b2b27ac313972a368dcbc99c91035eeda27063ff87e7d84d267d60577", "config": {"attn_implementation": "eager", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-eager-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.19824662699988949], "gpu_metrics": [{"memory_used": [1.5008263289928436e-05], "monitoring_status": "success", "timestamp_0": 1760717812.7323098, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.04797185300003548, 0.0856035479998809, 0.12295031700000436, 0.16093133899994427, 0.1980559030000677, 0.19821649599998636]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "9d8071617aa14610e3905bf792c954aa2d0ce7f9", "commit_message": "refactor: extract push to hub in fn", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:16:52.829688+00:00"}} | |
| {"benchmark_config_hash": "287b04d98eb051e4e2310293a8561a23cd754afb7d8b6fb8be6c3fc199556d9a", "config": {"attn_implementation": "flash_attention_2", "batch_size": 1, "compile_mode": null, "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flash_attention_2-uncompiled-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.22922906500002682], "gpu_metrics": [{"memory_used": [1.4987774193286896e-05, 1.4987774193286896e-05], "monitoring_status": "success", "timestamp_0": 1760717822.2332113, "timestamps": [0.0, 0.20158982276916504], "utilization": [80, 81]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.05046448699999928, 0.09512452000012672, 0.13967062200003966, 0.18419602300014049, 0.22898125000006075, 0.22912861300005716]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "9d8071617aa14610e3905bf792c954aa2d0ce7f9", "commit_message": "refactor: extract push to hub in fn", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:17:02.435521+00:00"}} |