{"benchmark_config_hash": "7270d2d09828b77f58bf99e97af10c4e2c7e76e077cfe9f2df4e7a9745446502", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": false, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-unmonitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.16401455295272171], "gpu_metrics": null, "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.12937415193300694, 0.13816331699490547, 0.1467598279705271, 0.15533767000306398, 0.1638675699941814, 0.16398540197405964]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T14:47:30.223605+00:00"}}
{"benchmark_config_hash": "b48c4a4b4391aab327d4192d9cc4b767758bae3e1387f1bdeb8b80388f3f269e", "config": {"attn_implementation": "eager", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": false, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-unmonitored-b1_s32_n5-eager-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.07302379794418812], "gpu_metrics": null, "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03570388292428106, 0.04520203196443617, 0.054447156959213316, 0.06369225098751485, 0.07288966502528638, 0.07300528697669506]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T14:47:59.568368+00:00"}}
{"benchmark_config_hash": "13d589b91f3120dd28f7dbe6fd35924a4ea3631ace3653805107fc833b7a6a3a", "config": {"attn_implementation": "flash_attention_2", "batch_size": 1, "compile_mode": null, "compile_options": {}, "gpu_monitoring": false, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-unmonitored-b1_s32_n5-flash_attention_2-uncompiled-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.15901796403340995], "gpu_metrics": null, "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.03295301808975637, 0.06441790598910302, 0.09587157401256263, 0.12735522200819105, 0.15882957004942, 0.15895813307724893]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "172461638063bd19075f5737c25a4877623d6053", "commit_message": "wip", "hardware_info": {"gpu_memory_total_gb": 79.4371337890625, "gpu_name": "NVIDIA H100 80GB HBM3", "python_version": "3.13.5", "torch_version": "2.7.1+cu126"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T14:48:08.315064+00:00"}}