{"benchmark_config_hash": "7270d2d09828b77f58bf99e97af10c4e2c7e76e077cfe9f2df4e7a9745446502", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": false, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-unmonitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.3026659859999654], "gpu_metrics": null, "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.15688081500002227, 0.19384965500012186, 0.23008350100008101, 0.2661790250001559, 0.3024846719999914, 0.30262457499998163]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "a7ad8ae3b9efaf5364518664f8a9cc2383f50a04", "commit_message": "refactor: enable gpu monitoring by default", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T15:59:54.849918+00:00"}}
{"benchmark_config_hash": "a6ae95a4ff9612efe2180b1f76599e1105627d9226e6b752fbc58778bc9b037d", "config": {"attn_implementation": "flex_attention", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flex_attention-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.3039931000000706], "gpu_metrics": [{"memory_used": [1.5011988580226898e-05, 1.501571387052536e-05], "monitoring_status": "success", "timestamp_0": 1760716841.1127856, "timestamps": [0.0, 0.20124602317810059], "utilization": [73, 33]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.15727252800002134, 0.19433358299988868, 0.23079443699998592, 0.2674020940000901, 0.30379783600005794, 0.3039506089999122]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "a7ad8ae3b9efaf5364518664f8a9cc2383f50a04", "commit_message": "refactor: enable gpu monitoring by default", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:00:41.314591+00:00"}}
{"benchmark_config_hash": "c9dbfb6b2b27ac313972a368dcbc99c91035eeda27063ff87e7d84d267d60577", "config": {"attn_implementation": "eager", "batch_size": 1, "compile_mode": "default", "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-eager-compiled_default-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.19554319100006978], "gpu_metrics": [{"memory_used": [1.5008263289928436e-05], "monitoring_status": "success", "timestamp_0": 1760716872.5733202, "timestamps": [0.0], "utilization": [0]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.04732201900014843, 0.08486185600008866, 0.12167292899994209, 0.15854771399995116, 0.1953713970001445, 0.1955159200001617]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "a7ad8ae3b9efaf5364518664f8a9cc2383f50a04", "commit_message": "refactor: enable gpu monitoring by default", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:01:12.667792+00:00"}}
{"benchmark_config_hash": "287b04d98eb051e4e2310293a8561a23cd754afb7d8b6fb8be6c3fc199556d9a", "config": {"attn_implementation": "flash_attention_2", "batch_size": 1, "compile_mode": null, "compile_options": {}, "gpu_monitoring": true, "kernelize": false, "measurement_iterations": 1, "name": "w1_i1-monitored-b1_s32_n5-flash_attention_2-uncompiled-unkernelized", "num_tokens_to_generate": 5, "sdpa_backend": null, "sequence_length": 32, "warmup_iterations": 1}, "measurements": {"e2e_latency": [0.22783440199987126], "gpu_metrics": [{"memory_used": [1.4987774193286896e-05, 1.4987774193286896e-05], "monitoring_status": "success", "timestamp_0": 1760716882.205188, "timestamps": [0.0, 0.20102262496948242], "utilization": [35, 82]}], "shape_and_decoded_outputs": ["(1, 37) | 18 Brumaire in"], "token_generation_times": [[0.05009788799998205, 0.09451020899996365, 0.13892345899989778, 0.18322076699996614, 0.22762482799998907, 0.22775470999999925]]}, "metadata": {"branch_name": "feat/benchmark_v2_ci", "commit_id": "a7ad8ae3b9efaf5364518664f8a9cc2383f50a04", "commit_message": "refactor: enable gpu monitoring by default", "hardware_info": {"gpu_memory_total_gb": 22.30126953125, "gpu_name": "NVIDIA A10G", "python_version": "3.10.12", "torch_version": "2.8.0+cu128"}, "model_id": "meta-llama/Llama-3.1-8B-Instruct", "timestamp": "2025-10-17T16:01:22.406842+00:00"}}