Spaces:
Running
Running
| import csv | |
| import os | |
| import sys | |
def agg_S2R_metrics(verilog_eval_rtl, rtllm, w1=155, w2=47):
    """Aggregate Spec-to-RTL scores as a weighted mean of the two benchmarks.

    Weights default to the benchmark problem counts (155 VerilogEval S2R
    problems, 47 RTLLM problems) so the result is a per-problem average.

    Args:
        verilog_eval_rtl: VerilogEval Spec-to-RTL score, or None if missing.
        rtllm: RTLLM score, or None if missing.
        w1, w2: weights for the two scores (default: benchmark sizes).

    Returns:
        Weighted average rounded to 2 decimals, or None if either input is
        missing.  NOTE: an explicit `is None` test is used so that a
        legitimate score of 0 is not mistaken for a missing value.
    """
    if verilog_eval_rtl is None or rtllm is None:
        return None
    result = (w1 * verilog_eval_rtl + w2 * rtllm) / (w1 + w2)
    return round(result, 2)
def agg_MC_metrics(verilog_eval_cc, verigen, w1=155, w2=17):
    """Aggregate Module-Completion scores as a weighted mean of two benchmarks.

    Weights default to the benchmark problem counts (155 VerilogEval CC
    problems, 17 VeriGen problems) so the result is a per-problem average.

    Args:
        verilog_eval_cc: VerilogEval code-completion score, or None if missing.
        verigen: VeriGen score, or None if missing.
        w1, w2: weights for the two scores (default: benchmark sizes).

    Returns:
        Weighted average rounded to 2 decimals, or None if either input is
        missing.  NOTE: an explicit `is None` test is used so that a
        legitimate score of 0 is not mistaken for a missing value.
    """
    if verilog_eval_cc is None or verigen is None:
        return None
    result = (w1 * verilog_eval_cc + w2 * verigen) / (w1 + w2)
    return round(result, 2)
def avg_ppa(metric):
    """Average the Power/Performance/Area entries of one benchmark's metrics.

    Args:
        metric: mapping with "Power", "Performance" and "Area" keys whose
            values are numeric strings (as read from the CSV).

    Returns:
        The mean of the three values rounded to 2 decimals, or None when any
        key is absent or non-numeric.  Callers pass `results.get(bench, {})`,
        so a missing benchmark must not raise KeyError here; None propagates
        into the agg_* helpers, which already treat it as "missing".
    """
    try:
        values = [float(metric[key]) for key in ("Power", "Performance", "Area")]
    except (KeyError, TypeError, ValueError):
        return None
    return round(sum(values) / 3, 2)
if __name__ == "__main__":
    # CLI: python <script> <path_to_input_csv>
    if len(sys.argv) < 2:
        print(f"Usage: python {sys.argv[0]} <path_to_input_csv>")
        sys.exit(1)
    input_csv_path = sys.argv[1]
    if not os.path.exists(input_csv_path):
        print(f"Error: File not found at {input_csv_path}")
        sys.exit(1)
    dir_name = os.path.dirname(input_csv_path)
    base_name = os.path.basename(input_csv_path)
    output_base_name = base_name.replace("results", "aggregated_scores")
    # Guard: if the filename does not contain "results", replace() is a no-op
    # and we would silently overwrite the input file.  Prefix instead.
    if output_base_name == base_name:
        output_base_name = "aggregated_scores_" + base_name
    output_csv_path = os.path.join(dir_name, output_base_name)
    print(f"Processing {input_csv_path}...")
    agg_csv_data = []
    header = [
        "Model",
        "Agg S2R",
        "Agg MC",
        "Agg VerilogEval S2R",
        "Agg VerilogEval MC",
        "Agg RTLLM",
        "Agg VeriGen",
    ]
    agg_csv_data.append(header)
    # Input layout: row 1 = goals (Power/Performance/Area), row 2 = benchmark
    # names; each subsequent row is one model's metrics, first column = name.
    with open(input_csv_path, newline="", encoding="utf-8") as csvfile:
        reader = csv.reader(csvfile)
        goals_row = next(reader)[1:]  # Skip the first column (model name)
        benchmarks_row = next(reader)[1:]
        for model_metrics in reader:
            if not model_metrics or not model_metrics[0].strip():
                continue  # Skip empty lines
            model = model_metrics[0]
            # Build {benchmark: {goal: metric}} from the parallel header rows.
            results = {}
            for i, metric in enumerate(model_metrics[1:]):
                results.setdefault(benchmarks_row[i], {})[goals_row[i]] = metric
            # Aggregate per-benchmark PPA means, then the weighted S2R/MC
            # roll-ups across benchmark pairs.
            agg_verilog_eval_s2r = avg_ppa(results.get("VerilogEval S2R", {}))
            agg_rtllm = avg_ppa(results.get("RTLLM", {}))
            agg_s2r = agg_S2R_metrics(agg_verilog_eval_s2r, agg_rtllm)
            agg_verilog_eval_mc = avg_ppa(results.get("VerilogEval MC", {}))
            agg_verigen = avg_ppa(results.get("VeriGen", {}))
            agg_mc = agg_MC_metrics(agg_verilog_eval_mc, agg_verigen)
            # Append all calculated values in the header's column order.
            agg_csv_data.append(
                [
                    model,
                    agg_s2r,
                    agg_mc,
                    agg_verilog_eval_s2r,
                    agg_verilog_eval_mc,
                    agg_rtllm,
                    agg_verigen,
                ]
            )
    with open(output_csv_path, "w", newline="", encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(agg_csv_data)
    print("Done")