Update from https://github.com/ArneBinder/argumentation-structure-identification/pull/529 (commit d868d2e): helpers for collecting nested JSON result files into a single pandas DataFrame with hierarchical row and column indices.
```python
import json
from typing import Dict, List, Optional, Tuple

import pandas as pd


def parse_identifier(
    identifier_str: str, defaults: Dict[str, str], parts_sep: str = ",", key_val_sep: str = "="
) -> Dict[str, str]:
    # Parse an identifier such as "model=bert,seed=1" into a dict. Parts without
    # the key-value separator are skipped; splitting on the first separator only
    # keeps values that themselves contain "=" intact.
    parts = [
        part.split(key_val_sep, 1)
        for part in identifier_str.strip().split(parts_sep)
        if key_val_sep in part
    ]
    parts_dict = dict(parts)
    # Parsed values take precedence over the provided defaults.
    return {**defaults, **parts_dict}
```
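For a sense of the precedence, a quick sketch (the identifier string and defaults here are invented): parsed key-value pairs override the defaults, while keys that only occur in the defaults are kept.

```python
>>> parse_identifier("model=bert,seed=1", defaults={"seed": "0", "split": "test"})
{'seed': '1', 'split': 'test', 'model': 'bert'}
```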
```python
def read_nested_json(path: str) -> pd.DataFrame:
    # Read the nested JSON data into a pandas DataFrame. Nested keys are
    # flattened into single column names joined with "/".
    with open(path, "r") as f:
        data = json.load(f)
    result = pd.json_normalize(data, sep="/")
    result.index.name = "entry"
    return result
```
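To illustrate the flattening (the metrics dict below is made up), `pd.json_normalize` with `sep="/"` turns each leaf of the nested structure into one column whose name joins the key path:

```python
>>> data = {"train": {"loss": 0.3, "f1": 0.8}, "val": {"loss": 0.5, "f1": 0.7}}
>>> pd.json_normalize(data, sep="/")
   train/loss  train/f1  val/loss  val/f1
0         0.3       0.8       0.5     0.7
```

These "/"-joined names are what `read_nested_jsons` later re-splits into a column MultiIndex.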
```python
def read_nested_jsons(
    json_paths: List[Tuple[str, str]],
    default_key_values: Optional[Dict[str, str]] = None,
    column_level_names: Optional[List[str]] = None,
) -> pd.DataFrame:
    # Each entry in json_paths is an (identifier_str, json_path) pair. The
    # parsed identifiers become the levels of the resulting row MultiIndex.
    dfs = [read_nested_json(json_path) for _, json_path in json_paths]
    new_index_levels = pd.MultiIndex.from_frame(
        pd.DataFrame(
            [
                parse_identifier(identifier_str, default_key_values or {})
                for identifier_str, _ in json_paths
            ]
        )
    )
    if len(set(new_index_levels)) == len(new_index_levels):
        # All identifiers are distinct: simply stack the dataframes row-wise.
        dfs_concat = pd.concat(
            dfs, keys=list(new_index_levels), names=new_index_levels.names, axis=0
        )
    else:
        # Collect the unique identifiers in order of first occurrence.
        ids_unique = []
        for identifier in new_index_levels:
            if identifier not in ids_unique:
                ids_unique.append(identifier)
        # First combine the dataframes with the same identifier along the columns.
        dfs_new = []
        for identifier in ids_unique:
            dfs_with_id = [df for df, idx in zip(dfs, new_index_levels) if idx == identifier]
            # Assert that all columns are distinct across these dataframes.
            if len({col for df in dfs_with_id for col in df.columns}) != sum(
                len(df.columns) for df in dfs_with_id
            ):
                raise ValueError(
                    "There are duplicate columns across the dataframes with the same identifier."
                )
            dfs_new.append(pd.concat(dfs_with_id, axis=1))
        dfs_concat = pd.concat(dfs_new, keys=ids_unique, names=new_index_levels.names, axis=0)
    # Re-split the flattened "/"-joined column names into a column MultiIndex.
    dfs_concat.columns = pd.MultiIndex.from_tuples(
        [col.split("/") for col in dfs_concat.columns], names=column_level_names
    )
    return dfs_concat
```
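A minimal end-to-end sketch, assuming two runs that wrote nested metrics files; the identifier strings, file names, and numbers are all invented for illustration:

```python
import json
import os
import tempfile

# Hypothetical metrics from two runs of the same model with different seeds.
runs = {
    "model=bert,seed=1": {"train": {"f1": 0.81}, "val": {"f1": 0.74}},
    "model=bert,seed=2": {"train": {"f1": 0.83}, "val": {"f1": 0.76}},
}
tmpdir = tempfile.mkdtemp()
json_paths = []
for i, (identifier, metrics) in enumerate(runs.items()):
    path = os.path.join(tmpdir, f"run{i}.json")
    with open(path, "w") as f:
        json.dump(metrics, f)
    json_paths.append((identifier, path))

df = read_nested_jsons(json_paths, column_level_names=["split", "metric"])
print(df.index.names)     # ['model', 'seed', 'entry']
print(df[("val", "f1")])  # one value per (model, seed, entry) row
```

Note the else branch above: when several files share an identifier (e.g. a run's metrics split across two JSON files), they are first joined column-wise, so each identifier still ends up as a single row group.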