Update app.py
app.py CHANGED

@@ -288,6 +288,10 @@ df_combined["chaos_score"] = np.log1p(df_combined.get("diag_srl_gamma", 0)) / (d
 # ---------------------------------------------------------------
 # Core Visualization and Analysis Functions
 # ---------------------------------------------------------------
+# Cache for resolved audio paths and CMT data to avoid repeated computations
+_audio_path_cache = {}
+_cmt_data_cache = {}
+
 def resolve_audio_path(row: pd.Series) -> str:
     """
     Intelligently reconstructs the full path to an audio file
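
A note on the new module-level caches: plain dicts are easy to inspect and to invalidate by hand, but they grow without bound for the life of the process. If the lookup were keyed by hashable scalars instead of a pandas Series, functools.lru_cache would give a bounded equivalent. A minimal sketch under that assumption (the directory layout is illustrative, not the app's real one):

import os
from functools import lru_cache

@lru_cache(maxsize=4096)  # bounded: least-recently-used entries are evicted
def resolve_cached(source: str, label: str, basename: str) -> str:
    # Hypothetical flat layout, for illustration only: {label}/{basename}.
    candidate = os.path.join(label, basename)
    return candidate if os.path.exists(candidate) else basename

lru_cache also provides resolve_cached.cache_clear() and resolve_cached.cache_info() for invalidation and hit-rate checks, which a bare dict does not.
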
@@ -299,69 +303,70 @@ def resolve_audio_path(row: pd.Series) -> str:
     basename = str(row.get("filepath", ""))
     source = row.get("source", "")
     label = row.get("label", "")
+
+    # Check cache first
+    cache_key = f"{source}:{label}:{basename}"
+    if cache_key in _audio_path_cache:
+        return _audio_path_cache[cache_key]
 
-    #
-    print(f"🔍 Resolving audio path for: {basename} ({source}, {label})")
+    resolved_path = basename  # Default fallback
 
     # For "Dog" data, the structure is: combined/{label}/{filename}
     if source == "Dog":
         # Try with label subdirectory first
         expected_path = os.path.join(DOG_AUDIO_BASE_PATH, label, basename)
-        print(f"  Trying dog path: {expected_path} (exists: {os.path.exists(expected_path)})")
         if os.path.exists(expected_path):
-
-
-
-
-
-
-            return expected_path
+            resolved_path = expected_path
+        else:
+            # Try without subdirectory in case files are flat
+            expected_path = os.path.join(DOG_AUDIO_BASE_PATH, basename)
+            if os.path.exists(expected_path):
+                resolved_path = expected_path
 
     # For "Human" data, search within all "Actor_XX" subfolders
     elif source == "Human":
         if os.path.isdir(HUMAN_AUDIO_BASE_PATH):
-            print(f"  Searching in human base: {HUMAN_AUDIO_BASE_PATH}")
             for actor_folder in os.listdir(HUMAN_AUDIO_BASE_PATH):
                 if actor_folder.startswith("Actor_"):
                     expected_path = os.path.join(HUMAN_AUDIO_BASE_PATH, actor_folder, basename)
-                    print(f"    Trying: {expected_path} (exists: {os.path.exists(expected_path)})")
                     if os.path.exists(expected_path):
-
+                        resolved_path = expected_path
+                        break
 
         # Try without subdirectory in case files are flat
-
-
-
-
-
-        # Fallback for dummy data or other cases
-        print(f"  Trying basename directly: {basename} (exists: {os.path.exists(basename)})")
-        if os.path.exists(basename):
-            return basename
+        if resolved_path == basename:
+            expected_path = os.path.join(HUMAN_AUDIO_BASE_PATH, basename)
+            if os.path.exists(expected_path):
+                resolved_path = expected_path
 
     # Try in local directories (for dummy data)
-    if
-
-
-
+    if resolved_path == basename:
+        if source == "Dog":
+            for label_dir in ["bark", "growl", "whine", "pant"]:
+                local_path = os.path.join(DOG_DIR, label_dir, basename)
+                if os.path.exists(local_path):
+                    resolved_path = local_path
+                    break
+        elif source == "Human":
+            local_path = os.path.join(HUMAN_DIR, "Actor_01", basename)
             if os.path.exists(local_path):
-
-    elif source == "Human":
-        local_path = os.path.join(HUMAN_DIR, "Actor_01", basename)
-        print(f"  Trying local human path: {local_path} (exists: {os.path.exists(local_path)})")
-        if os.path.exists(local_path):
-            return local_path
+                resolved_path = local_path
 
-    #
-
-    return
+    # Cache the result
+    _audio_path_cache[cache_key] = resolved_path
+    return resolved_path
 
 def get_cmt_data(filepath: str, lens: str):
+    # Check cache first
+    cache_key = f"{filepath}:{lens}"
+    if cache_key in _cmt_data_cache:
+        return _cmt_data_cache[cache_key]
+
     try:
         y, _ = sf.read(filepath)
         if y.ndim > 1: y = np.mean(y, axis=1)
     except Exception as e:
-        print(f"Error reading audio file {filepath}: {e}")
+        print(f"Error reading audio file {os.path.basename(filepath)}: {e}")
         return None
 
     cmt = ExpandedCMT()
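
The rewritten resolve_audio_path is a candidate-chain lookup: try the most specific location first, fall back to flatter ones, and record the first hit instead of returning early. The same idea in generic form, as a sketch (the candidate order in the comment mirrors the Dog branch; base, label, and name are placeholders):

from pathlib import Path
from typing import Optional

def first_existing(*candidates: str) -> Optional[str]:
    # Return the first candidate path that exists on disk, else None.
    for c in candidates:
        if Path(c).exists():
            return c
    return None

# Hypothetical call mirroring the Dog branch:
# first_existing(f"{base}/{label}/{name}", f"{base}/{name}", name)
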
@@ -371,11 +376,15 @@ def get_cmt_data(filepath: str, lens: str):
     # The _apply_lens function now returns additional diagnostic info
     phi, w, z, original_count, final_count = cmt._apply_lens(encoded, lens)
 
-    return {
+    result = {
         "phi": phi, "w": w, "z": z,
         "original_count": original_count,
         "final_count": final_count
     }
+
+    # Cache the result
+    _cmt_data_cache[cache_key] = result
+    return result
 
 def generate_holographic_field(z: np.ndarray, phi: np.ndarray, resolution: int):
     if z is None or phi is None or len(z) < 4: return None
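
One caveat with memoizing get_cmt_data this way: every caller now receives the same dict object, so a caller that mutates the result silently corrupts the cache. A sketch of one defensive option, not something app.py currently does, using the standard library's MappingProxyType:

from types import MappingProxyType

_cache = {}

def cache_result(key, result: dict):
    # Store a read-only view so no caller can mutate the shared cache entry.
    view = MappingProxyType(result)
    _cache[key] = view
    return view

frozen = cache_result("demo", {"phi": None, "w": None})
# frozen["phi"] = 1  would raise TypeError
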
@@ -725,7 +734,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
     def update_cross_species_view(species, primary_file, neighbor_file, lens, resolution, wavelength):
         if not primary_file:
             empty_fig = go.Figure(layout={"title": "Please select a primary file."})
-            return empty_fig, empty_fig, "", "", None, None
+            return empty_fig, empty_fig, "", "", None, None
 
         # Get primary row
         primary_row = df_combined[
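
The early return hands each output a safe placeholder: an empty Plotly figure whose title carries the instruction, empty strings for the info panes, and None for the audio players. The figure half of that pattern in isolation, runnable as-is:

import plotly.graph_objects as go

def placeholder_fig(message: str) -> go.Figure:
    # Empty chart whose title doubles as a user-facing hint.
    return go.Figure(layout={"title": message})

empty_fig = placeholder_fig("Please select a primary file.")
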
@@ -756,13 +765,16 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
             (df_combined["source"] == opposite_species)
         ]) > 0 else None
 
-        # Get CMT data for both files
+        # Get CMT data for both files (with progress indication)
         primary_fp = resolve_audio_path(primary_row)
+        print(f"🎵 Processing primary audio: {os.path.basename(primary_fp)}")
         primary_cmt = get_cmt_data(primary_fp, lens)
 
         neighbor_cmt = None
+        neighbor_fp = None
         if neighbor_row is not None:
             neighbor_fp = resolve_audio_path(neighbor_row)
+            print(f"🎵 Processing neighbor audio: {os.path.basename(neighbor_fp)}")
             neighbor_cmt = get_cmt_data(neighbor_fp, lens)
 
         # Create visualizations
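
The 🎵 prints only reach the server console. If user-visible progress is wanted, Gradio handlers can accept a gr.Progress argument; a minimal sketch under that assumption, with sleeps standing in for resolve_audio_path and get_cmt_data:

import time
import gradio as gr

def analyze(name: str, progress=gr.Progress()) -> str:
    # Report stages in the UI instead of printing to the server console.
    progress(0.2, desc="Resolving audio path")
    time.sleep(0.1)  # stand-in for resolve_audio_path
    progress(0.7, desc="Computing CMT data")
    time.sleep(0.1)  # stand-in for get_cmt_data
    return f"done: {name}"

demo_sketch = gr.Interface(analyze, gr.Textbox(), gr.Textbox())
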
@@ -840,12 +852,22 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
                             primary_info_html, neighbor_info_html,
                             primary_audio_out, neighbor_audio_out]
 
-
-
-
-
-
-
+    # Only bind change events, not load events to avoid overwhelming initialization
+    primary_dropdown.change(update_cross_species_view,
+                            inputs=cross_species_inputs,
+                            outputs=cross_species_outputs)
+    neighbor_dropdown.change(update_cross_species_view,
+                             inputs=cross_species_inputs,
+                             outputs=cross_species_outputs)
+    holo_lens_dropdown.change(update_cross_species_view,
+                              inputs=cross_species_inputs,
+                              outputs=cross_species_outputs)
+    holo_resolution_slider.change(update_cross_species_view,
+                                  inputs=cross_species_inputs,
+                                  outputs=cross_species_outputs)
+    holo_wavelength_slider.change(update_cross_species_view,
+                                  inputs=cross_species_inputs,
+                                  outputs=cross_species_outputs)
 
 if __name__ == "__main__":
     demo.launch(share=True, debug=True)
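
All five bindings share one handler and the same inputs/outputs lists, so they can also be expressed as a loop over the controls. A self-contained sketch of the pattern (component names are illustrative, not the app's):

import gradio as gr

with gr.Blocks() as sketch:
    lens_dd = gr.Dropdown(["gamma", "zeta"], label="Lens")
    res_slider = gr.Slider(16, 256, label="Resolution")
    status = gr.Textbox(label="Status")

    def render(lens, resolution):
        return f"lens={lens}, resolution={resolution}"

    controls = [lens_dd, res_slider]
    # .change fires on user edits, not at load time, so startup stays cheap.
    for ctrl in controls:
        ctrl.change(render, inputs=controls, outputs=[status])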