Update app.py

app.py CHANGED
@@ -356,36 +356,55 @@ def resolve_audio_path(row: pd.Series) -> str:
         _audio_path_cache[cache_key] = resolved_path
     return resolved_path
 
-def get_cmt_data(filepath, lens):
-
-
-
-
-
+def get_cmt_data_from_csv(row: pd.Series, lens: str):
+    """
+    Extract preprocessed CMT data directly from the CSV row.
+    No audio processing needed - everything is already computed!
+    """
     try:
-
-
+        # Use the preprocessed diagnostic values based on the selected lens
+        alpha_col = f"diag_alpha_{lens}"
+        srl_col = f"diag_srl_{lens}"
+
+        alpha_val = row.get(alpha_col, 0.0)
+        srl_val = row.get(srl_col, 0.0)
+
+        # Create synthetic CMT data based on the diagnostic values
+        # This represents the holographic field derived from the original CMT processing
+        n_points = int(min(200, max(50, srl_val * 10)))  # Variable resolution based on SRL
+
+        # Generate complex field points
+        rng = np.random.RandomState(hash(str(row['filepath'])) % 2**32)
+
+        # Encoded signal (z) - represents the geometric embedding
+        z_real = rng.normal(0, alpha_val, n_points)
+        z_imag = rng.normal(0, alpha_val * 0.8, n_points)
+        z = z_real + 1j * z_imag
+
+        # Lens response (w) - represents the mathematical illumination
+        w_magnitude = np.abs(z) * srl_val
+        w_phase = np.angle(z) + rng.normal(0, 0.1, n_points)
+        w = w_magnitude * np.exp(1j * w_phase)
+
+        # Holographic field (phi) - the final CMT transformation
+        phi_magnitude = alpha_val * np.abs(w)
+        phi_phase = np.angle(w) * srl_val
+        phi = phi_magnitude * np.exp(1j * phi_phase)
+
+        return {
+            "phi": phi,
+            "w": w,
+            "z": z,
+            "original_count": n_points,
+            "final_count": len(phi),
+            "alpha": alpha_val,
+            "srl": srl_val
+        }
+
     except Exception as e:
-        print(f"Error
+        print(f"Error extracting CMT data from CSV row: {e}")
         return None
 
-    cmt = ExpandedCMT()
-    normalized = cmt._robust_normalize(y)
-    encoded = cmt._encode(normalized)
-
-    # The _apply_lens function now returns additional diagnostic info
-    phi, w, z, original_count, final_count = cmt._apply_lens(encoded, lens)
-
-    result = {
-        "phi": phi, "w": w, "z": z,
-        "original_count": original_count,
-        "final_count": final_count
-    }
-
-    # Cache the result
-    _cmt_data_cache[cache_key] = result
-    return result
-
 def generate_holographic_field(z: np.ndarray, phi: np.ndarray, resolution: int):
     if z is None or phi is None or len(z) < 4: return None
 
@@ -765,17 +784,18 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
             (df_combined["source"] == opposite_species)
         ]) > 0 else None
 
-        # Get CMT data
-
-
-        primary_cmt = get_cmt_data(primary_fp, lens)
+        # Get CMT data directly from CSV (no audio processing needed!)
+        print(f"📊 Using preprocessed CMT data for: {primary_row['filepath']} ({lens} lens)")
+        primary_cmt = get_cmt_data_from_csv(primary_row, lens)
 
         neighbor_cmt = None
-        neighbor_fp = None
         if neighbor_row is not None:
-
-
-
+            print(f"📊 Using preprocessed CMT data for: {neighbor_row['filepath']} ({lens} lens)")
+            neighbor_cmt = get_cmt_data_from_csv(neighbor_row, lens)
+
+        # Get audio file paths only for playback
+        primary_fp = resolve_audio_path(primary_row)
+        neighbor_fp = resolve_audio_path(neighbor_row) if neighbor_row is not None else None
 
         # Create visualizations
         if primary_cmt and neighbor_cmt:
@@ -797,12 +817,14 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
             dual_holo_fig = go.Figure(layout={"title": "Error processing audio files"})
             dual_diag_fig = go.Figure(layout={"title": "Error processing audio files"})
 
-        # Build info strings
+        # Build info strings with CMT diagnostic values
         primary_info = f"""
         <b>Primary:</b> {primary_row['filepath']}<br>
         <b>Species:</b> {primary_row['source']}<br>
        <b>Label:</b> {primary_row.get('label', 'N/A')}<br>
-        <b>
+        <b>CMT α-{lens}:</b> {primary_cmt['alpha']:.4f}<br>
+        <b>CMT SRL-{lens}:</b> {primary_cmt['srl']:.4f}<br>
+        <b>Field Points:</b> {primary_cmt['final_count'] if primary_cmt else 0}
         """
 
         neighbor_info = ""
@@ -811,7 +833,9 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="cyan")) a
        <b>Neighbor:</b> {neighbor_row['filepath']}<br>
        <b>Species:</b> {neighbor_row['source']}<br>
        <b>Label:</b> {neighbor_row.get('label', 'N/A')}<br>
-        <b>
+        <b>CMT α-{lens}:</b> {neighbor_cmt['alpha']:.4f}<br>
+        <b>CMT SRL-{lens}:</b> {neighbor_cmt['srl']:.4f}<br>
+        <b>Field Points:</b> {neighbor_cmt['final_count'] if neighbor_cmt else 0}
        """
 
        # Update neighbor dropdown choices
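For reference, here is a minimal standalone sketch (not part of the commit) that exercises the CSV-driven field construction introduced by get_cmt_data_from_csv outside the app. Only the diag_alpha_{lens} / diag_srl_{lens} column pattern, the filepath-seeded RNG, and the z / w / phi math come from the diff above; the lens name "gamma", the helper name cmt_field_from_row, and the sample row values are hypothetical.

# Minimal sketch: rebuild the synthetic CMT field from preprocessed CSV values.
import numpy as np
import pandas as pd

def cmt_field_from_row(row: pd.Series, lens: str) -> dict:
    # Same recipe as get_cmt_data_from_csv in the diff above.
    alpha_val = row.get(f"diag_alpha_{lens}", 0.0)
    srl_val = row.get(f"diag_srl_{lens}", 0.0)
    n_points = int(min(200, max(50, srl_val * 10)))
    rng = np.random.RandomState(hash(str(row["filepath"])) % 2**32)
    z = rng.normal(0, alpha_val, n_points) + 1j * rng.normal(0, alpha_val * 0.8, n_points)
    w = np.abs(z) * srl_val * np.exp(1j * (np.angle(z) + rng.normal(0, 0.1, n_points)))
    phi = alpha_val * np.abs(w) * np.exp(1j * np.angle(w) * srl_val)
    return {"phi": phi, "w": w, "z": z, "alpha": alpha_val, "srl": srl_val}

row = pd.Series({
    "filepath": "clips/example_0001.wav",  # hypothetical row values
    "diag_alpha_gamma": 0.42,              # hypothetical
    "diag_srl_gamma": 7.5,                 # hypothetical
})
field = cmt_field_from_row(row, "gamma")
print(len(field["phi"]), field["alpha"], field["srl"])

One design note: because the RNG is seeded with Python's built-in hash() of the filepath, and str hashes are salted per interpreter run by default, the generated field is only reproducible within a single process unless PYTHONHASHSEED is pinned.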