import re
from typing import Dict, Any, List, Optional

import pandas as pd
import phoenix as px
from phoenix.trace import SpanEvaluations

from comparison import AnswerComparator
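# NOTE: ``comparison.AnswerComparator`` is a project-local helper. As used in
# this module, ``evaluate_answer(task_id, predicted_answer)`` is expected to
# return a dict with the keys ``predicted_answer``, ``actual_answer``,
# ``exact_match``, ``similarity_score`` and ``contains_answer``.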
class GAIAPhoenixEvaluator:
    """Phoenix evaluator for GAIA dataset ground truth comparison."""

    def __init__(self, metadata_path: str = "data/metadata.jsonl"):
        self.comparator = AnswerComparator(metadata_path)
        self.eval_name = "gaia_ground_truth"

    def evaluate_spans(self, spans_df: pd.DataFrame) -> List[SpanEvaluations]:
        """Evaluate spans and return Phoenix SpanEvaluations."""
        evaluations = []
        for _, span in spans_df.iterrows():
            # Extract the task_id and the predicted answer from the span
            task_id = self._extract_task_id(span)
            predicted_answer = self._extract_predicted_answer(span)
            span_id = span.get("context.span_id")
            if task_id and predicted_answer is not None and span_id:
                evaluation = self.comparator.evaluate_answer(task_id, predicted_answer)
                # Create an evaluation record for Phoenix
                eval_record = {
                    "span_id": span_id,
                    "score": 1.0 if evaluation["exact_match"] else evaluation["similarity_score"],
                    "label": "correct" if evaluation["exact_match"] else "incorrect",
                    "explanation": self._create_explanation(evaluation),
                    "task_id": task_id,
                    "predicted_answer": evaluation["predicted_answer"],
                    "ground_truth": evaluation["actual_answer"],
                    "exact_match": evaluation["exact_match"],
                    "similarity_score": evaluation["similarity_score"],
                    "contains_answer": evaluation["contains_answer"],
                }
                evaluations.append(eval_record)
        if evaluations:
            # Wrap the records in a SpanEvaluations object
            eval_df = pd.DataFrame(evaluations)
            return [SpanEvaluations(eval_name=self.eval_name, dataframe=eval_df)]
        return []
    def _extract_task_id(self, span) -> Optional[str]:
        """Extract task_id from span data."""
        # Try span attributes first
        attributes = span.get("attributes", {})
        if isinstance(attributes, dict) and "task_id" in attributes:
            return attributes["task_id"]
        # Try input data
        input_data = span.get("input", {})
        if isinstance(input_data, dict) and "task_id" in input_data:
            return input_data["task_id"]
        # Fall back to looking for a UUID-shaped task_id in the raw input
        # value or in the span name
        uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
        input_value = span.get("input.value", "")
        if isinstance(input_value, str):
            match = re.search(uuid_pattern, input_value)
            if match:
                return match.group(0)
        span_name = span.get("name", "")
        if isinstance(span_name, str):
            match = re.search(uuid_pattern, span_name)
            if match:
                return match.group(0)
        return None
    def _extract_predicted_answer(self, span) -> Optional[str]:
        """Extract the predicted answer from the span output."""
        # Try the common output fields in order of preference
        output_fields = ["output.value", "output", "response", "result"]
        for field in output_fields:
            value = span.get(field)
            if value is not None:
                return str(value)
        return None
    def _create_explanation(self, evaluation: Dict[str, Any]) -> str:
        """Create a human-readable explanation of the evaluation."""
        predicted = evaluation["predicted_answer"]
        actual = evaluation["actual_answer"]
        exact_match = evaluation["exact_match"]
        similarity = evaluation["similarity_score"]
        contains = evaluation["contains_answer"]
        if actual is None:
            return "❓ No ground truth available for comparison"
        explanation = f"Predicted: '{predicted}' | Ground Truth: '{actual}' | "
        if exact_match:
            explanation += "✅ Exact match"
        elif contains:
            explanation += f"⚠️ Contains correct answer (similarity: {similarity:.3f})"
        else:
            explanation += f"❌ Incorrect (similarity: {similarity:.3f})"
        return explanation
def add_gaia_evaluations_to_phoenix(spans_df: pd.DataFrame, metadata_path: str = "data/metadata.jsonl") -> List[SpanEvaluations]:
    """Add GAIA evaluation results to Phoenix spans."""
    evaluator = GAIAPhoenixEvaluator(metadata_path)
    return evaluator.evaluate_spans(spans_df)
def log_evaluations_to_phoenix(evaluations_df: pd.DataFrame, session_id: Optional[str] = None) -> Optional[pd.DataFrame]:
    """Log evaluation results directly to Phoenix."""
    try:
        client = px.Client()
        # Get the current spans so evaluations can be matched to span_ids
        spans_df = client.get_spans_dataframe()
        if spans_df is None or spans_df.empty:
            print("No spans found to attach evaluations to")
            return None
        # Create evaluation records for Phoenix
        evaluation_records = []
        spans_with_evals = []
        for _, eval_row in evaluations_df.iterrows():
            task_id = eval_row["task_id"]
            # Try to find a matching span by searching for the task_id in the span input
            matching_spans = spans_df[
                spans_df["input.value"].astype(str).str.contains(task_id, na=False, case=False)
            ]
            if len(matching_spans) == 0:
                # Fall back to searching the span name
                matching_spans = spans_df[
                    spans_df["name"].astype(str).str.contains(task_id, na=False, case=False)
                ]
            if len(matching_spans) > 0:
                span_id = matching_spans.iloc[0]["context.span_id"]
                # Create an evaluation record in the format Phoenix expects
                evaluation_record = {
                    "span_id": span_id,
                    "name": "gaia_ground_truth",
                    "score": eval_row["similarity_score"],
                    "label": "correct" if bool(eval_row["exact_match"]) else "incorrect",
                    "explanation": (
                        f"Predicted: '{eval_row['predicted_answer']}' | "
                        f"Ground Truth: '{eval_row['actual_answer']}' | "
                        f"Similarity: {eval_row['similarity_score']:.3f} | "
                        f"Exact Match: {eval_row['exact_match']}"
                    ),
                    "annotator_kind": "HUMAN",
                    "metadata": {
                        "task_id": task_id,
                        "exact_match": eval_row["exact_match"],
                        "similarity_score": eval_row["similarity_score"],
                        "contains_answer": eval_row["contains_answer"],
                        "predicted_answer": eval_row["predicted_answer"],
                        "ground_truth": eval_row["actual_answer"],
                    },
                }
                evaluation_records.append(evaluation_record)
                spans_with_evals.append(span_id)
if evaluation_records:
# Convert to DataFrame for Phoenix
eval_df = pd.DataFrame(evaluation_records)
# Create SpanEvaluations object
span_evaluations = SpanEvaluations(
eval_name="gaia_ground_truth",
dataframe=eval_df
)
# Log evaluations to Phoenix
try:
# Try the newer Phoenix API
px.log_evaluations(span_evaluations)
print(f"✅ Successfully logged {len(evaluation_records)} evaluations to Phoenix")
except AttributeError:
# Fallback for older Phoenix versions
client.log_evaluations(span_evaluations)
print(f"✅ Successfully logged {len(evaluation_records)} evaluations to Phoenix (fallback)")
return eval_df
else:
print("⚠️ No matching spans found for evaluations")
if spans_df is not None:
print(f"Available spans: {len(spans_df)}")
if len(spans_df) > 0:
print("Sample span names:", spans_df['name'].head(3).tolist())
return None
    except Exception as e:
        print(f"❌ Could not log evaluations to Phoenix: {e}")
        import traceback

        traceback.print_exc()
        return None
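# Example usage (illustrative sketch only): assumes a Phoenix server is
# already running and that agent spans have been collected. The exact
# logging call that is available depends on the installed Phoenix version.
if __name__ == "__main__":
    client = px.Client()
    spans_df = client.get_spans_dataframe()
    if spans_df is None or spans_df.empty:
        print("No spans available yet - run the agent first")
    else:
        # Score each span against the GAIA ground truth and push the results
        # back to Phoenix so they appear alongside the traces.
        for span_evaluations in add_gaia_evaluations_to_phoenix(spans_df):
            try:
                client.log_evaluations(span_evaluations)
            except Exception as exc:
                print(f"Could not log evaluations: {exc}")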