Romain Fayoux committed on
Commit
3a7aaed
·
1 Parent(s): 11a8722

Started to run evaluations outside of submit loop

eval/create_dataset.py ADDED
@@ -0,0 +1,23 @@
+ import pandas as pd
+ import phoenix as px
+ from phoenix.client import Client
+
+
+ def create_dataset():
+     dataset_df = pd.read_json("./data/metadata.jsonl", lines=True)
+
+     # This script expects a running Phoenix server; if none is running, uncomment:
+     # _ = px.launch_app()
+     px_client = Client()
+     dataset = px_client.datasets.create_dataset(
+         dataframe=dataset_df,
+         name="gaia",
+         input_keys=["Question"],
+         output_keys=["Final answer"],
+         metadata_keys=["task_id", "Annotator Metadata", "file_name"],
+     )
+     print(f"Dataset created: {dataset.id}")
+
+
+ if __name__ == "__main__":
+     create_dataset()
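For reference, create_dataset.py only relies on a handful of columns from ./data/metadata.jsonl. A minimal sanity-check sketch — the example_record values are made up and purely illustrative; only the key names come from the script above:

import pandas as pd

# Hypothetical record showing the shape one metadata.jsonl line is assumed to have.
example_record = {
    "task_id": "placeholder-task-id",
    "Question": "Placeholder question text",
    "Final answer": "Placeholder answer",
    "Annotator Metadata": {"Steps": "placeholder"},
    "file_name": "",
}

# Verify the real file exposes every column create_dataset() passes to Phoenix.
required = {"Question", "Final answer", "task_id", "Annotator Metadata", "file_name"}
df = pd.read_json("./data/metadata.jsonl", lines=True)
print("missing columns:", required - set(df.columns) or "none")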
eval/eval_notebook.ipynb ADDED
@@ -0,0 +1,135 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 19,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import pandas as pd\n",
+     "import json\n",
+     "from phoenix.client import Client\n",
+     "\n",
+     "# Load the existing spans\n",
+     "spans_df = Client().spans.get_spans_dataframe(project_name=\"default\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 20,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Load the source of truth\n",
+     "dataset_df = pd.read_json(\"../data/metadata.jsonl\", lines=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 21,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Filter by root agents\n",
+     "agents_df = spans_df[(spans_df.span_kind == 'AGENT') & (spans_df.parent_id.isna()) & (spans_df.status_code == 'OK')]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 22,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/var/folders/pj/v1zrqj1d10x9_1rd2njh_r_r0000gn/T/ipykernel_98186/3107371246.py:2: SettingWithCopyWarning: \n",
+       "A value is trying to be set on a copy of a slice from a DataFrame.\n",
+       "Try using .loc[row_indexer,col_indexer] = value instead\n",
+       "\n",
+       "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
+       " agents_df[\"task\"] = agents_df[\"attributes.input.value\"].apply(json.loads).apply(lambda x : x[\"task\"]).str.replace(r'\\s*The mentionned file can be downloaded from.*$', '', regex=True)\n"
+      ]
+     }
+    ],
+    "source": [
+     "# Retrieve the right question and add the answer\n",
+     "agents_df[\"task\"] = agents_df[\"attributes.input.value\"].apply(json.loads).apply(lambda x : x[\"task\"]).str.replace(r'\\s*The mentionned file can be downloaded from.*$', '', regex=True)\n",
+     "agents_merged_df = pd.merge(agents_df,dataset_df,how=\"left\",left_on=\"task\", right_on=\"Question\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 29,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from phoenix.evals.evaluators import bind_evaluator, async_evaluate_dataframe\n",
+     "from phoenix.evals.metrics import exact_match\n",
+     "from evaluators import conciseness_evaluator\n",
+     "\n",
+     "# Define the evaluator\n",
+     "exact_match_eval = bind_evaluator(evaluator=exact_match, input_mapping= { \"output\": \"attributes.output.value\", \"expected\": \"Final answer\"})\n",
+     "conciseness_evaluator = bind_evaluator(evaluator=conciseness_evaluator, input_mapping={ \"output\": \"attributes.output.value\", \"expected\": \"Final answer\"})\n",
+     "results_df = await async_evaluate_dataframe(agents_merged_df, evaluators=[exact_match_eval, conciseness_evaluator])\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 34,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "results_df[\"exact_match\"] = results_df.exact_match_score.apply(json.loads).apply(lambda x : x[\"score\"])\n",
+     "results_df[\"conciseness\"] = results_df.conciseness_evaluator_score.apply(json.loads).apply(lambda x : x[\"label\"])\n",
+     "results_df[\"agent_type\"] = results_df[\"attributes.smolagents\"].apply(lambda x : \"multi_agent\" if \"managed_agents\" in x else \"llm_agent\")\n",
+     "results_filtered_df = results_df[[\"name\", \"span_kind\", \"start_time\", \"context.span_id\", \"context.trace_id\",\"attributes.output.value\", \"task_id\", \"Question\", \"Final answer\", \"agent_type\", \"exact_match_score\", \"conciseness_evaluator_score\", \"exact_match\", \"conciseness\"]]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 38,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/Users/romainfayoux/Documents/Programmation/Final_Assignment_Template/.venv/lib/python3.12/site-packages/phoenix/evals/utils.py:367: FutureWarning: The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation.\n",
+       " result_df = pd.concat(result_dfs, ignore_index=True)\n"
+      ]
+     }
+    ],
+    "source": [
+     "# Upload results\n",
+     "import numpy as np\n",
+     "from phoenix.evals.utils import to_annotation_dataframe\n",
+     "\n",
+     "annotation_df = to_annotation_dataframe(results_filtered_df)\n",
+     "annotation_df = annotation_df.replace({np.nan: None})\n",
+     "Client().spans.log_span_annotations_dataframe(dataframe=annotation_df)\n"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Final_Assignment_Template",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.12.11"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
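Once the notebook has produced results_filtered_df (with the exact_match, conciseness, and agent_type columns derived above), the scores can also be summarized locally before or after uploading the annotations. A minimal sketch, assuming results_filtered_df is already in memory as built in the cells above and that exact_match holds the numeric score extracted from exact_match_score:

# Mean exact-match score per agent type.
print(results_filtered_df.groupby("agent_type")["exact_match"].mean())

# How the conciseness labels ("too short" / "concise" / "too long") are distributed.
print(results_filtered_df.groupby(["agent_type", "conciseness"]).size())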
eval/evaluators.py ADDED
@@ -0,0 +1,11 @@
+ from phoenix.evals import create_evaluator
+
+ @create_evaluator(name="conciseness_evaluator")
+ def conciseness_evaluator(output: str, expected: str):
+     ratio = (len(output) / len(expected))
+     if ratio < 0.5:
+         return "too short"
+     elif ratio > 3.0:
+         return "too long"
+     else:
+         return "concise"