Romain Fayoux committed
Commit 279f51f · 1 Parent(s): 58f5703

Corrected eval to discard exact match as it is not the eval used by the

Files changed (1)
  1. eval/eval_notebook.ipynb +33 -12
eval/eval_notebook.ipynb CHANGED
@@ -2,21 +2,30 @@
  "cells": [
  {
  "cell_type": "code",
- "execution_count": 29,
+ "execution_count": 1,
  "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/Users/romainfayoux/Documents/Programmation/Final_Assignment_Template/.venv/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
  "source": [
  "import pandas as pd\n",
  "import json\n",
  "from phoenix.client import Client\n",
  "\n",
  "# Load the existing spans\n",
- "spans_df = Client().spans.get_spans_dataframe(project_name=\"default\")"
+ "spans_df = Client().spans.get_spans_dataframe(project_name=\"default\", start_time=\"2025-10-23\")"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 30,
+ "execution_count": 2,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -26,7 +35,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 31,
+ "execution_count": 3,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -36,14 +45,14 @@
  },
  {
  "cell_type": "code",
- "execution_count": 32,
+ "execution_count": 4,
  "metadata": {},
  "outputs": [
  {
  "name": "stderr",
  "output_type": "stream",
  "text": [
- "/var/folders/pj/v1zrqj1d10x9_1rd2njh_r_r0000gn/T/ipykernel_36696/3107371246.py:2: SettingWithCopyWarning: \n",
+ "/var/folders/pj/v1zrqj1d10x9_1rd2njh_r_r0000gn/T/ipykernel_35129/3107371246.py:2: SettingWithCopyWarning: \n",
  "A value is trying to be set on a copy of a slice from a DataFrame.\n",
  "Try using .loc[row_indexer,col_indexer] = value instead\n",
  "\n",
@@ -60,9 +69,21 @@
  },
  {
  "cell_type": "code",
- "execution_count": 33,
+ "execution_count": null,
  "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "NameError",
+ "evalue": "name 'exact_match_eval' is not defined",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+ "\u001b[31mNameError\u001b[39m Traceback (most recent call last)",
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 8\u001b[39m\n\u001b[32m 6\u001b[39m conciseness_evaluator = bind_evaluator(evaluator=conciseness_evaluator, input_mapping={ \u001b[33m\"\u001b[39m\u001b[33moutput\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33mattributes.output.value\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mexpected\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33mFinal answer\u001b[39m\u001b[33m\"\u001b[39m})\n\u001b[32m 7\u001b[39m question_scorer_eval = bind_evaluator(evaluator=question_scorer, input_mapping={ \u001b[33m\"\u001b[39m\u001b[33moutput\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33mattributes.output.value\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mexpected\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33mFinal answer\u001b[39m\u001b[33m\"\u001b[39m})\n\u001b[32m----> \u001b[39m\u001b[32m8\u001b[39m results_df = \u001b[38;5;28;01mawait\u001b[39;00m async_evaluate_dataframe(agents_merged_df, evaluators=[\u001b[43mexact_match_eval\u001b[49m, conciseness_evaluator, question_scorer_eval])\n",
+ "\u001b[31mNameError\u001b[39m: name 'exact_match_eval' is not defined"
+ ]
+ }
+ ],
  "source": [
  "from phoenix.evals.evaluators import bind_evaluator, async_evaluate_dataframe\n",
  "from evaluators import conciseness_evaluator\n",
@@ -71,12 +92,12 @@
  "# Define the evaluator\n",
  "conciseness_evaluator = bind_evaluator(evaluator=conciseness_evaluator, input_mapping={ \"output\": \"attributes.output.value\", \"expected\": \"Final answer\"})\n",
  "question_scorer_eval = bind_evaluator(evaluator=question_scorer, input_mapping={ \"output\": \"attributes.output.value\", \"expected\": \"Final answer\"})\n",
- "results_df = await async_evaluate_dataframe(agents_merged_df, evaluators=[exact_match_eval, conciseness_evaluator, question_scorer_eval])\n"
+ "results_df = await async_evaluate_dataframe(agents_merged_df, evaluators=[conciseness_evaluator, question_scorer_eval])\n"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 34,
+ "execution_count": null,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -88,7 +109,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 35,
+ "execution_count": null,
  "metadata": {},
  "outputs": [
  {
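
For readability, here is the corrected evaluation flow from the new revision as plain Python, stripped of the notebook JSON escaping above. This is a minimal sketch, not the committed cell verbatim: `agents_merged_df` is built in cells outside these hunks, and the import location of `question_scorer` is not visible in the diff, so importing it from the local `evaluators` module is an assumption.

from phoenix.client import Client
from phoenix.evals.evaluators import bind_evaluator, async_evaluate_dataframe

# conciseness_evaluator is imported from the local module in the diff;
# question_scorer's import is not shown in these hunks (assumed here)
from evaluators import conciseness_evaluator, question_scorer

# Load the existing spans, now limited to recent traces via start_time
spans_df = Client().spans.get_spans_dataframe(
    project_name="default", start_time="2025-10-23"
)

# Placeholder: the notebook's hidden cells merge spans with the
# ground-truth "Final answer" column into agents_merged_df
agents_merged_df = spans_df

# Bind each evaluator to the span output column and the expected answer.
# Per the commit message, the exact-match evaluator is dropped because it
# is not the eval actually used; question_scorer does the scoring instead.
conciseness_eval = bind_evaluator(
    evaluator=conciseness_evaluator,
    input_mapping={"output": "attributes.output.value", "expected": "Final answer"},
)
question_scorer_eval = bind_evaluator(
    evaluator=question_scorer,
    input_mapping={"output": "attributes.output.value", "expected": "Final answer"},
)

# Top-level await works in a notebook cell; in a script, wrap this call
# in asyncio.run(...)
results_df = await async_evaluate_dataframe(
    agents_merged_df,
    evaluators=[conciseness_eval, question_scorer_eval],
)

One side note on the committed cell: it rebinds the name conciseness_evaluator to its bound version, shadowing the import; using a distinct name such as conciseness_eval, as in the sketch, keeps the cell safe to re-run.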