mvectors
semantic_search/llm_eval.py  CHANGED
@@ -80,8 +80,13 @@ def eval(question, answers):
     for idx,i in enumerate(answers[0]['answer']):
         if('relevant' in response[idx]):
             relevance = response[idx]['relevant']
+        else:
+            relevance = True
+
         if('score' in response[idx]):
             score_ = response[idx]['relevant']
+        else:
+            score_ = 0.0
         i['relevant'] = relevance
         llm_scores.append(score_)
         current_scores.append(i['score'])
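The patch adds fallback values for relevance and score_ when the LLM judge's response lacks the 'relevant' or 'score' key; without them, a missing key would either raise a NameError on the first answer or silently carry over the value from the previous iteration. Below is a minimal, self-contained sketch of the patched loop; the response and answers structures are made-up stand-ins for whatever llm_eval.py actually receives, not the Space's real data.

# Minimal sketch (assumed data shapes, not the Space's actual harness).
llm_scores = []
current_scores = []

# Hypothetical LLM-judge output; the second entry is missing both keys.
response = [
    {'relevant': True, 'score': 0.9},
    {},
]
# Hypothetical retrieval results with their own similarity scores.
answers = [{'answer': [
    {'text': 'first hit', 'score': 0.71},
    {'text': 'second hit', 'score': 0.42},
]}]

for idx, i in enumerate(answers[0]['answer']):
    if 'relevant' in response[idx]:
        relevance = response[idx]['relevant']
    else:
        # Default added by the patch: avoids NameError on the first answer
        # and prevents reusing the previous answer's value.
        relevance = True

    if 'score' in response[idx]:
        # Kept exactly as in the committed code: it reads 'relevant' here,
        # although 'score' may have been the intended key.
        score_ = response[idx]['relevant']
    else:
        score_ = 0.0  # default added by the patch

    i['relevant'] = relevance
    llm_scores.append(score_)
    current_scores.append(i['score'])

print(llm_scores)      # [True, 0.0]
print(current_scores)  # [0.71, 0.42]

Note that the 'score' branch still reads response[idx]['relevant'], as in the committed change; if that is unintentional, response[idx]['score'] would be the more likely key.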