Update rag_utils.py
rag_utils.py CHANGED (+19 -6)
@@ -2,10 +2,11 @@ import faiss
 import pickle
 import numpy as np
 import torch
+import re
 from sentence_transformers import SentenceTransformer
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-def load_faiss_index(index_path="faiss_index
+def load_faiss_index(index_path="faiss_index.faiss", doc_path="documents.pkl"):
     index = faiss.read_index(index_path)
     with open(doc_path, "rb") as f:
         documents = pickle.load(f)
@@ -19,17 +20,29 @@ def query_index(question, index, documents, model, k=3):
     _, indices = index.search(np.array(question_embedding).astype("float32"), k)
     return [documents[i] for i in indices[0]]
 
+def nettoyer_context(context):
+    context = re.sub(r"\[\'(.*?)\'\]", r"\1", context)  # strips the ['...'] wrapping
+    context = context.replace("None", "")  # removes stray "None" fragments
+    return context
+
 def generate_answer(question, context):
     model_id = "Salesforce/codegen-350M-mono"
 
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.pad_token = tokenizer.eos_token
-
     model = AutoModelForCausalLM.from_pretrained(model_id)
 
-    prompt = f"Voici
-    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
-    outputs = model.generate(**inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+    prompt = f"""Here is some information about schools and training programmes related to the careers you are looking into:
+
+{context}
+
+Phrase your answer as if you were a supportive guidance counsellor speaking to a student.
+Write fluidly and naturally, explaining the possible programmes and career paths, without using raw lists.
+
+Question: {question}
+Answer:
+"""
+
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
+    outputs = model.generate(**inputs, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
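
A minimal usage sketch, for orientation only: it assumes load_faiss_index returns the (index, documents) pair it loads, and that "all-MiniLM-L6-v2" stands in for whichever SentenceTransformer model was used to build the index; neither detail is shown in this diff.

from sentence_transformers import SentenceTransformer
from rag_utils import load_faiss_index, query_index, nettoyer_context, generate_answer

# Load the FAISS index and the pickled document store (faiss_index.faiss, documents.pkl).
index, documents = load_faiss_index()

# Assumed embedding model; it must match the one used at indexing time.
embedder = SentenceTransformer("all-MiniLM-L6-v2")

question = "Which programmes lead to a career in web development?"
docs = query_index(question, index, documents, embedder, k=3)

# Join the retrieved chunks, then strip the ['...'] wrapping and stray "None"
# fragments that the new nettoyer_context helper targets.
context = nettoyer_context("\n".join(str(d) for d in docs))

print(generate_answer(question, context))

As a quick check of the new cleaning step: an input like "['BTS SIO'] None ['Licence pro']" comes back from nettoyer_context as "BTS SIO  Licence pro", since the bracket-and-quote wrapping and the literal "None" are both removed.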