import os
import pickle

import faiss
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
    index = faiss.read_index(index_path)
    with open(doc_path, "rb") as f:
        documents = pickle.load(f)
    return index, documents
def get_embedding_model():
    # No token needed here: the model is public
    print("✅ Loading the multi-qa-MiniLM-L6-cos-v1 encoder")
    return SentenceTransformer("sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
def query_index(question, index, documents, model, k=3):
    question_embedding = model.encode([question])
    # index.search returns (distances, indices); only the indices are needed
    _, indices = index.search(np.array(question_embedding).astype("float32"), k)
    return [documents[i] for i in indices[0]]
def generate_answer(question, context):
    token = os.getenv("HUGGINGFACE")  # only needed for gated checkpoints (e.g. Mistral); flan-t5-base is public
    model_id = "google/flan-t5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
    # Flan-T5 is an encoder-decoder model, so it is loaded as a seq2seq LM;
    # its tokenizer already defines a pad token, so nothing to reassign.
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_id,
        token=token,
        device_map="auto",
        torch_dtype=torch.float16,
    )
    prompt = f"Here is some context:\n{context}\n\nQuestion: {question}\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    print("🔍 Context used for generation:")
    print(context[:500])
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
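
# --- Minimal usage sketch (an illustrative addition, not part of the original
# file). It assumes a separate ingestion step has already written
# faiss_index/faiss_index.faiss and faiss_index/documents.pkl, and that the
# pickled documents are plain-text chunks; the example question is hypothetical.
if __name__ == "__main__":
    index, documents = load_faiss_index()
    encoder = get_embedding_model()
    question = "What does the indexed corpus say about its main topic?"  # hypothetical
    top_chunks = query_index(question, index, documents, encoder, k=3)
    context = "\n\n".join(str(chunk) for chunk in top_chunks)
    print(generate_answer(question, context))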