Spaces:
Sleeping
Sleeping
Fetch relevance scores from the vector store
Browse files
May, in the future, have a relevance threshold.
Added Qdrant URL to env
- Dockerfile +1 -1
- api/db/vector_store.py +2 -1
- api/routes/search.py +3 -3
Dockerfile
CHANGED
|
@@ -17,6 +17,6 @@ COPY api .
|
|
| 17 |
EXPOSE 8080
|
| 18 |
|
| 19 |
ENV OPENAI_API_KEY=zzz
|
| 20 |
-
ENV
|
| 21 |
# Start the application using Uvicorn
|
| 22 |
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
|
|
|
|
| 17 |
EXPOSE 8080
|
| 18 |
|
| 19 |
ENV OPENAI_API_KEY=zzz
|
| 20 |
+
ENV QDRANT_URL="https://32f125d3-5ab1-4058-a10a-bd38a1ebd647.us-east-1-0.aws.cloud.qdrant.io"
|
| 21 |
# Start the application using Uvicorn
|
| 22 |
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
|
api/db/vector_store.py
CHANGED
|
@@ -4,8 +4,9 @@ from langchain.embeddings import OpenAIEmbeddings
|
|
| 4 |
from langchain.vectorstores import Qdrant
|
| 5 |
|
| 6 |
embeddings = OpenAIEmbeddings()
|
| 7 |
-
client = QdrantClient(url=
|
| 8 |
api_key=os.getenv("QDRANT_API_KEY"))
|
| 9 |
|
| 10 |
def get_instance(collection: str = "test"):
|
| 11 |
return Qdrant(client=client,collection_name=collection,embeddings=embeddings)
|
|
|
|
|
|
| 4 |
from langchain.vectorstores import Qdrant
|
| 5 |
|
| 6 |
embeddings = OpenAIEmbeddings()
|
| 7 |
+
client = QdrantClient(url=os.getenv("QDRANT_URL"),
|
| 8 |
api_key=os.getenv("QDRANT_API_KEY"))
|
| 9 |
|
| 10 |
def get_instance(collection: str = "test"):
|
| 11 |
return Qdrant(client=client,collection_name=collection,embeddings=embeddings)
|
| 12 |
+
|
api/routes/search.py
CHANGED
|
@@ -41,10 +41,10 @@ async def answer(name: str, query: str):
|
|
| 41 |
"""
|
| 42 |
_db = vector_store.get_instance(name)
|
| 43 |
print(query)
|
| 44 |
-
docs = _db.
|
| 45 |
print(docs)
|
| 46 |
-
answer = _chain.run(input_documents=docs, question=query)
|
| 47 |
-
return JSONResponse(status_code=200, content={"answer": answer, "
|
| 48 |
|
| 49 |
async def generate_documents(file: UploadFile, fileName: str):
|
| 50 |
num=0
|
|
|
|
| 41 |
"""
|
| 42 |
_db = vector_store.get_instance(name)
|
| 43 |
print(query)
|
| 44 |
+
docs = _db.similarity_search_with_relevance_scores(query=query)
|
| 45 |
print(docs)
|
| 46 |
+
answer = _chain.run(input_documents=[tup[0] for tup in docs], question=query)
|
| 47 |
+
return JSONResponse(status_code=200, content={"answer": answer, "file_score": [[f"{d[0].metadata['file']} : {d[0].metadata['page']}", d[1]] for d in docs]})
|
| 48 |
|
| 49 |
async def generate_documents(file: UploadFile, fileName: str):
|
| 50 |
num=0
|