Spaces:
Runtime error
added feature llm response text generation
app.py
CHANGED
@@ -8,6 +8,8 @@ import PyPDF2
 from tqdm.auto import tqdm
 import math
 from transformers import pipeline
+from langchain.prompts import ChatPromptTemplate
+import re
 # import json

 # st.config(PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION="python")
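As a rough standalone sketch (not part of the commit), this is how the two new imports are used further down in app.py: ChatPromptTemplate renders a prompt string, and re later strips the echoed prompt from the model output. The template text and query here are illustrative.

from langchain.prompts import ChatPromptTemplate
import re

template = ChatPromptTemplate.from_template(
    "Answer the question based on the above context: {question}"
)
prompt = template.format(question="What is self-attention?")

# re.escape makes the user query safe to embed in a regex pattern
pattern = re.escape("What is self-attention?")
answer_part = re.split(pattern, prompt)[-1]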
@@ -76,8 +78,10 @@ def get_pinecone_semantic_index(pinecone):
     # st.text(f"Succesfully connected to the pinecone index")
     return index

-def promt_engineer(text):
-    summary_prompt_template = """
+
+
+def promt_engineer(text, query):
+    summary_prompt_template = """
     write a concise summary of the following text delimited by triple backquotes.
     return your response in bullet points which convers the key points of the text.

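The line carrying the {text} placeholder falls between the hunks shown here, so the following body is an assumption based on the .format(text=text) call later in the diff and the "triple backquotes" wording; treat it as an illustrative sketch of the summary prompt only.

summary_prompt_template = """
write a concise summary of the following text delimited by triple backquotes.
return your response in bullet points which covers the key points of the text.
```{text}```
BULLET POINT SUMMARY:
"""
prompt = summary_prompt_template.format(text="Attention is all you need ...")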
@@ -85,11 +89,11 @@ def promt_engineer(text):

     BULLET POINT SUMMARY:
     """
-
+    # Load the summarization pipeline with the specified model
     summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

     # Generate the prompt
-    prompt =
+    prompt = summary_prompt_template.format(text=text)

     # Generate the summary
     summary = summarizer(prompt, max_length=1024, min_length=50)[0]["summary_text"]
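A standalone sketch of the summarization step: the model name and length bounds mirror the commit, while the sample text is a placeholder. The transformers pipeline returns a list of dicts with a "summary_text" key.

from transformers import pipeline

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
sample = "Transformers compute self-attention over all token pairs, which ..."
summary = summarizer(sample, max_length=1024, min_length=50)[0]["summary_text"]
print(summary)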
@@ -100,7 +104,38 @@ def promt_engineer(text):
     st.write(summary)
     st.divider()

-    return summary
+    GENERATION_PROMPT_TEMPLATE = """
+    Instructions:
+    -------------------------------------------------------------------------------------------------------------------------------
+    Answer the question only based on the below context:
+    - You're a Research AI expert in the explaining and reading the research papers.
+    - Questions with out-of-context replay with The question is out of context.
+    - Always try to provide Keep it simple answers in nice format without incomplete sentence.
+    - Give the answer atleast 5 seperate lines addition to the title info.
+    - Only If question is relevent to context provide Doc Title: <title> Paragraph: <Paragraph> Page No: <pagenumber>
+    -------------------------------------------------------------------------------------------------------------------------------
+    {context}
+    -------------------------------------------------------------------------------------------------------------------------------
+    Answer the question based on the above context: {question}
+    """
+
+    prompt_template = ChatPromptTemplate.from_template(GENERATION_PROMPT_TEMPLATE)
+    prompt = prompt_template.format(context=text, question=query)
+    response_text = ""
+    result = ""
+
+    try:
+        llm = HuggingFaceHub(
+            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0.1, "max_new_tokens": 256, "task":"text-generation"}
+        )
+        response_text = llm.invoke(prompt)
+        escaped_query = re.escape(query)
+        result = re.split(f'Answer the question based on the above context: {escaped_query}\n',response_text)[-1]
+        st.error(f"Error invoke: {e}")
+    except Exception as e:
+        st.error(f"Error invoke: {e}")
+
+    return summary, result

 def chat_actions():

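The new block wires a hosted Llama 3 model in through LangChain's HuggingFaceHub wrapper and then strips the echoed prompt with a regex. Below is a cleaned-up, self-contained sketch of that flow: the HuggingFaceHub import and HUGGINGFACEHUB_API_TOKEN are assumed to be handled elsewhere in app.py, the template is a trimmed version of GENERATION_PROMPT_TEMPLATE, and the duplicated st.error call inside the try block is left out.

import re
import streamlit as st
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms import HuggingFaceHub  # assumed import location

GENERATION_PROMPT_TEMPLATE = """
Answer the question only based on the below context:
{context}
Answer the question based on the above context: {question}
"""

def generate_answer(context: str, query: str) -> str:
    prompt = ChatPromptTemplate.from_template(GENERATION_PROMPT_TEMPLATE).format(
        context=context, question=query
    )
    result = ""
    try:
        llm = HuggingFaceHub(
            repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
            model_kwargs={"temperature": 0.1, "max_new_tokens": 256, "task": "text-generation"},
        )
        response_text = llm.invoke(prompt)
        # the endpoint usually echoes the prompt, so keep only the text
        # generated after the final instruction line
        result = re.split(
            f"Answer the question based on the above context: {re.escape(query)}\n",
            response_text,
        )[-1]
    except Exception as e:
        st.error(f"Error invoke: {e}")
    return result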
@@ -111,7 +146,8 @@ def chat_actions():
         {"role": "user", "content": st.session_state["chat_input"]},
     )

-    query_embedding = model.encode(st.session_state["chat_input"])
+    query = st.session_state["chat_input"]
+    query_embedding = model.encode(query)
     # create the query vector
     query_vector = query_embedding.tolist()
     # now query vector database
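A rough sketch of the retrieval step around this hunk: the chat input is embedded with the sentence encoder and used to query Pinecone. In app.py the encoder (`model`) and the index come from code outside this diff (get_pinecone_semantic_index), so the model name and top_k below are illustrative.

from sentence_transformers import SentenceTransformer

def retrieve(index, query: str, top_k: int = 5):
    model = SentenceTransformer("all-MiniLM-L6-v2")  # placeholder model name
    query_embedding = model.encode(query)
    query_vector = query_embedding.tolist()  # Pinecone expects a plain Python list
    return index.query(vector=query_vector, top_k=top_k, include_metadata=True)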
@@ -136,13 +172,13 @@ def chat_actions():
     p = math.pow(1024, 2)
     mbsize = round(len(bytesize) / p, 2)
     st.write(f"Text lenth of {len(consolidated_text)} characters with {mbsize}MB size")
-    summary = promt_engineer(consolidated_text[:1024])
+    summary, response = promt_engineer(consolidated_text[:1024], query)

     for res in result['matches']:
         st.session_state["chat_history"].append(
             {
                 "role": "assistant",
-                "content": f"{summary}",
+                "content": f"{response}",
             }, # This can be replaced with your chat response logic
         )
         break;
| 8 |
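For context, this is how chat_actions() is typically hooked into the Streamlit chat UI. The exact wiring lives outside this diff, so the widget label and rendering loop are illustrative; chat_actions refers to the callback defined in app.py above.

import streamlit as st

if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

st.chat_input("Ask about the indexed papers", key="chat_input", on_submit=chat_actions)

for message in st.session_state["chat_history"]:
    with st.chat_message(message["role"]):
        st.write(message["content"])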