Pranjal Gupta committed on
Commit
75c55bc
·
1 Parent(s): 52e00f2

token added

Browse files
Files changed (1) hide show
  1. app.py +16 -5
app.py CHANGED
@@ -68,7 +68,18 @@ def process_pdf(file_path):
68
  gr.Info("PDF processed and ready for questions!")
69
 
70
  # Your existing functions
71
- def using_ollama_model(retriever, query, results, conversation_history):
 
 
 
 
 
 
 
 
 
 
 
72
  history_text = ""
73
  for item in conversation_history:
74
  if "question" in item and item["question"]:
@@ -111,7 +122,7 @@ def using_ollama_model(retriever, query, results, conversation_history):
111
 
112
  return answer
113
 
114
- def retrievingReponse(docId, query, conversation_history):
115
  retriever = vectorDB.as_retriever(
116
  search_type="similarity",
117
  search_kwargs={
@@ -130,11 +141,11 @@ def retrievingReponse(docId, query, conversation_history):
130
  unique_results.append(ans)
131
  seen_texts.add(result.page_content)
132
 
133
- llm_result = using_ollama_model(retriever, query, results, conversation_history)
134
  return llm_result
135
 
136
  # The revised Gradio wrapper function
137
- def gradio_rag_wrapper(message, history):
138
 
139
  print(history)
140
  # Check if a file has been uploaded
@@ -162,7 +173,7 @@ def gradio_rag_wrapper(message, history):
162
  rag_history.append({"question": user_text, "answer": bot_msg})
163
 
164
  docId = "42" # Use the docId from the uploaded file
165
- response = retrievingReponse(docId, text_query, rag_history)
166
 
167
  return response
168
 
 
68
  gr.Info("PDF processed and ready for questions!")
69
 
70
  # Your existing functions
71
+ def using_ollama_model(retriever, query, results, conversation_history, token):
72
+ try:
73
+ if token:
74
+ gr.Info("Attempting to log in to Hugging Face...")
75
+ login(token=token)
76
+ gr.Info("Login successful!")
77
+ else:
78
+ gr.Warning("No Hugging Face token provided. Gated models may not be accessible.")
79
+ except Exception as e:
80
+ gr.Error(f"Hugging Face login failed: {e}")
81
+ return "An error occurred during authentication. Please check your token and try again."
82
+
83
  history_text = ""
84
  for item in conversation_history:
85
  if "question" in item and item["question"]:
 
122
 
123
  return answer
124
 
125
+ def retrievingReponse(docId, query, conversation_history, token):
126
  retriever = vectorDB.as_retriever(
127
  search_type="similarity",
128
  search_kwargs={
 
141
  unique_results.append(ans)
142
  seen_texts.add(result.page_content)
143
 
144
+ llm_result = using_ollama_model(retriever, query, results, conversation_history, token)
145
  return llm_result
146
 
147
  # The revised Gradio wrapper function
148
+ def gradio_rag_wrapper(message, history, token):
149
 
150
  print(history)
151
  # Check if a file has been uploaded
 
173
  rag_history.append({"question": user_text, "answer": bot_msg})
174
 
175
  docId = "42" # Use the docId from the uploaded file
176
+ response = retrievingReponse(docId, text_query, rag_history, token)
177
 
178
  return response
179