moazzamdev committed on
Commit
fbbab3d
·
verified ·
1 Parent(s): 734215d

Update page1.py

Browse files
Files changed (1) hide show
  1. page1.py +30 -76
page1.py CHANGED
@@ -12,18 +12,15 @@ import io
12
  import time
13
  from PIL import Image
14
  import os
 
15
  # Set your Google API key here
16
  GOOGLE_API_KEY = os.environ.get("api_key")
17
 
18
 
19
  def convert_to_base64(uploaded_file):
20
- """Convert uploaded image to Base64 format (supports JPEG and PNG)"""
21
  image = Image.open(uploaded_file)
22
  buffered = io.BytesIO()
23
-
24
- # Preserve format (default to PNG if unknown)
25
  format = image.format if image.format in ["JPEG", "PNG"] else "PNG"
26
-
27
  image.save(buffered, format=format)
28
  return base64.b64encode(buffered.getvalue()).decode("utf-8")
29
 
@@ -31,14 +28,12 @@ def convert_to_base64(uploaded_file):
31
  def text():
32
  st.title("Gemini 2.0 Thinking Experimental")
33
  st.sidebar.title("Capabilities:")
34
-
35
- # Add bullet points
36
  st.sidebar.markdown("""
37
- - **Text Queries**
38
- - **Visual Queries**
39
- - **PDF Support**
40
-
41
- """)
42
  st.markdown("""
43
  <style>
44
  .anim-typewriter {
@@ -73,7 +68,6 @@ def text():
73
  </style>
74
  """, unsafe_allow_html=True)
75
 
76
- # Initialize session state
77
  if "messages" not in st.session_state:
78
  st.session_state.messages = []
79
  st.session_state.chat_history = StreamlitChatMessageHistory()
@@ -82,13 +76,13 @@ def text():
82
  memory_key="history",
83
  chat_memory=st.session_state.chat_history
84
  )
85
- system_prompt = (
86
- "You are a compassionate and emotionally intelligent AI assistant trained in cognitive behavioral therapy (CBT), "
87
- "mindfulness, and active listening. You provide supportive, empathetic responses without making medical diagnoses. "
88
- "Use a warm tone and guide users to explore their feelings, reframe thoughts, and reflect gently."
89
- )
90
- st.session_state.chat_history.add_message(SystemMessage(content=system_prompt))
91
- # Initialize Gemini model
92
  llm = ChatGoogleGenerativeAI(
93
  model="gemini-2.5-flash",
94
  google_api_key=GOOGLE_API_KEY,
@@ -96,24 +90,15 @@ def text():
96
  streaming=True,
97
  timeout=120,
98
  max_retries=6
99
-
100
  )
101
- # Add system prompt (psychological assistant)
102
-
103
-
104
- # Add SystemMessage to chat history
105
- #st.session_state.chat_history.add_message(SystemMessage(content=system_prompt))
106
- # Display chat messages
107
  chat_container = st.container()
108
  with chat_container:
109
- # Show initial bot message
110
  if len(st.session_state.messages) == 0:
111
  animated_text = '<div class="anim-typewriter">Hello πŸ‘‹, how may I assist you today?</div>'
112
- # st.chat_message("assistant").markdown(animated_text, unsafe_allow_html=True)
113
  st.session_state.messages.append({"role": "assistant", "content": "Hello πŸ‘‹, how may I assist you today?"})
114
 
115
- # Display historical messages
116
- for message in st.session_state.messages[0:]: # Skip first static message
117
  if message["role"] == "user":
118
  if message.get("image"):
119
  st.chat_message("user", avatar="πŸ§‘").markdown(
@@ -125,7 +110,6 @@ def text():
125
  else:
126
  st.chat_message("assistant", avatar="πŸ€–").markdown(message["content"])
127
 
128
- # Chat input with multimodal support
129
  user_input = st.chat_input("Say something", accept_file=True, file_type=["png", "jpg", "jpeg", "pdf"])
130
 
131
  if user_input:
@@ -133,22 +117,18 @@ def text():
133
  file_name = ""
134
  image_base64 = convert_to_base64("pdf_icon.png")
135
  image_url = f"data:image/jpeg;base64,{image_base64}"
136
- # Process user input
137
- #image_url = ""
138
  message_content = [{"type": "text", "text": user_input.text}]
139
  files = user_input["files"]
140
 
141
-
142
  if files:
143
  file_type = files[0].type
144
-
145
 
146
  if file_type in ["image/png", "image/jpg", "image/jpeg"]:
147
  uploaded_file = user_input["files"][0]
148
  image_base64 = convert_to_base64(uploaded_file)
149
  image_url = f"data:image/jpeg;base64,{image_base64}"
150
-
151
  message_content.append({"type": "image_url", "image_url": image_url})
 
152
  text = ""
153
  if file_type == "application/pdf":
154
  uploaded_file = user_input["files"][0]
@@ -156,12 +136,10 @@ def text():
156
  pdf_reader = PdfReader(uploaded_file)
157
  for page in pdf_reader.pages:
158
  text += page.extract_text()
159
- #st.sidebar.write(text)
160
- prompt = "this is pdf data: \n"+text +"this is user asking about pdf:"+user_input.text
161
  message_content = [{"type": "text", "text": prompt}]
162
  message_content.append({"type": "text", "text": file_name})
163
- #message_content.append({"type": "image_url", "image_url": image_url})
164
- # Add user message to UI
165
  with chat_container:
166
  if file_type:
167
  st.chat_message("user", avatar="πŸ§‘").markdown(
@@ -174,34 +152,30 @@ def text():
174
  """,
175
  unsafe_allow_html=True
176
  )
177
-
178
  else:
179
  st.chat_message("user", avatar="πŸ§‘").markdown(user_input.text)
180
 
181
- # Store in session state
182
  st.session_state.messages.append({
183
  "role": "user",
184
  "content": user_input.text,
185
  "image": image_url if user_input["files"] else "",
186
- "file_name" : file_name,
187
- "file_type" : file_type
188
  })
189
 
190
- # Create LangChain message
191
  user_message = HumanMessage(content=message_content)
192
  st.session_state.chat_history.add_message(user_message)
193
 
194
- # Generate streaming response
195
  history = st.session_state.chat_history.messages
 
 
 
196
  typing_container = st.empty()
197
 
198
- def stream_generator(history, user_message):
199
- # Placeholder for "Thinking..." and "Typing..."
200
  typing_container = st.empty()
201
-
202
- # Show "Thinking..." first
203
  typing_container.markdown('<p class="fade-text">Thinking...</p>', unsafe_allow_html=True)
204
-
205
  st.markdown("""
206
  <style>
207
  @keyframes fade {
@@ -218,17 +192,11 @@ def text():
218
  </style>
219
  """, unsafe_allow_html=True)
220
 
221
- response = llm.stream(history + [user_message])
222
-
223
- # Buffer for partial words
224
  buffer = ""
225
-
226
- # Flag to change message
227
  first_chunk_received = False
228
-
229
- # Pause settings
230
  PAUSE_AFTER = {".", "!", "?", ",", ";", ":"}
231
- PAUSE_MULTIPLIER = 2.5 # Pause longer for punctuation
232
 
233
  for chunk in response:
234
  if not first_chunk_received:
@@ -239,51 +207,37 @@ def text():
239
  content = buffer + chunk.content
240
  words = content.split(' ')
241
 
242
- # Check if last word is complete
243
  if not content.endswith(' '):
244
  buffer = words.pop()
245
  else:
246
  buffer = ""
247
 
248
  for word in words:
249
- yield word + ' ' # Stream word-by-word
250
-
251
- # Add delay for natural pauses
252
  base_delay = 0.03
253
  last_char = word[-1] if word else ''
254
  time.sleep(base_delay * PAUSE_MULTIPLIER if last_char in PAUSE_AFTER else base_delay)
255
 
256
- # Yield any remaining content in buffer
257
  if buffer:
258
  yield buffer
259
  time.sleep(0.03)
260
 
261
- # Clear "Typing..." message after response finishes
262
  typing_container.empty()
263
 
264
- # Generate streaming response
265
  with st.chat_message("assistant", avatar="πŸ€–"):
266
-
267
  full_response = st.write_stream(
268
- stream_generator(
269
- st.session_state.chat_history.messages,
270
- user_message
271
- )
272
  )
 
273
 
274
- typing_container.empty() # Remove status message
275
-
276
- # Update session state
277
  st.session_state.messages.append({
278
  "role": "assistant",
279
  "content": full_response
280
  })
281
 
282
- # Update conversation memory
283
  ai_message = AIMessage(content=full_response)
284
  st.session_state.chat_history.add_message(ai_message)
285
  st.session_state.memory.save_context(
286
  {"input": user_message.content},
287
  {"output": ai_message.content}
288
  )
289
- #st.sidebar.write(user_message)
 
12
  import time
13
  from PIL import Image
14
  import os
15
+
16
  # Set your Google API key here
17
  GOOGLE_API_KEY = os.environ.get("api_key")
18
 
19
 
20
  def convert_to_base64(uploaded_file):
 
21
  image = Image.open(uploaded_file)
22
  buffered = io.BytesIO()
 
 
23
  format = image.format if image.format in ["JPEG", "PNG"] else "PNG"
 
24
  image.save(buffered, format=format)
25
  return base64.b64encode(buffered.getvalue()).decode("utf-8")
26
 
 
28
  def text():
29
  st.title("Gemini 2.0 Thinking Experimental")
30
  st.sidebar.title("Capabilities:")
 
 
31
  st.sidebar.markdown("""
32
+ - **Text Queries**
33
+ - **Visual Queries**
34
+ - **PDF Support**
35
+ """)
36
+
37
  st.markdown("""
38
  <style>
39
  .anim-typewriter {
 
68
  </style>
69
  """, unsafe_allow_html=True)
70
 
 
71
  if "messages" not in st.session_state:
72
  st.session_state.messages = []
73
  st.session_state.chat_history = StreamlitChatMessageHistory()
 
76
  memory_key="history",
77
  chat_memory=st.session_state.chat_history
78
  )
79
+ system_prompt = (
80
+ "You are a compassionate and emotionally intelligent AI assistant trained in cognitive behavioral therapy (CBT), "
81
+ "mindfulness, and active listening. You provide supportive, empathetic responses without making medical diagnoses. "
82
+ "Use a warm tone and guide users to explore their feelings, reframe thoughts, and reflect gently."
83
+ )
84
+ st.session_state.chat_history.add_message(SystemMessage(content=system_prompt))
85
+
86
  llm = ChatGoogleGenerativeAI(
87
  model="gemini-2.5-flash",
88
  google_api_key=GOOGLE_API_KEY,
 
90
  streaming=True,
91
  timeout=120,
92
  max_retries=6
 
93
  )
94
+
 
 
 
 
 
95
  chat_container = st.container()
96
  with chat_container:
 
97
  if len(st.session_state.messages) == 0:
98
  animated_text = '<div class="anim-typewriter">Hello πŸ‘‹, how may I assist you today?</div>'
 
99
  st.session_state.messages.append({"role": "assistant", "content": "Hello πŸ‘‹, how may I assist you today?"})
100
 
101
+ for message in st.session_state.messages:
 
102
  if message["role"] == "user":
103
  if message.get("image"):
104
  st.chat_message("user", avatar="πŸ§‘").markdown(
 
110
  else:
111
  st.chat_message("assistant", avatar="πŸ€–").markdown(message["content"])
112
 
 
113
  user_input = st.chat_input("Say something", accept_file=True, file_type=["png", "jpg", "jpeg", "pdf"])
114
 
115
  if user_input:
 
117
  file_name = ""
118
  image_base64 = convert_to_base64("pdf_icon.png")
119
  image_url = f"data:image/jpeg;base64,{image_base64}"
 
 
120
  message_content = [{"type": "text", "text": user_input.text}]
121
  files = user_input["files"]
122
 
 
123
  if files:
124
  file_type = files[0].type
 
125
 
126
  if file_type in ["image/png", "image/jpg", "image/jpeg"]:
127
  uploaded_file = user_input["files"][0]
128
  image_base64 = convert_to_base64(uploaded_file)
129
  image_url = f"data:image/jpeg;base64,{image_base64}"
 
130
  message_content.append({"type": "image_url", "image_url": image_url})
131
+
132
  text = ""
133
  if file_type == "application/pdf":
134
  uploaded_file = user_input["files"][0]
 
136
  pdf_reader = PdfReader(uploaded_file)
137
  for page in pdf_reader.pages:
138
  text += page.extract_text()
139
+ prompt = "this is pdf data: \n" + text + "this is user asking about pdf:" + user_input.text
 
140
  message_content = [{"type": "text", "text": prompt}]
141
  message_content.append({"type": "text", "text": file_name})
142
+
 
143
  with chat_container:
144
  if file_type:
145
  st.chat_message("user", avatar="πŸ§‘").markdown(
 
152
  """,
153
  unsafe_allow_html=True
154
  )
 
155
  else:
156
  st.chat_message("user", avatar="πŸ§‘").markdown(user_input.text)
157
 
 
158
  st.session_state.messages.append({
159
  "role": "user",
160
  "content": user_input.text,
161
  "image": image_url if user_input["files"] else "",
162
+ "file_name": file_name,
163
+ "file_type": file_type
164
  })
165
 
 
166
  user_message = HumanMessage(content=message_content)
167
  st.session_state.chat_history.add_message(user_message)
168
 
169
+ # Ensure valid message history (SystemMessage only at index 0)
170
  history = st.session_state.chat_history.messages
171
+ valid_history = [msg for msg in history if not isinstance(msg, SystemMessage)]
172
+ valid_history = [history[0]] + valid_history # Keep the first SystemMessage only
173
+
174
  typing_container = st.empty()
175
 
176
+ def stream_generator(valid_history, user_message):
 
177
  typing_container = st.empty()
 
 
178
  typing_container.markdown('<p class="fade-text">Thinking...</p>', unsafe_allow_html=True)
 
179
  st.markdown("""
180
  <style>
181
  @keyframes fade {
 
192
  </style>
193
  """, unsafe_allow_html=True)
194
 
195
+ response = llm.stream(valid_history + [user_message])
 
 
196
  buffer = ""
 
 
197
  first_chunk_received = False
 
 
198
  PAUSE_AFTER = {".", "!", "?", ",", ";", ":"}
199
+ PAUSE_MULTIPLIER = 2.5
200
 
201
  for chunk in response:
202
  if not first_chunk_received:
 
207
  content = buffer + chunk.content
208
  words = content.split(' ')
209
 
 
210
  if not content.endswith(' '):
211
  buffer = words.pop()
212
  else:
213
  buffer = ""
214
 
215
  for word in words:
216
+ yield word + ' '
 
 
217
  base_delay = 0.03
218
  last_char = word[-1] if word else ''
219
  time.sleep(base_delay * PAUSE_MULTIPLIER if last_char in PAUSE_AFTER else base_delay)
220
 
 
221
  if buffer:
222
  yield buffer
223
  time.sleep(0.03)
224
 
 
225
  typing_container.empty()
226
 
 
227
  with st.chat_message("assistant", avatar="πŸ€–"):
 
228
  full_response = st.write_stream(
229
+ stream_generator(valid_history, user_message)
 
 
 
230
  )
231
+ typing_container.empty()
232
 
 
 
 
233
  st.session_state.messages.append({
234
  "role": "assistant",
235
  "content": full_response
236
  })
237
 
 
238
  ai_message = AIMessage(content=full_response)
239
  st.session_state.chat_history.add_message(ai_message)
240
  st.session_state.memory.save_context(
241
  {"input": user_message.content},
242
  {"output": ai_message.content}
243
  )