import streamlit as st
from PyPDF2 import PdfReader
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationSummaryMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
import base64
import io
import time
from PIL import Image
import os

# Google API key is read from the environment; set the "api_key" variable before running the app.
GOOGLE_API_KEY = os.environ.get("api_key")


def convert_to_base64(uploaded_file):
    """Encode an uploaded image as base64 so it can be embedded in a multimodal request."""
    image = Image.open(uploaded_file)
    buffered = io.BytesIO()
    image_format = image.format if image.format in ["JPEG", "PNG"] else "PNG"
    image.save(buffered, format=image_format)
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def text():
    st.title("Gemini Psychology Demo")
    st.sidebar.title("Capabilities:")
    st.sidebar.markdown("""
    - **Text Queries**
    - **Visual Queries**
    - **PDF Support**
    """)
    # Custom CSS/HTML styling for the page is injected here.
    st.markdown(""" """, unsafe_allow_html=True)

    # Initialise chat state once per session: display messages, LangChain history,
    # and a running conversation summary.
    if "messages" not in st.session_state:
        st.session_state.messages = []
        st.session_state.chat_history = StreamlitChatMessageHistory()
        st.session_state.memory = ConversationSummaryMemory(
            llm=ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key=GOOGLE_API_KEY),
            memory_key="history",
            chat_memory=st.session_state.chat_history
        )
        system_prompt = (
            "You are a compassionate and emotionally intelligent AI assistant trained in cognitive behavioral therapy (CBT), "
            "mindfulness, and active listening. You provide supportive, empathetic responses without making medical diagnoses. "
            "Use a warm tone and guide users to explore their feelings, reframe thoughts, and reflect gently."
        )
        st.session_state.chat_history.add_message(SystemMessage(content=system_prompt))

    llm = ChatGoogleGenerativeAI(
        model="gemini-2.5-flash",
        google_api_key=GOOGLE_API_KEY,
        temperature=0.3,
        streaming=True,
        timeout=120,
        max_retries=6
    )

    chat_container = st.container()
    with chat_container:
        if len(st.session_state.messages) == 0:
            # Animated welcome banner (raw HTML) shown only while the conversation is empty.
            animated_text = ''
            st.markdown(animated_text, unsafe_allow_html=True)
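        # The message-replay and input-handling plumbing below is a minimal sketch,
        # assuming the standard st.chat_message / st.chat_input pattern; the prompt
        # text is illustrative, and valid_history is simply the stored history
        # (system prompt plus prior turns) without any filtering. It provides the
        # user_message, valid_history and typing_container names that the
        # streaming code further down expects.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    user_input = st.chat_input("Share what's on your mind...")
    if user_input:
        # Record and echo the user's turn; memory.save_context below persists it
        # into the LangChain chat history after the model replies.
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(user_input)
        user_message = HumanMessage(content=user_input)
        valid_history = list(st.session_state.chat_history.messages)

        # Placeholder that shows "Thinking..." first and switches to "Typing..."
        # once the first streamed chunk arrives.
        typing_container = st.empty()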
        # While waiting for the first token, show a lightweight "Thinking..." status.
        typing_container.markdown('Thinking...', unsafe_allow_html=True)
        # Styling for the thinking/typing indicator is injected here as raw HTML/CSS.
        st.markdown(""" """, unsafe_allow_html=True)

        # Generator consumed by st.write_stream below: re-chunks the streamed model
        # output into words and pauses slightly longer after punctuation.
        def stream_generator(valid_history, user_message):
            response = llm.stream(valid_history + [user_message])
            buffer = ""
            first_chunk_received = False
            PAUSE_AFTER = {".", "!", "?", ",", ";", ":"}
            PAUSE_MULTIPLIER = 2.5

            for chunk in response:
                if not first_chunk_received:
                    # Swap the "Thinking..." indicator for "Typing..." once streaming starts.
                    typing_container.empty()
                    typing_container.markdown('Typing...', unsafe_allow_html=True)
                    first_chunk_received = True

                # Keep any trailing partial word in the buffer until the next
                # chunk completes it, then emit whole words one at a time.
                content = buffer + chunk.content
                words = content.split(' ')
                if not content.endswith(' '):
                    buffer = words.pop()
                else:
                    buffer = ""
                for word in words:
                    yield word + ' '
                    base_delay = 0.03
                    last_char = word[-1] if word else ''
                    time.sleep(base_delay * PAUSE_MULTIPLIER if last_char in PAUSE_AFTER else base_delay)

            # Flush whatever is left once the stream ends, then clear the indicator.
            if buffer:
                yield buffer
                time.sleep(0.03)
            typing_container.empty()

        with st.chat_message("assistant", avatar="🤖"):
            full_response = st.write_stream(
                stream_generator(valid_history, user_message)
            )
        typing_container.empty()

        # Persist the assistant's turn in both the display state and the LangChain
        # history, and update the running conversation summary.
        st.session_state.messages.append({
            "role": "assistant",
            "content": full_response
        })
        ai_message = AIMessage(content=full_response)
        st.session_state.chat_history.add_message(ai_message)
        st.session_state.memory.save_context(
            {"input": user_message.content},
            {"output": ai_message.content}
        )
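
# ---------------------------------------------------------------------------
# PdfReader and convert_to_base64 are imported/defined above but never used in
# text(), even though the sidebar advertises "PDF Support" and "Visual Queries".
# The helpers below are a hedged sketch of how they could be wired in:
# extract_pdf_text and build_image_message are illustrative names (not from the
# original app), and the multimodal content format follows the data-URL style
# accepted by langchain-google-genai.
# ---------------------------------------------------------------------------
def extract_pdf_text(uploaded_file):
    """Concatenate the extractable text of every page in an uploaded PDF."""
    reader = PdfReader(uploaded_file)
    return "\n".join(page.extract_text() or "" for page in reader.pages)


def build_image_message(prompt, uploaded_image):
    """Bundle a text prompt and an uploaded image into a single multimodal HumanMessage."""
    image_b64 = convert_to_base64(uploaded_image)
    return HumanMessage(content=[
        {"type": "text", "text": prompt},
        # Assumes the image was re-encoded as PNG by convert_to_base64; adjust the
        # MIME type if the original JPEG format was kept.
        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
    ])


# Assumed entry point: Streamlit executes the script top to bottom, so render the UI here.
text()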