import streamlit as st
from PIL import Image
import random
import time
from dotenv import load_dotenv
import pickle
from huggingface_hub import Repository
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
import os
import uuid
import json
import chromadb
# Initialize session state variables
if 'chat_history_page1' not in st.session_state:
    st.session_state['chat_history_page1'] = []

if 'chat_history_page2' not in st.session_state:
    st.session_state['chat_history_page2'] = []

if 'chat_history_page3' not in st.session_state:
    st.session_state['chat_history_page3'] = []

# This session ID is unique per user session and consistent across all pages.
if 'session_id' not in st.session_state:
    st.session_state['session_id'] = str(uuid.uuid4())
# Step 1: Clone the dataset repository that holds the source PDFs
repo = Repository(
    local_dir="Private_Book",             # Local directory to clone the repository into
    repo_type="dataset",                  # This is a dataset repository
    clone_from="Anne31415/Private_Book",  # Repository URL
    token=os.environ["HUB_TOKEN"]         # Secret token used to authenticate
)
repo.git_pull()  # Pull the latest changes (if any)

# Step 2: Clone the chat-store repository - all chats are saved there anonymously
repo2 = Repository(
    local_dir="Chat_Store",
    repo_type="dataset",
    clone_from="Anne31415/Chat_Store",
    token=os.environ["HUB_TOKEN"]
)
repo2.git_pull()  # Pull the latest changes (if any)
# Step 3: Point to the PDF files inside the cloned dataset repository
pdf_path = "Private_Book/KH_Reform230124.pdf"
pdf_path2 = "Private_Book/Buch_23012024.pdf"
pdf_path3 = "Private_Book/Kosten_Strukturdaten_RAG_vorbereited.pdf"

# Retrieve the OpenAI API key from the environment (a local .env file is also honored)
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
def load_vector_store(file_path, store_name, force_reload=False):
    local_repo_path = "Private_Book"
    vector_store_path = os.path.join(local_repo_path, f"{store_name}.pkl")

    # Reuse an existing vector store unless a rebuild is forced
    if not force_reload and os.path.exists(vector_store_path):
        with open(vector_store_path, "rb") as f:
            VectorStore = pickle.load(f)
    else:
        # Load and chunk the PDF, then build the vector store
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=100, length_function=len)
        text = load_pdf_text(file_path)
        chunks = text_splitter.split_text(text=text)
        embeddings = OpenAIEmbeddings()
        VectorStore = FAISS.from_texts(chunks, embedding=embeddings)

        # Serialize the vector store
        with open(vector_store_path, "wb") as f:
            pickle.dump(VectorStore, f)

        # Commit the new store back to the dataset repository
        original_dir = os.getcwd()
        os.chdir(local_repo_path)
        try:
            repo.git_add(f"{store_name}.pkl")  # Path is relative to the repository root
            repo.git_commit(f"Update vector store: {store_name}")
            repo.git_push()
        except Exception as e:
            st.error(f"Error during Git operations: {e}")
        finally:
            # Always change back to the original directory
            os.chdir(original_dir)

    return VectorStore
# Utility function to load the full text of a PDF
def load_pdf_text(file_path):
    pdf_reader = PdfReader(file_path)
    text = ""
    for page in pdf_reader.pages:
        text += page.extract_text() or ""  # Fallback for pages where text extraction fails
    return text
# Utility function to load a PDF's text as a list of per-page strings
def load_pdf_text_by_page(file_path):
    pdf_reader = PdfReader(file_path)
    pages_text = []
    for page in pdf_reader.pages:
        page_text = page.extract_text() or ""  # Fallback for pages where text extraction fails
        pages_text.append(page_text)
    return pages_text


# List of texts, one entry per page
pdf_pages = load_pdf_text_by_page(pdf_path3)
def load_chatbot():
    return load_qa_chain(llm=OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff")
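# Note: the "stuff" chain type places all retrieved documents into a single
# prompt for the LLM, which suits the small k=5 retrieval used in the pages below.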
def display_chat_history(chat_history):
    for chat in chat_history:
        # All message bubbles share the same highlight colour
        background_color = "#ffeecf"
        st.markdown(f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{chat[0]}: {chat[1]}</div>", unsafe_allow_html=True)
def handle_no_answer(response):
    # Phrases (German and English) that signal the model could not answer
    no_answer_phrases = [
        "ich weiß es nicht",
        "ich weiß nicht",
        "ich bin mir nicht sicher",
        "es wird nicht erwähnt",
        "Leider kann ich diese Frage nicht beantworten",
        "kann ich diese Frage nicht beantworten",
        "ich kann diese Frage nicht beantworten",
        "ich kann diese Frage leider nicht beantworten",
        "keine information",
        "das ist unklar",
        "da habe ich keine antwort",
        "das kann ich nicht beantworten",
        "i don't know",
        "i am not sure",
        "it is not mentioned",
        "no information",
        "that is unclear",
        "i have no answer",
        "i cannot answer that",
        "unable to provide an answer",
        "not enough context",
        "Sorry, I do not have enough information",
        "I do not have enough information",
        "I don't have enough information",
        "Sorry, I don't have enough context to answer that question.",
        "I don't have enough context to answer that question.",
        "to answer that question.",
        "Sorry",
        "I'm sorry",
        "I don't understand the question",
        "I don't understand"
    ]

    alternative_responses = [
        "Hmm, das ist eine knifflige Frage. Lass uns das gemeinsam erkunden. Kannst du mehr Details geben?",
        "Interessante Frage! Ich bin mir nicht sicher, aber wir können es herausfinden. Hast du weitere Informationen?",
        "Das ist eine gute Frage. Ich habe momentan keine Antwort darauf, aber vielleicht kannst du sie anders formulieren?",
        "Da bin ich überfragt. Kannst du die Frage anders stellen oder mir mehr Kontext geben?",
        "Ich stehe hier etwas auf dem Schlauch. Gibt es noch andere Aspekte der Frage, die wir betrachten könnten?",
        # Add more alternative responses as needed
    ]

    # Case-insensitive check: does the response contain any "no answer" phrase?
    if any(phrase.lower() in response.lower() for phrase in no_answer_phrases):
        return random.choice(alternative_responses)  # Randomly select a fallback
    return response
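# For example, a raw answer like "Ich bin mir nicht sicher, was gemeint ist."
# contains "ich bin mir nicht sicher" and is therefore swapped for one of the
# alternative_responses; any other answer passes through unchanged.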
def ask_bot(query):
    # Standing prompt prepended to every query (kept in German, matching the app's audience)
    standard_prompt = ("Antworte immer in der Sprache, in der der User schreibt. "
                       "Formuliere immer freundliche ganze Sätze und biete wenn möglich auch mehr Informationen "
                       "(aber nicht mehr als 1 Satz mehr). Wenn der User sehr vage schreibt, frage nach. "
                       "Wenn du zu einer bestimmten Frage Daten aus mehreren Jahren hast, "
                       "nenne das aktuellste und ein weiteres. ")
    # Combine the standing prompt with the user's query
    full_query = standard_prompt + query
    return full_query
def save_conversation(chat_histories, session_id):
    base_path = "Chat_Store/conversation_logs"
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    filename = f"{base_path}/{session_id}.json"

    # Load the existing log file, if any
    existing_data = {"page1": [], "page2": [], "page3": []}
    if os.path.exists(filename):
        with open(filename, 'r', encoding='utf-8') as file:
            existing_data = json.load(file)

    # Append only the messages from the current round (tagged "new") so that
    # earlier entries are not duplicated on every save
    for page_number, chat_history in enumerate(chat_histories, start=1):
        existing_data[f"page{page_number}"] += [m for m in chat_history if m[2] == "new"]

    with open(filename, 'w', encoding='utf-8') as file:
        json.dump(existing_data, file, indent=4, ensure_ascii=False)

    # Git operations: commit the updated log to the chat-store repository
    original_dir = os.getcwd()
    os.chdir('Chat_Store')
    try:
        # File path relative to the Git repository's root
        git_file_path = f"conversation_logs/{session_id}.json"
        repo2.git_add(git_file_path)
        repo2.git_commit(f"Add/update conversation log for session {session_id}")
        repo2.git_push()
    except Exception as e:
        st.error(f"Error during Git operations: {e}")
    finally:
        # Always change back to the original directory
        os.chdir(original_dir)
def display_session_id():
    session_id = st.session_state['session_id']
    st.sidebar.markdown(f"**Ihre Session ID:** `{session_id}`")
    st.sidebar.markdown("Verwenden Sie diese ID als Referenz bei Mitteilungen oder Rückmeldungen.")
def preprocess_and_store_pdf_text(pdf_path, collection, text_splitter):
    # Load and split the PDF text
    text = load_pdf_text(pdf_path)
    chunks = text_splitter.split_text(text=text)
    # Store each chunk as a separate document in the Chroma DB collection
    for i, chunk in enumerate(chunks):
        document_id = f"Chunk_{i+1}"
        collection.add(documents=[chunk], ids=[document_id])
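# Usage sketch (the PDF path and collection name below are hypothetical):
#
#   splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=200, length_function=len)
#   client = chromadb.Client()
#   demo_collection = client.create_collection(name="demo")
#   preprocess_and_store_pdf_text("Private_Book/example.pdf", demo_collection, splitter)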
def page1():
    try:
        hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
        st.markdown(hide_streamlit_style, unsafe_allow_html=True)

        # Create columns for layout
        col1, col2 = st.columns([3, 1])  # Adjust the ratio to your liking

        with col1:
            st.title("Alles zur aktuellen Krankenhausreform!")

        with col2:
            # Display the logo in the top-right corner of the page
            image = Image.open('BinDoc Logo (Quadratisch).png')
            st.image(image, use_column_width='always')

        if not os.path.exists(pdf_path):
            st.error("File not found. Please check the file path.")
            return

        VectorStore = load_vector_store(pdf_path, "KH_Reform_2301", force_reload=False)

        display_chat_history(st.session_state['chat_history_page1'])

        # Spacer pushing the input area towards the bottom
        st.write("<div style='flex: 1;'></div>", unsafe_allow_html=True)

        new_messages_placeholder = st.empty()

        query = st.text_input("Geben Sie hier Ihre Frage ein / Enter your question here:")

        add_vertical_space(2)  # Adjust as per the desired spacing

        # Two columns of suggested questions
        col1, col2 = st.columns(2)

        with col1:
            if st.button("Wie viele Ärzte benötigt eine Klinik in der Leistungsgruppe Stammzell-transplantation?"):
                query = "Wie viele Ärzte benötigt eine Klinik in der Leistungsgruppe Stammzell-transplantation?"
            if st.button("Wie viele Leistungsgruppen soll es durch die neue KH Reform geben?"):
                query = "Wie viele Leistungsgruppen soll es durch die neue KH Reform geben?"
            if st.button("Was sind die hauptsächlichen Änderungsvorhaben der Krankenhausreform?"):
                query = "Was sind die hauptsächlichen Änderungsvorhaben der Krankenhausreform?"

        with col2:
            if st.button("Welche technischen Gerätevorgaben und Personalvorgaben muss die LG Allgemeine Chirurgie erfüllen?"):
                query = "Welche technischen Gerätevorgaben und Personalvorgaben muss die LG Allgemeine Chirurgie erfüllen?"
            if st.button("Was soll die Reform der Notfallversorgung beinhalten?"):
                query = "Was soll die Reform der Notfallversorgung beinhalten?"
            if st.button("Was bedeutet die Vorhaltefinanzierung?"):
                query = "Was bedeutet die Vorhaltefinanzierung?"

        if query:
            full_query = ask_bot(query)
            st.session_state['chat_history_page1'].append(("User", query, "new"))

            # Start timing
            start_time = time.time()

            # Placeholder for the response time
            response_time_placeholder = st.empty()

            # Run retrieval and generation inside the spinner
            with st.spinner('Eve denkt über Ihre Frage nach...'):
                chain = load_chatbot()
                docs = VectorStore.similarity_search(query=query, k=5)
                with get_openai_callback() as cb:  # Tracks token usage for the call
                    response = chain.run(input_documents=docs, question=full_query)
                    response = handle_no_answer(response)

            # Stop timing and calculate the duration
            end_time = time.time()
            duration = end_time - start_time

            st.session_state['chat_history_page1'].append(("Eve", response, "new"))

            # Combine chat histories from all pages and save them
            all_chat_histories = [
                st.session_state['chat_history_page1'],
                st.session_state['chat_history_page2'],
                st.session_state['chat_history_page3']
            ]
            save_conversation(all_chat_histories, st.session_state['session_id'])

            # Display the newest messages at the bottom
            new_messages = st.session_state['chat_history_page1'][-2:]
            for chat in new_messages:
                background_color = "#ffeecf"
                new_messages_placeholder.markdown(f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{chat[0]}: {chat[1]}</div>", unsafe_allow_html=True)

            # Show the response time after the messages are displayed
            response_time_placeholder.text(f"Response time: {duration:.2f} seconds")

            # Mark all messages as old after displaying
            st.session_state['chat_history_page1'] = [(sender, msg, "old") for sender, msg, _ in st.session_state['chat_history_page1']]

    except Exception as e:
        st.error(f"Oops, an unexpected error occurred: {e}")
        # Optionally log the exception details to a file or error tracking service
def page2():
    try:
        hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
        st.markdown(hide_streamlit_style, unsafe_allow_html=True)

        # Create columns for layout
        col1, col2 = st.columns([3, 1])  # Adjust the ratio to your liking

        with col1:
            st.title("Die wichtigsten 100 Kennzahlen und KPIs!")

        with col2:
            # Display the logo in the top-right corner of the page
            image = Image.open('BinDoc Logo (Quadratisch).png')
            st.image(image, use_column_width='always')

        if not os.path.exists(pdf_path2):
            st.error("File not found. Please check the file path.")
            return

        VectorStore = load_vector_store(pdf_path2, "Buch_2301", force_reload=False)

        display_chat_history(st.session_state['chat_history_page2'])

        # Spacer pushing the input area towards the bottom
        st.write("<div style='flex: 1;'></div>", unsafe_allow_html=True)

        new_messages_placeholder = st.empty()

        query = st.text_input("Geben Sie hier Ihre Frage ein / Enter your question here:")

        add_vertical_space(2)  # Adjust as per the desired spacing

        # Two columns of suggested questions
        col1, col2 = st.columns(2)

        with col1:
            if st.button("Erstelle mir eine Liste mit 3 wichtigen Personalkennzahlen im Krankenhaus."):
                query = "Erstelle mir eine Liste mit 3 wichtigen Personalkennzahlen im Krankenhaus."
            if st.button("Wie ist die durchschnittliche Bettenauslastung eines Krankenhauses im Jahr 2020?"):
                query = "Wie ist die durchschnittliche Bettenauslastung eines Krankenhauses im Jahr 2020?"
            if st.button("Welches sind die Top 1-5 DRGs, die von den Krankenhäusern 2020 abgerechnet wurden?"):
                query = "Welches sind die Top 1-5 DRGs, die von den Krankenhäusern 2020 abgerechnet wurden?"

        with col2:
            if st.button("Wie viele Casemixpunkte werden im Median von einer VK im ärztlichen Dienst 2020 erbracht?"):
                query = "Wie viele Casemixpunkte werden im Median von einer VK im ärztlichen Dienst 2020 erbracht?"
            if st.button("Bitte erstelle mir eine Übersicht des BBFW, der Planbetten und CM-relevanten Erlöse eines KH der Grund- und Regelversorgung."):
                query = "Bitte erstelle mir eine Übersicht des BBFW, der Planbetten und CM-relevanten Erlöse eines KH der Grund- und Regelversorgung."
            if st.button("Wie viele Patienten eines Grund- und Regelversorgers kommen aus einem 10, 20, 30, 40 Minuten Radius?"):
                query = "Wie viele Patienten eines Grund- und Regelversorgers kommen aus einem 10, 20, 30, 40 Minuten Radius?"

        if query:
            full_query = ask_bot(query)
            st.session_state['chat_history_page2'].append(("User", query, "new"))

            # Start timing
            start_time = time.time()

            # Placeholder for the response time
            response_time_placeholder = st.empty()

            with st.spinner('Eve denkt über Ihre Frage nach...'):
                chain = load_chatbot()
                docs = VectorStore.similarity_search(query=query, k=5)
                with get_openai_callback() as cb:  # Tracks token usage for the call
                    response = chain.run(input_documents=docs, question=full_query)
                    response = handle_no_answer(response)  # Swap in a fallback if the model could not answer

            # Stop timing and calculate the duration
            end_time = time.time()
            duration = end_time - start_time

            st.session_state['chat_history_page2'].append(("Eve", response, "new"))

            # Combine chat histories from all pages and save them
            all_chat_histories = [
                st.session_state['chat_history_page1'],
                st.session_state['chat_history_page2'],
                st.session_state['chat_history_page3']
            ]
            save_conversation(all_chat_histories, st.session_state['session_id'])

            # Display the newest messages at the bottom
            new_messages = st.session_state['chat_history_page2'][-2:]
            for chat in new_messages:
                background_color = "#ffeecf"
                new_messages_placeholder.markdown(f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{chat[0]}: {chat[1]}</div>", unsafe_allow_html=True)

            # Show the response time after the messages are displayed
            response_time_placeholder.text(f"Response time: {duration:.2f} seconds")

            # Mark all messages as old after displaying
            st.session_state['chat_history_page2'] = [(sender, msg, "old") for sender, msg, _ in st.session_state['chat_history_page2']]

    except Exception as e:
        st.error(f"Oops, an unexpected error occurred: {e}")
        # Optionally log the exception details to a file or error tracking service
def page3():
    try:
        # Basic layout setup
        st.title("Kosten- und Strukturdaten der Krankenhäuser")

        # Initialize text splitter
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=200, length_function=len)

        # Initialize the Chroma client and create or fetch the collection
        chroma_client = chromadb.Client()
        try:
            collection = chroma_client.create_collection(name="Kosten_Strukturdaten0602204")
        except Exception as e:
            if 'already exists' in str(e):
                collection = chroma_client.get_collection(name="Kosten_Strukturdaten0602204")
            else:
                raise e

        # Add documents to the collection once per session
        if "documents_added" not in st.session_state:
            preprocess_and_store_pdf_text(pdf_path3, collection, text_splitter)
            st.session_state["documents_added"] = True

        # Display chat history
        display_chat_history(st.session_state['chat_history_page3'])

        # User query input
        query = st.text_input("Geben Sie hier Ihre Frage ein / Enter your question here:")

        if query:
            full_query = ask_bot(query)
            st.session_state['chat_history_page3'].append(("User", query, "new"))

            # Query the Chroma collection with error handling
            try:
                results = collection.query(query_texts=[full_query], n_results=5)
                response = process_croma_results(results)
            except Exception as query_exception:
                log_error(f"Chroma DB query error: {query_exception}")
                response = "An error occurred while processing your query."

            st.session_state['chat_history_page3'].append(("Eve", response, "new"))

            # Display the newest messages at the bottom
            new_messages = st.session_state['chat_history_page3'][-2:]
            for chat in new_messages:
                background_color = "#ffeecf"
                st.markdown(f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{chat[0]}: {chat[1]}</div>", unsafe_allow_html=True)

    except Exception as e:
        log_error(f"General error in page3: {e}")  # Log general errors
        st.error(f"An unexpected error occurred: {repr(e)}")
def log_error(message):
    """
    Logs an error message. Can be enhanced to write to a file or an external logging service.
    """
    # For now, print to the console; can be replaced with file or external-service logging
    print(message)
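# A possible file-based variant (a sketch using the standard library; the
# filename "app_errors.log" is a hypothetical choice):
#
#   import logging
#   logging.basicConfig(filename="app_errors.log", level=logging.ERROR)
#
#   def log_error(message):
#       logging.error(message)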
def process_croma_results(results):
    """
    Process the query results from Chroma DB and generate a response.
    """
    if results and results['documents']:
        try:
            # Chroma returns one list of document strings per query text;
            # take the list for the first (and only) query
            top_documents = results['documents'][0]
            response = " ".join(top_documents[:3])  # Limit to the top 3 documents for brevity
        except (KeyError, IndexError):
            response = "Error in processing the response."
    else:
        response = "No results found for your query."
    return response
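# For reference, collection.query(query_texts=[...], n_results=5) returns a dict
# shaped roughly like {'ids': [[...]], 'documents': [[str, ...]], 'distances': [[...]], ...},
# with one inner list per query text, which is why the strings in
# results['documents'][0] can be joined directly above.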
# TODO: Implement additional error handling and logging
# TODO: Review for security and performance improvements
def page4():
    try:
        st.header(":mailbox: Kontakt & Feedback!")
        st.markdown("Ihre Session-ID finden Sie auf der linken Seite!")

        contact_form = """
        <form action="https://formsubmit.co/anne.demond@googlemail.com" method="POST">
            <input type="hidden" name="_captcha" value="false">
            <input type="text" name="Session-ID" placeholder="Your Session-ID goes here" required>
            <input type="email" name="email" placeholder="Your email" required>
            <textarea name="message" placeholder="Your message here"></textarea>
            <button type="submit">Send</button>
        </form>
        """
        st.markdown(contact_form, unsafe_allow_html=True)

        # Use a local CSS file
        def local_css(file_name):
            with open(file_name) as f:
                st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)

        local_css("style.css")

    except Exception as e:
        st.error(f"Oops, an unexpected error occurred: {e}")
        # Optionally log the exception details to a file or error tracking service
# Main function
def main():
    # Sidebar content
    with st.sidebar:
        st.title('BinDoc GmbH')
        st.markdown("Tauchen Sie ein in eine revolutionäre Erfahrung mit BinDocs Chat-App - angetrieben von fortschrittlichster KI-Technologie.")
        add_vertical_space(1)
        page = st.sidebar.selectbox("Wählen Sie eine Seite aus:", ["Krankenhausreform!", "Kennzahlen und KPIs!", "Kosten- und Strukturdaten", "Kontakt & Feedback!"])
        add_vertical_space(4)
        display_session_id()  # Display the session ID in the sidebar
        st.write('Made with ❤️ by BinDoc GmbH')

    # Main area content based on page selection
    if page == "Krankenhausreform!":
        page1()
    elif page == "Kennzahlen und KPIs!":
        page2()
    elif page == "Kosten- und Strukturdaten":
        page3()
    elif page == "Kontakt & Feedback!":
        page4()


if __name__ == "__main__":
    main()