Update app.py
app.py CHANGED
@@ -100,42 +100,6 @@ with st.sidebar:
 # Main chat area
 st.title("Personalized Real-Time Chat")
 
-# Display chat messages
-chat_container = st.container()
-
-# Input for new message
-new_message = st.text_input("Type your message:")
-if st.button("Send"):
-    if new_message:
-        timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
-        st.session_state.messages.append({
-            'user': st.session_state.current_user['name'],
-            'message': new_message,
-            'timestamp': timestamp
-        })
-        save_data()
-        st.experimental_rerun()
-
-# Function to display chat messages
-def display_messages():
-    for msg in st.session_state.messages:
-        with chat_container.container():
-            st.write(f"**{msg['user']}** ({msg['timestamp']}): {msg['message']}")
-
-# Display messages
-display_messages()
-
-# Polling for updates
-if st.button("Refresh Chat"):
-    load_data()
-    st.experimental_rerun()
-
-# Auto-refresh (note: this will refresh the entire app)
-time.sleep(5)
-st.experimental_rerun()
-
-# Additional functionalities for text, image, audio, and video processing
-
 # Function to generate filenames
 def generate_filename(prompt, file_type):
     central = pytz.timezone('US/Central')

@@ -144,29 +108,36 @@ def generate_filename(prompt, file_type):
     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
+# Function to create files
+def create_file(filename, prompt, response, user_name, timestamp, is_image=False):
+    with open(filename, "w", encoding="utf-8") as f:
+        f.write(f"User: {user_name}\nTimestamp: {timestamp}\n\nPrompt:\n{prompt}\n\nResponse:\n{response}")
+
 # Function to process text
-def process_text(text_input):
+def process_text(user_name, text_input):
     if text_input:
-
-
-
-
+        timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
+        st.session_state.messages.append({"user": user_name, "message": text_input, "timestamp": timestamp})
+        with st.chat_message(user_name):
+            st.markdown(f"{user_name} ({timestamp}): {text_input}")
+        with st.chat_message("Assistant"):
             completion = client.chat.completions.create(
                 model=MODEL,
                 messages=[
-                    {"role":
+                    {"role": "user", "content": m["message"]}
                     for m in st.session_state.messages
                 ],
                 stream=False
             )
             return_text = completion.choices[0].message.content
-            st.
+            st.markdown(f"Assistant ({timestamp}): {return_text}")
             filename = generate_filename(text_input, "md")
-            create_file(filename, text_input, return_text,
-            st.session_state.messages.append({"
+            create_file(filename, text_input, return_text, user_name, timestamp)
+            st.session_state.messages.append({"user": "Assistant", "message": return_text, "timestamp": timestamp})
+            save_data()
 
 # Function to process image
-def process_image(image_input, user_prompt):
+def process_image(user_name, image_input, user_prompt):
     if isinstance(image_input, str):
         with open(image_input, "rb") as image_file:
             image_input = image_file.read()

@@ -184,28 +155,33 @@ def process_image(image_input, user_prompt):
     )
     image_response = response.choices[0].message.content
     st.markdown(image_response)
+    timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
     filename_md = generate_filename(user_prompt, "md")
-    create_file(filename_md, image_response,
+    create_file(filename_md, user_prompt, image_response, user_name, timestamp)
+    st.session_state.messages.append({"user": user_name, "message": image_response, "timestamp": timestamp})
+    save_data()
     return image_response
 
 # Function to process audio
-def process_audio(audio_input, text_input):
+def process_audio(user_name, audio_input, text_input):
     if audio_input:
         transcription = client.audio.transcriptions.create(
             model="whisper-1",
             file=audio_input,
         )
-
-
+        timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
+        st.session_state.messages.append({"user": user_name, "message": transcription.text, "timestamp": timestamp})
+        with st.chat_message(user_name):
+            st.markdown(f"{user_name} ({timestamp}): {transcription.text}")
+        with st.chat_message("Assistant"):
             st.markdown(transcription.text)
-            SpeechSynthesis(transcription.text)
             filename = generate_filename(transcription.text, "wav")
-
-
-
+            create_file(filename, text_input, transcription.text, user_name, timestamp)
+            st.session_state.messages.append({"user": "Assistant", "message": transcription.text, "timestamp": timestamp})
+            save_data()
 
 # Function to process video
-def process_video(video_input, user_prompt):
+def process_video(user_name, video_input, user_prompt):
     if isinstance(video_input, str):
         with open(video_input, "rb") as video_file:
             video_input = video_file.read()

@@ -226,8 +202,11 @@ def process_video(video_input, user_prompt):
     )
     video_response = response.choices[0].message.content
     st.markdown(video_response)
+    timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
     filename_md = generate_filename(user_prompt, "md")
-    create_file(filename_md, video_response,
+    create_file(filename_md, user_prompt, video_response, user_name, timestamp)
+    st.session_state.messages.append({"user": user_name, "message": video_response, "timestamp": timestamp})
+    save_data()
     return video_response
 
 # Function to extract video frames

@@ -260,11 +239,6 @@ def process_audio_for_video(video_input):
     except:
         return ''
 
-# Function to create files
-def create_file(filename, prompt, response, is_image=False):
-    with open(filename, "w", encoding="utf-8") as f:
-        f.write(prompt + "\n\n" + response)
-
 # Initialize OpenAI client
 openai.api_key = os.getenv('OPENAI_API_KEY')
 openai.organization = os.getenv('OPENAI_ORG_ID')

@@ -272,6 +246,15 @@ client = OpenAI(api_key=openai.api_key, organization=openai.organization)
 MODEL = "gpt-4o-2024-05-13"
 should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
 
+# Function to display chat messages
+def display_messages():
+    for msg in st.session_state.messages:
+        with st.chat_message(msg['user']):
+            st.markdown(f"**{msg['user']}** ({msg['timestamp']}): {msg['message']}")
+
+# Display messages
+display_messages()
+
 # Main function
 def main():
     st.markdown("##### GPT-4o Omni Model: Text, Audio, Image, & Video")

@@ -279,21 +262,36 @@ def main():
     if option == "Text":
         text_input = st.text_input("Enter your text:")
         if text_input:
-            process_text(text_input)
+            process_text(st.session_state.current_user['name'], text_input)
     elif option == "Image":
         text_input = st.text_input("Enter text prompt to use with Image context:")
         image_input = st.file_uploader("Upload an image", type=["png"])
         if image_input:
-            process_image(image_input, text_input)
+            process_image(st.session_state.current_user['name'], image_input, text_input)
     elif option == "Audio":
         text_input = st.text_input("Enter text prompt to use with Audio context:")
         uploaded_files = st.file_uploader("Upload an audio file", type=["mp3", "wav"], accept_multiple_files=True)
         for audio_input in uploaded_files:
-            process_audio(audio_input, text_input)
+            process_audio(st.session_state.current_user['name'], audio_input, text_input)
     elif option == "Video":
         video_input = st.file_uploader("Upload a video file", type=["mp4"])
         text_input = st.text_input("Enter text prompt to use with Video context:")
         if video_input and text_input:
-            process_video(video_input, text_input)
+            process_video(st.session_state.current_user['name'], video_input, text_input)
+
+# Add buttons for quick access to different UI options
+with st.sidebar:
+    if st.button("📝 Add Text"):
+        st.session_state.ui_option = "Text"
+    if st.button("🖼️ Add Image"):
+        st.session_state.ui_option = "Image"
+    if st.button("🎵 Add Audio"):
+        st.session_state.ui_option = "Audio"
+    if st.button("🎥 Add Video"):
+        st.session_state.ui_option = "Video"
 
-
+if 'ui_option' in st.session_state:
+    main()
+else:
+    st.session_state.ui_option = "Text"
+    main()
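Taken together, the diff replaces the hand-rolled st.container()/st.write chat area and the time.sleep(5)/st.experimental_rerun() polling loop with Streamlit's native chat primitives, replaying st.session_state.messages on every rerun. A minimal standalone sketch of that pattern, assuming Streamlit 1.24 or later (for st.chat_message/st.chat_input); st.chat_input and the save_data() body here are stand-ins for this app's own input widgets and persistence, not code from the commit:

import streamlit as st
from datetime import datetime
import pytz

# Stand-in for the app's save_data(); here it just dumps the
# transcript to a local markdown file.
def save_data():
    with open("chat_log.md", "w", encoding="utf-8") as f:
        for msg in st.session_state.messages:
            f.write(f"**{msg['user']}** ({msg['timestamp']}): {msg['message']}\n\n")

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored transcript with st.chat_message, as the commit's
# new display_messages() does.
for msg in st.session_state.messages:
    with st.chat_message(msg["user"]):
        st.markdown(f"**{msg['user']}** ({msg['timestamp']}): {msg['message']}")

# st.chat_input triggers a rerun on submit, so no explicit
# st.experimental_rerun() or sleep-based polling is needed.
if prompt := st.chat_input("Type your message:"):
    timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
    st.session_state.messages.append({"user": "user", "message": prompt, "timestamp": timestamp})
    with st.chat_message("user"):
        st.markdown(f"**user** ({timestamp}): {prompt}")
    save_data()

The key behavioral change is that rendering is driven entirely by replaying the stored transcript on each rerun, rather than by a "Refresh Chat" button or a timed rerun of the whole app.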