Update app.py
app.py (CHANGED)

@@ -41,7 +41,7 @@ def encode_image(image_path):
 
 def respond(
     message,
-    image_files,
+    image_files,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
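
The only change in this hunk is the `image_files` parameter of `respond`. As an illustration of the shape that argument takes (based on how `bot` passes it later in this diff: a possibly empty list of local file paths from the MultimodalTextbox "files" field), here is a small stub; it is not the Space's function.

```python
# Illustrative stub only, not the Space's respond(): it documents the expected
# shape of the new image_files argument (a possibly empty list of local file
# paths taken from the MultimodalTextbox "files" field).
def respond_stub(message, image_files, *rest):
    image_files = image_files or []
    print(f"{len(image_files)} image(s) attached to message: {message!r}")

respond_stub("Describe this picture", ["/tmp/example.jpg"])  # hypothetical path
respond_stub("Hello", [])
```
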
@@ -83,79 +83,79 @@ def respond(
     if seed == -1:
         seed = None
 
+    # Prepare messages for the API
+    user_content = []
+
+    # Add text if there is any
+    if message and message.strip():
+        user_content.append({
+            "type": "text",
+            "text": message
+        })
+
+    # Add images if any
     if image_files and len(image_files) > 0:
-                        "url": f"data:image/jpeg;base64,{encoded_image}"
-                    }
-                })
-            except Exception as e:
-                print(f"Error encoding image: {e}")
-    else:
-        # Text-only message
-        user_content = message
+        for file_path in image_files:
+            if not file_path:
+                continue
+
+            try:
+                print(f"Processing image file: {file_path}")
+                # For direct file paths, no need to encode as base64
+                user_content.append({
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"file://{file_path}"
+                    }
+                })
+            except Exception as e:
+                print(f"Error processing image file: {e}")
+
+    # If empty content, set to text only
+    if not user_content:
+        user_content = ""
+
     # Prepare messages in the format expected by the API
     messages = [{"role": "system", "content": system_message}]
     print("Initial messages array constructed.")
 
     # Add conversation history to the context
     for val in history:
+        user_msg = val[0]
+        assistant_msg = val[1]
+
+        # Process user message
+        if user_msg:
+            if isinstance(user_msg, dict) and "text" in user_msg:
+                # This is a MultimodalTextbox message
+                hist_text = user_msg.get("text", "")
+                hist_files = user_msg.get("files", [])
+
+                hist_content = []
+                if hist_text:
+                    hist_content.append({
                         "type": "text",
+                        "text": hist_text
                     })
 
-                            "url": f"data:image/jpeg;base64,{encoded_img}"
-                        }
-                    })
-                except Exception as e:
-                    print(f"Error encoding history image: {e}")
+                for hist_file in hist_files:
+                    if hist_file:
+                        hist_content.append({
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"file://{hist_file}"
+                            }
+                        })
 
+                if hist_content:
+                    messages.append({"role": "user", "content": hist_content})
             else:
                 # Regular text message
-                print(f"Added user message to context (type: {type(user_part)})")
+                messages.append({"role": "user", "content": user_msg})
 
+        # Process assistant message
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
 
     # Append the latest user message
     messages.append({"role": "user", "content": user_content})
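
For reference, below is a minimal standalone sketch (not part of the commit) of the message payload the rewritten `respond` builds: text and uploaded images are merged into a single user turn, with images referenced as `file://` URLs rather than the base64 `data:` URLs used by the previous revision. The prompt text and file path are made up; whether a given inference provider can dereference a local `file://` URL depends on the backend.

```python
import json

# Standalone sketch of the user-content structure built in the hunk above.
# The message and the upload path are illustrative values.
message = "What is in this image?"
image_files = ["/tmp/example.jpg"]  # hypothetical path from the MultimodalTextbox

user_content = []
if message and message.strip():
    user_content.append({"type": "text", "text": message})
for file_path in image_files:
    if file_path:
        user_content.append({
            "type": "image_url",
            "image_url": {"url": f"file://{file_path}"},
        })

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": user_content},
]
print(json.dumps(messages, indent=2))
```
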
@@ -409,36 +409,26 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             print("Empty message, skipping")
             return history
 
+        # Extract data from the MultimodalTextbox
         text_content = user_message.get("text", "").strip()
+        file_paths = user_message.get("files", [])
 
         print(f"Text content: {text_content}")
+        print(f"Files: {file_paths}")
 
-        # Process multimodal content
-        if files:
-            # For multimodal messages with files
-            for file_path in files:
-                print(f"Processing file: {file_path}")
-                if not file_path:
-                    continue
-
-                # Add a combined message with text and file
-                history.append([(text_content, file_path), None])
-                # Reset text content for subsequent files if there are multiple
-                text_content = ""
+        # Process the message
+        if file_paths and len(file_paths) > 0:
+            # We have files - create a multimodal message
+            file_path = file_paths[0]  # For simplicity, use the first file
+            print(f"Using file: {file_path}")
 
+            # Add the message with both text and file as separate components
+            history.append([user_message, None])  # Keep the original format for processing
         else:
-            history.append([text_content, None])
+            # Text-only message
+            history.append([{"text": text_content, "files": []}, None])
+
+        return history
 
     # Define bot response function
     def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
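
The handler above now stores the raw MultimodalTextbox payload (a dict with `text` and `files` keys) in the chat history instead of the old `(text, file_path)` tuples. The sketch below, with a hypothetical function name and made-up values, shows the resulting history format.

```python
# Hypothetical handler name and example values; the appends mirror the hunk above.
def add_user_turn(history, user_message):
    text_content = user_message.get("text", "").strip()
    file_paths = user_message.get("files", [])
    if file_paths:
        history.append([user_message, None])  # keep text and files together
    else:
        history.append([{"text": text_content, "files": []}, None])
    return history

history = []
add_user_turn(history, {"text": "Hi there", "files": []})
add_user_turn(history, {"text": "Describe this", "files": ["/tmp/cat.png"]})
print(history)
```
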
@@ -451,29 +441,19 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         user_message = history[-1][0]
         print(f"Processing user message: {user_message}")
 
-        if isinstance(user_message, tuple):
-            # Tuple format: (text, image_path)
-            text_content = user_message[0] if user_message[0] else ""
-            # Handle both single image path and list of paths
-            if isinstance(user_message[1], list):
-                image_files = user_message[1]
-            else:
-                image_files = [user_message[1]]
-            print(f"Multimodal message detected - Text: {text_content}, Images: {image_files}")
+        # Get text and files from the message
+        if isinstance(user_message, dict) and "text" in user_message:
+            text_content = user_message.get("text", "")
+            image_files = user_message.get("files", [])
         else:
-            print(f"Text-only message detected: {text_content}")
+            text_content = ""
+            image_files = []
 
         # Process message through respond function
         history[-1][1] = ""
         for response in respond(
             text_content,
-            image_files
+            image_files,
             history[:-1],
             system_msg,
             max_tokens,
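
`bot` now branches on the dict format instead of the old tuple format before calling `respond`. As an illustration only (this helper is not in the Space), the removed and added branches above can be folded into one normalizer that accepts either shape:

```python
# Illustrative helper, not part of the commit: normalizes both the old
# (text, image_path) tuple format and the new MultimodalTextbox dict format
# into (text_content, image_files), mirroring the branches shown in the diff.
def extract_message(user_message):
    if isinstance(user_message, dict) and "text" in user_message:
        return user_message.get("text", ""), user_message.get("files", [])
    if isinstance(user_message, tuple):
        text = user_message[0] if user_message[0] else ""
        files = user_message[1] if isinstance(user_message[1], list) else [user_message[1]]
        return text, files
    return str(user_message or ""), []

print(extract_message({"text": "hi", "files": ["/tmp/a.png"]}))  # new dict format
print(extract_message(("hi", "/tmp/a.png")))                     # old tuple format
```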