fix api
app.py (CHANGED)
@@ -272,17 +272,18 @@ class PicletGeneratorService:
         print(f"Generating caption for image...")
         result = await client.predict(
+            "/stream_chat",
+            image_data,       # input_image
+            "Descriptive",    # caption_type
+            "medium-length",  # caption_length
+            [],               # extra_options
+            "",               # name_input
+            "Describe this image in detail, identifying any recognizable objects, brands, logos, or specific models. Be specific about product names and types."  # custom_prompt
         )

+        # JoyCaption returns tuple: (prompt_used, caption_text) in .data
+        result_data = result.data if hasattr(result, 'data') else result
+        caption = result_data[1] if isinstance(result_data, (list, tuple)) and len(result_data) > 1 else str(result_data)
         print(f"Caption generated: {caption[:100]}...")
         return caption
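All three call sites in this commit unwrap the prediction result with the same defensive pattern, so it is easier to read pulled out on its own. The sketch below is illustrative only: unwrap_result is a hypothetical helper name, not something in app.py, and it assumes the client hands back either the output tuple directly or an object carrying it in a .data attribute, which is what the hasattr check in the diff guards against.

from typing import Any

def unwrap_result(result: Any, index: int = 0, default: Any = "") -> Any:
    # Hypothetical helper mirroring the extraction done after each
    # client.predict() call in this diff; not part of app.py itself.
    data = result.data if hasattr(result, "data") else result
    if isinstance(data, (list, tuple)):
        return data[index] if len(data) > index else default
    return data

# e.g. the caption is the second element of JoyCaption's output tuple:
# caption = unwrap_result(result, index=1)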
@@ -302,15 +303,16 @@ class PicletGeneratorService:
         print(f"Generating text...")
         result = await client.predict(
+            "/chat",
+            prompt,  # message
+            [],      # history
+            "You are a helpful assistant that creates Pokemon-style monster concepts based on real-world objects.",  # system_prompt
+            0.7      # temperature
         )

         # Extract response text (GPT-OSS formats with Analysis and Response)
+        result_data = result.data if hasattr(result, 'data') else result
+        response_text = result_data[0] if isinstance(result_data, (list, tuple)) else str(result_data)

         # Try to extract Response section
         response_match = re.search(r'\*\*💬 Response:\*\*\s*\n\n([\s\S]*)', response_text)
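The hunk stops right where the "💬 Response" section is pulled out of the model output. A minimal sketch of that step, assuming the fallback when the marker is absent is simply to use the whole text; the diff does not show what the surrounding code actually does in that case, and extract_response_section is a made-up name.

import re

def extract_response_section(response_text: str) -> str:
    # Grab everything after the '**💬 Response:**' marker that the
    # GPT-OSS-style output uses, matching the regex in the diff above.
    match = re.search(r'\*\*💬 Response:\*\*\s*\n\n([\s\S]*)', response_text)
    # Assumption (not shown in the diff): fall back to the full text
    # when the marker is missing.
    return match.group(1).strip() if match else response_text.strip()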
@@ -489,18 +491,19 @@ CRITICAL RULES:
         print(f"Generating image with prompt: {full_prompt[:100]}...")
         result = await client.predict(
+            "/infer",
+            full_prompt,  # prompt
+            0,            # seed
+            True,         # randomize_seed
+            1024,         # width
+            1024,         # height
+            4             # num_inference_steps
         )

         # Extract image URL and seed
+        result_data = result.data if hasattr(result, 'data') else result
+        image_data = result_data[0] if isinstance(result_data, (list, tuple)) else result_data
+        seed = result_data[1] if isinstance(result_data, (list, tuple)) and len(result_data) > 1 else 0

         # Handle different return formats
         image_url = None
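The image hunk ends at "# Handle different return formats" without showing which formats are actually handled. As a rough sketch only: Gradio image endpoints commonly return either a plain path/URL string or a dict-like payload with "url"/"path" keys, so the normalization might look like the following. resolve_image_url is a hypothetical name and the dict keys are assumptions, not taken from app.py.

def resolve_image_url(image_data) -> str | None:
    # Hypothetical normalizer; the real branches in app.py are not shown
    # in this diff.
    if isinstance(image_data, str):
        return image_data  # already a URL or local file path
    if isinstance(image_data, dict):
        # Assumed keys; adjust to whatever the /infer endpoint really returns.
        return image_data.get("url") or image_data.get("path")
    return None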