Speedofmastery committed
Commit 80ff6ee · 1 Parent(s): 38e31eb

Auto-commit: app.py updated

Files changed (1):
  1. app.py +269 -172
app.py CHANGED
@@ -17,23 +17,23 @@ import logging
 # Configure logging for Linux environment
 try:
     # Try to create logs directory if it doesn't exist
-    log_dir = Path('/home/user/app/logs')
+    log_dir = Path("/home/user/app/logs")
     log_dir.mkdir(parents=True, exist_ok=True)
 
     logging.basicConfig(
         level=logging.INFO,
-        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
         handlers=[
             logging.StreamHandler(sys.stdout),
-            logging.FileHandler('/home/user/app/logs/openmanus.log', mode='a')
-        ]
+            logging.FileHandler("/home/user/app/logs/openmanus.log", mode="a"),
+        ],
     )
 except Exception:
     # Fallback to console-only logging
     logging.basicConfig(
         level=logging.INFO,
-        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-        handlers=[logging.StreamHandler(sys.stdout)]
+        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+        handlers=[logging.StreamHandler(sys.stdout)],
     )
 
 logger = logging.getLogger(__name__)
@@ -46,98 +46,183 @@ CLOUDFLARE_CONFIG = {
     "d1_database_id": os.getenv("CLOUDFLARE_D1_DATABASE_ID", ""),
     "r2_bucket_name": os.getenv("CLOUDFLARE_R2_BUCKET_NAME", ""),
     "kv_namespace_id": os.getenv("CLOUDFLARE_KV_NAMESPACE_ID", ""),
-    "durable_objects_id": os.getenv("CLOUDFLARE_DURABLE_OBJECTS_ID", "")
+    "durable_objects_id": os.getenv("CLOUDFLARE_DURABLE_OBJECTS_ID", ""),
 }
 
 # AI Model Categories with 200+ models
 AI_MODELS = {
     "Text Generation": {
         "Qwen Models": [
-            "Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-32B-Instruct", "Qwen/Qwen2.5-14B-Instruct",
-            "Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-1.5B-Instruct",
-            "Qwen/Qwen2.5-0.5B-Instruct", "Qwen/Qwen2-72B-Instruct", "Qwen/Qwen2-57B-A14B-Instruct",
-            "Qwen/Qwen2-7B-Instruct", "Qwen/Qwen2-1.5B-Instruct", "Qwen/Qwen2-0.5B-Instruct",
-            "Qwen/Qwen1.5-110B-Chat", "Qwen/Qwen1.5-72B-Chat", "Qwen/Qwen1.5-32B-Chat",
-            "Qwen/Qwen1.5-14B-Chat", "Qwen/Qwen1.5-7B-Chat", "Qwen/Qwen1.5-4B-Chat",
-            "Qwen/Qwen1.5-1.8B-Chat", "Qwen/Qwen1.5-0.5B-Chat", "Qwen/CodeQwen1.5-7B-Chat",
-            "Qwen/Qwen2.5-Math-72B-Instruct", "Qwen/Qwen2.5-Math-7B-Instruct", "Qwen/Qwen2.5-Coder-32B-Instruct",
-            "Qwen/Qwen2.5-Coder-14B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct", "Qwen/Qwen2.5-Coder-3B-Instruct",
-            "Qwen/Qwen2.5-Coder-1.5B-Instruct", "Qwen/Qwen2.5-Coder-0.5B-Instruct", "Qwen/QwQ-32B-Preview",
-            "Qwen/Qwen2-VL-72B-Instruct", "Qwen/Qwen2-VL-7B-Instruct", "Qwen/Qwen2-VL-2B-Instruct",
-            "Qwen/Qwen2-Audio-7B-Instruct", "Qwen/Qwen-Agent-Chat", "Qwen/Qwen-VL-Chat"
+            "Qwen/Qwen2.5-72B-Instruct",
+            "Qwen/Qwen2.5-32B-Instruct",
+            "Qwen/Qwen2.5-14B-Instruct",
+            "Qwen/Qwen2.5-7B-Instruct",
+            "Qwen/Qwen2.5-3B-Instruct",
+            "Qwen/Qwen2.5-1.5B-Instruct",
+            "Qwen/Qwen2.5-0.5B-Instruct",
+            "Qwen/Qwen2-72B-Instruct",
+            "Qwen/Qwen2-57B-A14B-Instruct",
+            "Qwen/Qwen2-7B-Instruct",
+            "Qwen/Qwen2-1.5B-Instruct",
+            "Qwen/Qwen2-0.5B-Instruct",
+            "Qwen/Qwen1.5-110B-Chat",
+            "Qwen/Qwen1.5-72B-Chat",
+            "Qwen/Qwen1.5-32B-Chat",
+            "Qwen/Qwen1.5-14B-Chat",
+            "Qwen/Qwen1.5-7B-Chat",
+            "Qwen/Qwen1.5-4B-Chat",
+            "Qwen/Qwen1.5-1.8B-Chat",
+            "Qwen/Qwen1.5-0.5B-Chat",
+            "Qwen/CodeQwen1.5-7B-Chat",
+            "Qwen/Qwen2.5-Math-72B-Instruct",
+            "Qwen/Qwen2.5-Math-7B-Instruct",
+            "Qwen/Qwen2.5-Coder-32B-Instruct",
+            "Qwen/Qwen2.5-Coder-14B-Instruct",
+            "Qwen/Qwen2.5-Coder-7B-Instruct",
+            "Qwen/Qwen2.5-Coder-3B-Instruct",
+            "Qwen/Qwen2.5-Coder-1.5B-Instruct",
+            "Qwen/Qwen2.5-Coder-0.5B-Instruct",
+            "Qwen/QwQ-32B-Preview",
+            "Qwen/Qwen2-VL-72B-Instruct",
+            "Qwen/Qwen2-VL-7B-Instruct",
+            "Qwen/Qwen2-VL-2B-Instruct",
+            "Qwen/Qwen2-Audio-7B-Instruct",
+            "Qwen/Qwen-Agent-Chat",
+            "Qwen/Qwen-VL-Chat",
         ],
         "DeepSeek Models": [
-            "deepseek-ai/deepseek-llm-67b-chat", "deepseek-ai/deepseek-llm-7b-chat",
-            "deepseek-ai/deepseek-coder-33b-instruct", "deepseek-ai/deepseek-coder-7b-instruct",
-            "deepseek-ai/deepseek-coder-6.7b-instruct", "deepseek-ai/deepseek-coder-1.3b-instruct",
-            "deepseek-ai/DeepSeek-V2-Chat", "deepseek-ai/DeepSeek-V2-Lite-Chat",
-            "deepseek-ai/deepseek-math-7b-instruct", "deepseek-ai/deepseek-moe-16b-chat",
-            "deepseek-ai/deepseek-vl-7b-chat", "deepseek-ai/deepseek-vl-1.3b-chat",
-            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
-            "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
-            "deepseek-ai/DeepSeek-Reasoner-R1"
-        ]
+            "deepseek-ai/deepseek-llm-67b-chat",
+            "deepseek-ai/deepseek-llm-7b-chat",
+            "deepseek-ai/deepseek-coder-33b-instruct",
+            "deepseek-ai/deepseek-coder-7b-instruct",
+            "deepseek-ai/deepseek-coder-6.7b-instruct",
+            "deepseek-ai/deepseek-coder-1.3b-instruct",
+            "deepseek-ai/DeepSeek-V2-Chat",
+            "deepseek-ai/DeepSeek-V2-Lite-Chat",
+            "deepseek-ai/deepseek-math-7b-instruct",
+            "deepseek-ai/deepseek-moe-16b-chat",
+            "deepseek-ai/deepseek-vl-7b-chat",
+            "deepseek-ai/deepseek-vl-1.3b-chat",
+            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+            "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
+            "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+            "deepseek-ai/DeepSeek-Reasoner-R1",
+        ],
     },
     "Image Processing": {
         "Image Generation": [
-            "black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-pro",
-            "runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-xl-base-1.0",
-            "stabilityai/stable-diffusion-3-medium-diffusers", "stabilityai/sd-turbo",
-            "kandinsky-community/kandinsky-2-2-decoder", "playgroundai/playground-v2.5-1024px-aesthetic",
-            "midjourney/midjourney-v6"
+            "black-forest-labs/FLUX.1-dev",
+            "black-forest-labs/FLUX.1-schnell",
+            "black-forest-labs/FLUX.1-pro",
+            "runwayml/stable-diffusion-v1-5",
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            "stabilityai/stable-diffusion-3-medium-diffusers",
+            "stabilityai/sd-turbo",
+            "kandinsky-community/kandinsky-2-2-decoder",
+            "playgroundai/playground-v2.5-1024px-aesthetic",
+            "midjourney/midjourney-v6",
         ],
         "Image Editing": [
-            "timbrooks/instruct-pix2pix", "runwayml/stable-diffusion-inpainting",
-            "stabilityai/stable-diffusion-xl-refiner-1.0", "lllyasviel/control_v11p_sd15_inpaint",
-            "SG161222/RealVisXL_V4.0", "ByteDance/SDXL-Lightning", "segmind/SSD-1B",
-            "segmind/Segmind-Vega", "playgroundai/playground-v2-1024px-aesthetic",
-            "stabilityai/stable-cascade"
+            "timbrooks/instruct-pix2pix",
+            "runwayml/stable-diffusion-inpainting",
+            "stabilityai/stable-diffusion-xl-refiner-1.0",
+            "lllyasviel/control_v11p_sd15_inpaint",
+            "SG161222/RealVisXL_V4.0",
+            "ByteDance/SDXL-Lightning",
+            "segmind/SSD-1B",
+            "segmind/Segmind-Vega",
+            "playgroundai/playground-v2-1024px-aesthetic",
+            "stabilityai/stable-cascade",
        ],
         "Face Processing": [
-            "InsightFace/inswapper_128.onnx", "deepinsight/insightface", "TencentARC/GFPGAN",
-            "sczhou/CodeFormer", "xinntao/Real-ESRGAN", "ESRGAN/ESRGAN"
-        ]
+            "InsightFace/inswapper_128.onnx",
+            "deepinsight/insightface",
+            "TencentARC/GFPGAN",
+            "sczhou/CodeFormer",
+            "xinntao/Real-ESRGAN",
+            "ESRGAN/ESRGAN",
+        ],
     },
     "Audio Processing": {
         "Text-to-Speech": [
-            "microsoft/speecht5_tts", "facebook/mms-tts-eng", "facebook/mms-tts-ara",
-            "coqui/XTTS-v2", "suno/bark", "parler-tts/parler-tts-large-v1",
-            "microsoft/DisTTS", "facebook/fastspeech2-en-ljspeech", "espnet/kan-bayashi_ljspeech_vits",
-            "facebook/tts_transformer-en-ljspeech", "microsoft/SpeechT5", "Voicemod/fastspeech2-en-male1",
-            "facebook/mms-tts-spa", "facebook/mms-tts-fra", "facebook/mms-tts-deu"
+            "microsoft/speecht5_tts",
+            "facebook/mms-tts-eng",
+            "facebook/mms-tts-ara",
+            "coqui/XTTS-v2",
+            "suno/bark",
+            "parler-tts/parler-tts-large-v1",
+            "microsoft/DisTTS",
+            "facebook/fastspeech2-en-ljspeech",
+            "espnet/kan-bayashi_ljspeech_vits",
+            "facebook/tts_transformer-en-ljspeech",
+            "microsoft/SpeechT5",
+            "Voicemod/fastspeech2-en-male1",
+            "facebook/mms-tts-spa",
+            "facebook/mms-tts-fra",
+            "facebook/mms-tts-deu",
         ],
         "Speech-to-Text": [
-            "openai/whisper-large-v3", "openai/whisper-large-v2", "openai/whisper-medium",
-            "openai/whisper-small", "openai/whisper-base", "openai/whisper-tiny",
-            "facebook/wav2vec2-large-960h", "facebook/wav2vec2-base-960h",
-            "microsoft/unispeech-sat-large", "nvidia/stt_en_conformer_ctc_large",
-            "speechbrain/asr-wav2vec2-commonvoice-en", "facebook/mms-1b-all", "facebook/seamless-m4t-v2-large",
-            "distil-whisper/distil-large-v3", "distil-whisper/distil-medium.en"
-        ]
+            "openai/whisper-large-v3",
+            "openai/whisper-large-v2",
+            "openai/whisper-medium",
+            "openai/whisper-small",
+            "openai/whisper-base",
+            "openai/whisper-tiny",
+            "facebook/wav2vec2-large-960h",
+            "facebook/wav2vec2-base-960h",
+            "microsoft/unispeech-sat-large",
+            "nvidia/stt_en_conformer_ctc_large",
+            "speechbrain/asr-wav2vec2-commonvoice-en",
+            "facebook/mms-1b-all",
+            "facebook/seamless-m4t-v2-large",
+            "distil-whisper/distil-large-v3",
+            "distil-whisper/distil-medium.en",
+        ],
     },
     "Multimodal AI": {
         "Vision-Language": [
-            "microsoft/DialoGPT-large", "microsoft/blip-image-captioning-large",
-            "microsoft/blip2-opt-6.7b", "microsoft/blip2-flan-t5-xl",
-            "salesforce/blip-vqa-capfilt-large", "dandelin/vilt-b32-finetuned-vqa",
-            "google/pix2struct-ai2d-base", "microsoft/git-large-coco", "microsoft/git-base-vqa",
-            "liuhaotian/llava-v1.6-34b", "liuhaotian/llava-v1.6-vicuna-7b"
+            "microsoft/DialoGPT-large",
+            "microsoft/blip-image-captioning-large",
+            "microsoft/blip2-opt-6.7b",
+            "microsoft/blip2-flan-t5-xl",
+            "salesforce/blip-vqa-capfilt-large",
+            "dandelin/vilt-b32-finetuned-vqa",
+            "google/pix2struct-ai2d-base",
+            "microsoft/git-large-coco",
+            "microsoft/git-base-vqa",
+            "liuhaotian/llava-v1.6-34b",
+            "liuhaotian/llava-v1.6-vicuna-7b",
         ],
         "Talking Avatars": [
-            "microsoft/SpeechT5-TTS-Avatar", "Wav2Lip-HD", "First-Order-Model",
-            "LipSync-Expert", "DeepFaceLive", "FaceSwapper-Live", "RealTime-FaceRig",
-            "AI-Avatar-Generator", "TalkingHead-3D"
-        ]
+            "microsoft/SpeechT5-TTS-Avatar",
+            "Wav2Lip-HD",
+            "First-Order-Model",
+            "LipSync-Expert",
+            "DeepFaceLive",
+            "FaceSwapper-Live",
+            "RealTime-FaceRig",
+            "AI-Avatar-Generator",
+            "TalkingHead-3D",
+        ],
     },
     "Arabic-English Models": [
-        "aubmindlab/bert-base-arabertv2", "aubmindlab/aragpt2-base", "aubmindlab/aragpt2-medium",
-        "CAMeL-Lab/bert-base-arabic-camelbert-mix", "asafaya/bert-base-arabic",
-        "UBC-NLP/MARBERT", "UBC-NLP/ARBERTv2", "facebook/nllb-200-3.3B",
-        "facebook/m2m100_1.2B", "Helsinki-NLP/opus-mt-ar-en", "Helsinki-NLP/opus-mt-en-ar",
-        "microsoft/DialoGPT-medium-arabic"
-    ]
+        "aubmindlab/bert-base-arabertv2",
+        "aubmindlab/aragpt2-base",
+        "aubmindlab/aragpt2-medium",
+        "CAMeL-Lab/bert-base-arabic-camelbert-mix",
+        "asafaya/bert-base-arabic",
+        "UBC-NLP/MARBERT",
+        "UBC-NLP/ARBERTv2",
+        "facebook/nllb-200-3.3B",
+        "facebook/m2m100_1.2B",
+        "Helsinki-NLP/opus-mt-ar-en",
+        "Helsinki-NLP/opus-mt-en-ar",
+        "microsoft/DialoGPT-medium-arabic",
+    ],
 }
 
+
 def init_database():
     """Initialize SQLite database for authentication - Linux optimized"""
     try:
@@ -152,7 +237,8 @@ def init_database():
         cursor = conn.cursor()
 
         # Create users table
-        cursor.execute("""
+        cursor.execute(
+            """
             CREATE TABLE IF NOT EXISTS users (
                 id INTEGER PRIMARY KEY AUTOINCREMENT,
                 mobile_number TEXT UNIQUE NOT NULL,
@@ -162,10 +248,12 @@ def init_database():
                 last_login TIMESTAMP,
                 is_active BOOLEAN DEFAULT 1
             )
-        """)
+        """
+        )
 
         # Create sessions table
-        cursor.execute("""
+        cursor.execute(
+            """
             CREATE TABLE IF NOT EXISTS sessions (
                 id TEXT PRIMARY KEY,
                 user_id INTEGER NOT NULL,
@@ -175,10 +263,12 @@ def init_database():
                 user_agent TEXT,
                 FOREIGN KEY (user_id) REFERENCES users (id)
             )
-        """)
+        """
+        )
 
         # Create model usage table
-        cursor.execute("""
+        cursor.execute(
+            """
             CREATE TABLE IF NOT EXISTS model_usage (
                 id INTEGER PRIMARY KEY AUTOINCREMENT,
                 user_id INTEGER,
@@ -190,7 +280,8 @@ def init_database():
                 processing_time REAL,
                 FOREIGN KEY (user_id) REFERENCES users (id)
             )
-        """)
+        """
+        )
 
         conn.commit()
         conn.close()
@@ -201,10 +292,12 @@ def init_database():
        logger.error(f"Database initialization failed: {e}")
         return False
 
+
 def hash_password(password):
     """Hash password using SHA-256"""
     return hashlib.sha256(password.encode()).hexdigest()
 
+
 def signup_user(mobile, name, password, confirm_password):
     """User registration with mobile number"""
     if not all([mobile, name, password, confirm_password]):
@@ -232,10 +325,13 @@ def signup_user(mobile, name, password, confirm_password):
 
         # Create new user
         password_hash = hash_password(password)
-        cursor.execute("""
+        cursor.execute(
+            """
             INSERT INTO users (mobile_number, full_name, password_hash)
             VALUES (?, ?, ?)
-        """, (mobile, name, password_hash))
+        """,
+            (mobile, name, password_hash),
+        )
 
         conn.commit()
         conn.close()
@@ -245,6 +341,7 @@ def signup_user(mobile, name, password, confirm_password):
     except Exception as e:
         return f"❌ Registration failed: {str(e)}"
 
+
 def login_user(mobile, password):
     """User authentication"""
     if not mobile or not password:
@@ -256,17 +353,23 @@ def login_user(mobile, password):
 
         # Verify credentials
         password_hash = hash_password(password)
-        cursor.execute("""
+        cursor.execute(
+            """
             SELECT id, full_name FROM users
             WHERE mobile_number = ? AND password_hash = ? AND is_active = 1
-        """, (mobile, password_hash))
+        """,
+            (mobile, password_hash),
+        )
 
         user = cursor.fetchone()
         if user:
             # Update last login
-            cursor.execute("""
+            cursor.execute(
+                """
                 UPDATE users SET last_login = CURRENT_TIMESTAMP WHERE id = ?
-            """, (user[0],))
+            """,
+                (user[0],),
+            )
             conn.commit()
             conn.close()
 
@@ -278,6 +381,7 @@ def login_user(mobile, password):
     except Exception as e:
         return f"❌ Login failed: {str(e)}"
 
+
 def use_ai_model(model_name, input_text, user_session="guest"):
     """Simulate AI model usage"""
     if not input_text.strip():
@@ -288,13 +392,19 @@ def use_ai_model(model_name, input_text, user_session="guest"):
         "text": f"🧠 {model_name} processed: '{input_text}'\n\n✨ AI Response: This is a simulated response from the {model_name} model. In production, this would connect to the actual model API.",
         "image": f"🖼️ {model_name} would generate/edit an image based on: '{input_text}'\n\n📸 Output: Image processing complete (simulated)",
         "audio": f"🎵 {model_name} audio processing for: '{input_text}'\n\n🔊 Output: Audio generated/processed (simulated)",
-        "multimodal": f"🤖 {model_name} multimodal processing: '{input_text}'\n\n🎯 Output: Combined AI analysis complete (simulated)"
+        "multimodal": f"🤖 {model_name} multimodal processing: '{input_text}'\n\n🎯 Output: Combined AI analysis complete (simulated)",
     }
 
     # Determine response type based on model
-    if any(x in model_name.lower() for x in ["image", "flux", "diffusion", "face", "avatar"]):
+    if any(
+        x in model_name.lower()
+        for x in ["image", "flux", "diffusion", "face", "avatar"]
+    ):
         response_type = "image"
-    elif any(x in model_name.lower() for x in ["tts", "speech", "audio", "whisper", "wav2vec"]):
+    elif any(
+        x in model_name.lower()
+        for x in ["tts", "speech", "audio", "whisper", "wav2vec"]
+    ):
         response_type = "audio"
     elif any(x in model_name.lower() for x in ["vl", "blip", "vision", "talking"]):
         response_type = "multimodal"
@@ -303,6 +413,7 @@ def use_ai_model(model_name, input_text, user_session="guest"):
 
     return response_templates[response_type]
 
+
 def get_cloudflare_status():
     """Get Cloudflare services status"""
     services = []
@@ -329,6 +440,7 @@ def get_cloudflare_status():
 
     return "\n".join(services)
 
+
 # Initialize database
 init_database()
 
@@ -340,17 +452,19 @@ with gr.Blocks(
     .container { max-width: 1400px; margin: 0 auto; }
     .header { text-align: center; padding: 25px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 15px; margin-bottom: 25px; }
     .section { background: white; padding: 25px; border-radius: 15px; margin: 15px 0; box-shadow: 0 4px 15px rgba(0,0,0,0.1); }
-    """
+    """,
 ) as app:
 
     # Header
-    gr.HTML("""
+    gr.HTML(
+        """
    <div class="header">
         <h1>🤖 OpenManus - Complete AI Platform</h1>
         <p><strong>Mobile Authentication + 200+ AI Models + Cloudflare Services</strong></p>
         <p>🧠 Qwen & DeepSeek | 🖼️ Image Processing | 🎵 TTS/STT | 👤 Face Swap | 🌍 Arabic-English | ☁️ Cloud Integration</p>
    </div>
-    """)
+    """
+    )
 
     with gr.Row():
         # Authentication Section
@@ -362,55 +476,39 @@ with gr.Blocks(
                 signup_mobile = gr.Textbox(
                     label="Mobile Number",
                     placeholder="+1234567890",
-                    info="Enter your mobile number with country code"
+                    info="Enter your mobile number with country code",
                 )
                 signup_name = gr.Textbox(
-                    label="Full Name",
-                    placeholder="Your full name"
+                    label="Full Name", placeholder="Your full name"
                 )
                 signup_password = gr.Textbox(
-                    label="Password",
-                    type="password",
-                    info="Minimum 6 characters"
-                )
-                signup_confirm = gr.Textbox(
-                    label="Confirm Password",
-                    type="password"
+                    label="Password", type="password", info="Minimum 6 characters"
                 )
+                signup_confirm = gr.Textbox(label="Confirm Password", type="password")
                 signup_btn = gr.Button("Create Account", variant="primary")
                 signup_result = gr.Textbox(
-                    label="Registration Status",
-                    interactive=False,
-                    lines=2
+                    label="Registration Status", interactive=False, lines=2
                 )
 
                 signup_btn.click(
                     signup_user,
                     [signup_mobile, signup_name, signup_password, signup_confirm],
-                    signup_result
+                    signup_result,
                 )
 
             with gr.Tab("Login"):
                 gr.Markdown("### Access Your Account")
                 login_mobile = gr.Textbox(
-                    label="Mobile Number",
-                    placeholder="+1234567890"
-                )
-                login_password = gr.Textbox(
-                    label="Password",
-                    type="password"
+                    label="Mobile Number", placeholder="+1234567890"
                 )
+                login_password = gr.Textbox(label="Password", type="password")
                login_btn = gr.Button("Login", variant="primary")
                 login_result = gr.Textbox(
-                    label="Login Status",
-                    interactive=False,
-                    lines=2
+                    label="Login Status", interactive=False, lines=2
                 )
 
                 login_btn.click(
-                    login_user,
-                    [login_mobile, login_password],
-                    login_result
+                    login_user, [login_mobile, login_password], login_result
                 )
 
     # AI Models Section
@@ -424,40 +522,42 @@ with gr.Blocks(
                     qwen_model = gr.Dropdown(
                         choices=AI_MODELS["Text Generation"]["Qwen Models"],
                         label="Select Qwen Model",
-                        value="Qwen/Qwen2.5-72B-Instruct"
+                        value="Qwen/Qwen2.5-72B-Instruct",
                     )
                     qwen_input = gr.Textbox(
                         label="Input Text",
                         placeholder="Enter your prompt for Qwen...",
-                        lines=3
+                        lines=3,
                     )
                     qwen_btn = gr.Button("Generate with Qwen")
                     qwen_output = gr.Textbox(
-                        label="Qwen Response",
-                        lines=5,
-                        interactive=False
+                        label="Qwen Response", lines=5, interactive=False
+                    )
+                    qwen_btn.click(
+                        use_ai_model, [qwen_model, qwen_input], qwen_output
                     )
-                    qwen_btn.click(use_ai_model, [qwen_model, qwen_input], qwen_output)
 
                 with gr.Column():
                     gr.Markdown("### DeepSeek Models (17 models)")
                     deepseek_model = gr.Dropdown(
                         choices=AI_MODELS["Text Generation"]["DeepSeek Models"],
                         label="Select DeepSeek Model",
-                        value="deepseek-ai/deepseek-llm-67b-chat"
+                        value="deepseek-ai/deepseek-llm-67b-chat",
                     )
                     deepseek_input = gr.Textbox(
                         label="Input Text",
                         placeholder="Enter your prompt for DeepSeek...",
-                        lines=3
+                        lines=3,
                     )
                     deepseek_btn = gr.Button("Generate with DeepSeek")
                     deepseek_output = gr.Textbox(
-                        label="DeepSeek Response",
-                        lines=5,
-                        interactive=False
+                        label="DeepSeek Response", lines=5, interactive=False
+                    )
+                    deepseek_btn.click(
+                        use_ai_model,
+                        [deepseek_model, deepseek_input],
+                        deepseek_output,
                     )
-                    deepseek_btn.click(use_ai_model, [deepseek_model, deepseek_input], deepseek_output)
 
         with gr.Tab("Image Processing"):
             with gr.Row():
@@ -466,40 +566,40 @@ with gr.Blocks(
                     img_gen_model = gr.Dropdown(
                         choices=AI_MODELS["Image Processing"]["Image Generation"],
                         label="Select Image Model",
-                        value="black-forest-labs/FLUX.1-dev"
+                        value="black-forest-labs/FLUX.1-dev",
                     )
                     img_prompt = gr.Textbox(
                         label="Image Prompt",
                         placeholder="Describe the image you want to generate...",
-                        lines=2
+                        lines=2,
                     )
                     img_gen_btn = gr.Button("Generate Image")
                     img_gen_output = gr.Textbox(
-                        label="Generation Status",
-                        lines=4,
-                        interactive=False
+                        label="Generation Status", lines=4, interactive=False
+                    )
+                    img_gen_btn.click(
+                        use_ai_model, [img_gen_model, img_prompt], img_gen_output
                     )
-                    img_gen_btn.click(use_ai_model, [img_gen_model, img_prompt], img_gen_output)
 
                 with gr.Column():
                     gr.Markdown("### Face Processing & Editing")
                     face_model = gr.Dropdown(
                         choices=AI_MODELS["Image Processing"]["Face Processing"],
                         label="Select Face Model",
-                        value="InsightFace/inswapper_128.onnx"
+                        value="InsightFace/inswapper_128.onnx",
                     )
                     face_input = gr.Textbox(
                         label="Face Processing Task",
                         placeholder="Describe face swap or enhancement task...",
-                        lines=2
+                        lines=2,
                     )
                     face_btn = gr.Button("Process Face")
                     face_output = gr.Textbox(
-                        label="Processing Status",
-                        lines=4,
-                        interactive=False
+                        label="Processing Status", lines=4, interactive=False
+                    )
+                    face_btn.click(
+                        use_ai_model, [face_model, face_input], face_output
                     )
-                    face_btn.click(use_ai_model, [face_model, face_input], face_output)
 
         with gr.Tab("Audio Processing"):
             with gr.Row():
@@ -508,18 +608,16 @@ with gr.Blocks(
                     tts_model = gr.Dropdown(
                         choices=AI_MODELS["Audio Processing"]["Text-to-Speech"],
                         label="Select TTS Model",
-                        value="microsoft/speecht5_tts"
+                        value="microsoft/speecht5_tts",
                     )
                     tts_text = gr.Textbox(
                         label="Text to Speak",
                         placeholder="Enter text to convert to speech...",
-                        lines=3
+                        lines=3,
                     )
                     tts_btn = gr.Button("Generate Speech")
                     tts_output = gr.Textbox(
-                        label="TTS Status",
-                        lines=4,
-                        interactive=False
+                        label="TTS Status", lines=4, interactive=False
                     )
                     tts_btn.click(use_ai_model, [tts_model, tts_text], tts_output)
 
@@ -528,18 +626,16 @@ with gr.Blocks(
                     stt_model = gr.Dropdown(
                         choices=AI_MODELS["Audio Processing"]["Speech-to-Text"],
                         label="Select STT Model",
-                        value="openai/whisper-large-v3"
+                        value="openai/whisper-large-v3",
                    )
                     stt_input = gr.Textbox(
                         label="Audio Description",
                         placeholder="Describe audio file to transcribe...",
-                        lines=3
+                        lines=3,
                     )
                     stt_btn = gr.Button("Transcribe Audio")
                     stt_output = gr.Textbox(
-                        label="STT Status",
-                        lines=4,
-                        interactive=False
+                        label="STT Status", lines=4, interactive=False
                     )
                     stt_btn.click(use_ai_model, [stt_model, stt_input], stt_output)
 
@@ -550,18 +646,16 @@ with gr.Blocks(
                     vl_model = gr.Dropdown(
                         choices=AI_MODELS["Multimodal AI"]["Vision-Language"],
                         label="Select VL Model",
-                        value="liuhaotian/llava-v1.6-34b"
+                        value="liuhaotian/llava-v1.6-34b",
                     )
                     vl_input = gr.Textbox(
                         label="Vision-Language Task",
                         placeholder="Describe image analysis or VQA task...",
-                        lines=3
+                        lines=3,
                     )
                     vl_btn = gr.Button("Process with VL Model")
                     vl_output = gr.Textbox(
-                        label="VL Response",
-                        lines=4,
-                        interactive=False
+                        label="VL Response", lines=4, interactive=False
                     )
                     vl_btn.click(use_ai_model, [vl_model, vl_input], vl_output)
 
@@ -570,40 +664,40 @@ with gr.Blocks(
                    avatar_model = gr.Dropdown(
                         choices=AI_MODELS["Multimodal AI"]["Talking Avatars"],
                         label="Select Avatar Model",
-                        value="Wav2Lip-HD"
+                        value="Wav2Lip-HD",
                     )
                     avatar_input = gr.Textbox(
                         label="Avatar Generation Task",
                         placeholder="Describe talking avatar or lip-sync task...",
-                        lines=3
+                        lines=3,
                     )
                     avatar_btn = gr.Button("Generate Avatar")
                     avatar_output = gr.Textbox(
-                        label="Avatar Status",
-                        lines=4,
-                        interactive=False
+                        label="Avatar Status", lines=4, interactive=False
+                    )
+                    avatar_btn.click(
+                        use_ai_model, [avatar_model, avatar_input], avatar_output
                     )
-                    avatar_btn.click(use_ai_model, [avatar_model, avatar_input], avatar_output)
 
         with gr.Tab("Arabic-English"):
             gr.Markdown("### Arabic-English Interactive Models (12 models)")
             arabic_model = gr.Dropdown(
                 choices=AI_MODELS["Arabic-English Models"],
                 label="Select Arabic-English Model",
-                value="aubmindlab/bert-base-arabertv2"
+                value="aubmindlab/bert-base-arabertv2",
            )
             arabic_input = gr.Textbox(
                 label="Text (Arabic or English)",
                 placeholder="أدخل النص باللغة العربية أو الإنجليزية / Enter text in Arabic or English...",
-                lines=4
+                lines=4,
             )
             arabic_btn = gr.Button("Process Arabic-English")
             arabic_output = gr.Textbox(
-                label="Processing Result",
-                lines=6,
-                interactive=False
+                label="Processing Result", lines=6, interactive=False
+            )
+            arabic_btn.click(
+                use_ai_model, [arabic_model, arabic_input], arabic_output
             )
-            arabic_btn.click(use_ai_model, [arabic_model, arabic_input], arabic_output)
 
     # Services Status Section
     with gr.Row():
@@ -617,17 +711,17 @@ with gr.Blocks(
                 label="Cloudflare Services",
                 value=get_cloudflare_status(),
                 lines=6,
-                interactive=False
+                interactive=False,
             )
             refresh_btn = gr.Button("Refresh Status")
             refresh_btn.click(
-                lambda: get_cloudflare_status(),
-                outputs=services_status
+                lambda: get_cloudflare_status(), outputs=services_status
            )
 
         with gr.Column():
             gr.Markdown("### Configuration")
-            gr.HTML("""
+            gr.HTML(
+                """
            <div style="background: #f0f8ff; padding: 15px; border-radius: 10px;">
                 <h4>Environment Variables:</h4>
                 <ul>
@@ -639,10 +733,12 @@ with gr.Blocks(
                     <li><code>CLOUDFLARE_DURABLE_OBJECTS_ID</code> - Durable objects</li>
                 </ul>
            </div>
-            """)
+            """
+            )
 
     # Footer Status
-    gr.HTML("""
+    gr.HTML(
+        """
    <div style="background: linear-gradient(45deg, #f0f8ff 0%, #e6f3ff 100%); padding: 20px; border-radius: 15px; margin-top: 25px; text-align: center;">
         <h3>📊 Platform Status</h3>
         <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 15px; margin: 15px 0;">
@@ -657,7 +753,8 @@ with gr.Blocks(
         </div>
         <p><em>Complete AI Platform successfully deployed on HuggingFace Spaces with Docker!</em></p>
    </div>
-    """)
+    """
+    )
 
 if __name__ == "__main__":
     logger.info("🚀 Launching OpenManus Platform...")
@@ -674,7 +771,7 @@ if __name__ == "__main__":
             debug=False,
             enable_queue=True,
             show_error=True,
-            quiet=False
+            quiet=False,
         )
     except Exception as e:
         logger.error(f"Failed to launch application: {e}")