Ahmed-El-Sharkawy committed on
Commit
3f41fce
·
verified ·
1 Parent(s): 599b027

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +143 -0
  2. download.jpeg +0 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os, sys, time, asyncio
from typing import List, Dict
import gradio as gr
from dotenv import load_dotenv

# On Windows, install the Proactor event-loop policy up front so Gradio's
# asyncio machinery doesn't emit loop-related warnings on that platform.
if sys.platform.startswith("win"):
    try:
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
    except Exception:
        # Best-effort: if the policy can't be set (e.g. alternative runtime),
        # continue with the default loop rather than failing at import time.
        pass
# config from .env or defaults
load_dotenv()
# App metadata; each env var overrides the default shown here.
APP_Name = os.getenv("APP_Name", "Ghaymah GenAI chatbots")
APP_Version = os.getenv("APP_Version", "0.1.0")
API_KEY = os.getenv("API_KEY", "")

# Models from .env or fallback to your set
# "Models" is a comma-separated list; empty entries are dropped, and an
# empty/unset variable falls back to the hard-coded list below.
MODELS = [m.strip() for m in os.getenv("Models", "").split(",") if m.strip()] or [
    "gemma-3-4b-it",
    "QwQ-32B",
    "DeepSeek-V3-0324",
    "Qwen/Qwen3-32B",
    "zai-org/GLM-4.5-Air",
    "moonshotai/Kimi-K2-Instruct",
]

# Friendly descriptions & logo shown in the right-hand panel of the UI.
MODEL_INFO = {
    "gemma-3-4b-it": "Google Gemma-3 4B Instruct — light, fast, solid reasoning.",
    "QwQ-32B": "QwQ-32B — reasoning-focused; strong long-form answers.",
    "DeepSeek-V3-0324": "DeepSeek V3 (0324) — versatile, great multi-step reasoning.",
    "Qwen/Qwen3-32B": "Qwen3-32B — multilingual, good code & math.",
    "zai-org/GLM-4.5-Air": "GLM-4.5-Air — efficient generalist, good latency.",
    "moonshotai/Kimi-K2-Instruct": "Kimi K2 Instruct — long-context, helpful writing.",
}
LOGO_PATH = "download.jpeg"  # change to your image if different

# ── OpenAI-compatible client ──────────────────────────────────────────────────
from openai import OpenAI
BASE_URL = "https://genai.ghaymah.systems"
# Without an API key the client stays None; callers must check for that.
client = OpenAI(api_key=API_KEY, base_url=BASE_URL) if API_KEY else None
SYSTEM_SEED = "You are Ghaymah Assistant. Be concise and helpful."

# Helpers
BACKOFF = [5, 10, 20]  # basic retry for 429s

def safe_chat_complete(model: str, messages: List[Dict], max_tokens: int = 800) -> str:
    """Call the chat-completions endpoint for *model* with *messages*.

    Returns the assistant's reply text on success, or a human-readable
    error string on failure — this function never raises. Rate-limit
    errors (detected by "429"/"Rate" in the message) are retried with
    the delays in BACKOFF before giving up.
    """
    if not client:
        return "⚠️ Missing API_KEY in .env"

    remaining_delays = iter(BACKOFF)
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.3,
                timeout=90,
            )
        except Exception as err:
            text = str(err)
            if "429" in text or "Rate" in text:
                delay = next(remaining_delays, None)
                if delay is not None:
                    time.sleep(delay)
                    continue
            return f"Request failed for `{model}`: {err}"
        # Some backends return None content; normalize to empty string.
        return response.choices[0].message.content or ""
def init_state():
    """Return a fresh per-session state dict seeded with the system prompt."""
    seed = {"role": "system", "content": SYSTEM_SEED}
    return {"messages": [seed]}
# Gradio app: left column is the chat, right column picks the model.
with gr.Blocks(title=APP_Name) as demo:
    # Per-session server-side state; Gradio copies the initial value per user.
    state = gr.State(init_state())

    gr.Markdown(f"# {APP_Name} \n<span style='opacity:.7'>v{APP_Version}</span>")

    with gr.Row():
        # Left: Chat
        with gr.Column(scale=3):
            # type="messages" → history is a list of {"role", "content"} dicts.
            chat = gr.Chatbot(label="Chat", height=520, type="messages", value=[])
            user_in = gr.Textbox(label="Your message", placeholder="Type here…", lines=2)
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")

        # Right: Model selector + logo + info
        with gr.Column(scale=1, min_width=320):
            model_choice = gr.Radio(
                choices=MODELS,
                value=MODELS[0],
                label="Models",
                info="Select Your Model Here",
            )
            gr.Image(LOGO_PATH, show_label=False, container=False)
            # NOTE(review): initial text omits the bold model title that
            # _update_info adds after the first change — confirm intended.
            info_md = gr.Markdown(MODEL_INFO.get(MODELS[0], ""))

    def _update_info(m: str) -> str:
        """Render the selected model's name and description as Markdown."""
        title = f"**{m}**"
        desc = MODEL_INFO.get(m, "")
        return f"{title}\n\n{desc}"
    model_choice.change(_update_info, model_choice, info_md)

    # Step 1: push the user message into the chat stream
    def on_submit(msg, chat_messages):
        # Empty input: clear the textbox, leave history untouched.
        if not msg:
            return "", (chat_messages or [])
        updated = (chat_messages or []) + [{"role": "user", "content": msg}]
        return "", updated

    # Step 2: build the API payload and append the assistant reply.
    def bot_step(chat_messages, chosen_model, st):
        msgs = [{"role": "system", "content": SYSTEM_SEED}]
        # only include last 2 visible messages (keeps the request small;
        # older context is intentionally dropped from the API call)
        for m in (chat_messages or [])[-2:]:
            role, content = m.get("role"), m.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                msgs.append({"role": role, "content": content})

        reply = safe_chat_complete(chosen_model, msgs, max_tokens=800)
        updated = (chat_messages or []) + [{"role": "assistant", "content": reply}]
        st = st or init_state()
        # State keeps only the messages actually sent, not the full chat.
        st["messages"] = msgs + [{"role": "assistant", "content": reply}]
        return updated, st

    # Clear
    def on_clear():
        return [], init_state()

    # Wire events: submit/click first echoes the user message, then the
    # chained .then() call produces the assistant reply.
    user_in.submit(on_submit, [user_in, chat], [user_in, chat]) \
        .then(bot_step, [chat, model_choice, state], [chat, state])

    send_btn.click(on_submit, [user_in, chat], [user_in, chat]) \
        .then(bot_step, [chat, model_choice, state], [chat, state])

    clear_btn.click(on_clear, outputs=[chat, state])

if __name__ == "__main__":
    demo.queue()
    demo.launch(debug=True)
download.jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ openai
2
+ python-dotenv
3
+ gradio