arubaDev committed on
Commit
b1235ef
·
verified ·
1 Parent(s): 34bdda9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -25
app.py CHANGED
@@ -3,6 +3,7 @@ import sqlite3
3
  from datetime import datetime
4
  import gradio as gr
5
  from huggingface_hub import InferenceClient
 
6
 
7
  # ---------------------------
8
  # Config
@@ -10,10 +11,10 @@ from huggingface_hub import InferenceClient
10
  MODELS = {
11
  "Meta LLaMA 3.1 (8B Instruct)": "meta-llama/Llama-3.1-8B-Instruct",
12
  "Mistral 7B Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
13
- "StarCoder Backend-Focused": "bigcode/starcoderbase",
14
- "CodeLlama Backend-Focused": "codellama/CodeLlama-7b-Instruct"
15
  }
16
 
 
 
17
  HF_TOKEN = os.getenv("HF_TOKEN") # Set in your Space's Secrets
18
  DB_PATH = "history.db"
19
 
@@ -23,7 +24,7 @@ SYSTEM_DEFAULT = (
23
  "Provide full backend code scaffolds with files, paths, and commands. "
24
  "Only include frontend if required for framework integration "
25
  "(e.g., Laravel Blade, Django templates). "
26
- "If asked for unrelated frontend/UI tasks, respond politely that you are a backend assistant."
27
  )
28
 
29
  # ---------------------------
@@ -96,7 +97,8 @@ def get_messages(session_id: int):
96
  """, (session_id,))
97
  rows = cur.fetchall()
98
  conn.close()
99
- return [{"role": role, "content": content} for role, content in rows]
 
100
 
101
  def add_message(session_id: int, role: str, content: str):
102
  conn = db()
@@ -140,6 +142,13 @@ def get_client(model_choice: str):
140
  model_id = MODELS.get(model_choice, list(MODELS.values())[0])
141
  return InferenceClient(model_id, token=HF_TOKEN)
142
 
 
 
 
 
 
 
 
143
  # ---------------------------
144
  # Gradio Callbacks
145
  # ---------------------------
@@ -168,7 +177,7 @@ def delete_chat_cb(selected_label):
168
  selected = labels[0] if labels else None
169
  return gr.update(choices=labels, value=selected), []
170
 
171
- def send_cb(user_text, selected_label, chatbot_msgs, system_message, max_tokens, temperature, top_p, model_choice):
172
  sid = label_to_id(selected_label)
173
  if sid is None:
174
  sid = create_session("New chat")
@@ -178,10 +187,21 @@ def send_cb(user_text, selected_label, chatbot_msgs, system_message, max_tokens,
178
  add_message(sid, "user", user_text)
179
  update_session_title_if_needed(sid, user_text)
180
 
 
 
 
181
  api_messages = build_api_messages(sid, system_message)
182
  display_msgs = get_messages(sid)
183
  display_msgs.append({"role": "assistant", "content": ""})
184
 
 
 
 
 
 
 
 
 
185
  client = get_client(model_choice)
186
  partial = ""
187
  try:
@@ -203,7 +223,7 @@ def send_cb(user_text, selected_label, chatbot_msgs, system_message, max_tokens,
203
  display_msgs[-1]["content"] = err
204
  yield (display_msgs, "", selected_label)
205
 
206
- def regenerate_cb(selected_label, system_message, max_tokens, temperature, top_p, model_choice):
207
  sid = label_to_id(selected_label)
208
  if sid is None:
209
  return [], ""
@@ -225,6 +245,7 @@ def regenerate_cb(selected_label, system_message, max_tokens, temperature, top_p
225
  conn.close()
226
  msgs = get_messages(sid)
227
 
 
228
  api_messages = [{"role": "system", "content": system_message.strip()}] + msgs
229
  display_msgs = msgs + [{"role": "assistant", "content": ""}]
230
 
@@ -258,25 +279,16 @@ if not labels:
258
  labels, _ = list_sessions()
259
  default_selected = labels[0] if labels else None
260
 
261
- with gr.Blocks(title="Backend-Focused CRUD Assistant", theme=gr.themes.Soft()) as demo:
262
  gr.HTML("""
263
  <style>
264
- button {
265
- background-color: #22c55e !important;
266
- color: #ffffff !important;
267
- border: none !important;
268
- }
269
- button:hover {
270
- background-color: #16a34a !important;
271
- }
272
- button:focus {
273
- outline: 2px solid #166534 !important;
274
- outline-offset: 2px;
275
- }
276
  </style>
277
  """)
278
 
279
- gr.Markdown("## 🗄️ Backend-Focused CRUD Automation — with Persistent History")
280
 
281
  with gr.Row():
282
  with gr.Column(scale=1, min_width=260):
@@ -300,11 +312,19 @@ with gr.Blocks(title="Backend-Focused CRUD Assistant", theme=gr.themes.Soft()) a
300
  interactive=True
301
  )
302
 
 
 
 
 
 
 
 
 
303
  gr.Markdown("### ⚙️ Generation Settings")
304
  system_box = gr.Textbox(
305
  value=SYSTEM_DEFAULT,
306
  label="System message",
307
- lines=4
308
  )
309
  max_tokens = gr.Slider(256, 4096, value=1200, step=16, label="Max tokens")
310
  temperature = gr.Slider(0.0, 2.0, value=0.25, step=0.05, label="Temperature")
@@ -318,7 +338,6 @@ with gr.Blocks(title="Backend-Focused CRUD Assistant", theme=gr.themes.Soft()) a
318
  send_btn = gr.Button("Send ▶️", variant="primary")
319
  regen_btn = gr.Button("Regenerate 🔁", variant="secondary")
320
 
321
- # --- Button interactions ---
322
  refresh_btn.click(refresh_sessions_cb, outputs=session_list)
323
  new_btn.click(new_chat_cb, outputs=[session_list, chatbot, user_box])
324
  del_btn.click(delete_chat_cb, inputs=session_list, outputs=[session_list, chatbot])
@@ -326,19 +345,19 @@ with gr.Blocks(title="Backend-Focused CRUD Assistant", theme=gr.themes.Soft()) a
326
 
327
  send_btn.click(
328
  send_cb,
329
- inputs=[user_box, session_list, chatbot, system_box, max_tokens, temperature, top_p, model_choice],
330
  outputs=[chatbot, user_box, session_list]
331
  )
332
 
333
  user_box.submit(
334
  send_cb,
335
- inputs=[user_box, session_list, chatbot, system_box, max_tokens, temperature, top_p, model_choice],
336
  outputs=[chatbot, user_box, session_list]
337
  )
338
 
339
  regen_btn.click(
340
  regenerate_cb,
341
- inputs=[session_list, system_box, max_tokens, temperature, top_p, model_choice],
342
  outputs=chatbot
343
  )
344
 
 
3
  from datetime import datetime
4
  import gradio as gr
5
  from huggingface_hub import InferenceClient
6
+ from datasets import load_dataset
7
 
8
  # ---------------------------
9
  # Config
 
11
  MODELS = {
12
  "Meta LLaMA 3.1 (8B Instruct)": "meta-llama/Llama-3.1-8B-Instruct",
13
  "Mistral 7B Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
 
 
14
  }
15
 
16
+ DATASETS = ["The Stack", "CodeXGLUE"] # Dropdown for dataset selection
17
+
18
  HF_TOKEN = os.getenv("HF_TOKEN") # Set in your Space's Secrets
19
  DB_PATH = "history.db"
20
 
 
24
  "Provide full backend code scaffolds with files, paths, and commands. "
25
  "Only include frontend if required for framework integration "
26
  "(e.g., Laravel Blade, Django templates). "
27
+ "If user asks for too much frontend, politely say: 'I'm a backend-focused assistant and cannot provide excessive frontend code.'"
28
  )
29
 
30
  # ---------------------------
 
97
  """, (session_id,))
98
  rows = cur.fetchall()
99
  conn.close()
100
+ msgs = [{"role": role, "content": content} for (role, content) in rows]
101
+ return msgs
102
 
103
  def add_message(session_id: int, role: str, content: str):
104
  conn = db()
 
142
  model_id = MODELS.get(model_choice, list(MODELS.values())[0])
143
  return InferenceClient(model_id, token=HF_TOKEN)
144
 
145
+ def load_dataset_by_name(name: str):
146
+ if name == "The Stack":
147
+ return load_dataset("bigcode/the-stack", split="train")
148
+ elif name == "CodeXGLUE":
149
+ return load_dataset("google/code_x_glue_cc_code_to_code_trans", split="train")
150
+ return None
151
+
152
  # ---------------------------
153
  # Gradio Callbacks
154
  # ---------------------------
 
177
  selected = labels[0] if labels else None
178
  return gr.update(choices=labels, value=selected), []
179
 
180
+ def send_cb(user_text, selected_label, chatbot_msgs, system_message, max_tokens, temperature, top_p, model_choice, dataset_choice):
181
  sid = label_to_id(selected_label)
182
  if sid is None:
183
  sid = create_session("New chat")
 
187
  add_message(sid, "user", user_text)
188
  update_session_title_if_needed(sid, user_text)
189
 
190
+ # Load selected dataset (used internally for reference / semantic checks)
191
+ dataset = load_dataset_by_name(dataset_choice)
192
+
193
  api_messages = build_api_messages(sid, system_message)
194
  display_msgs = get_messages(sid)
195
  display_msgs.append({"role": "assistant", "content": ""})
196
 
197
+ # Block excessive frontend requests
198
+ if "frontend" in user_text.lower() and "too much" in user_text.lower():
199
+ warning = "⚠️ I'm a backend-focused assistant and cannot provide excessive frontend code."
200
+ display_msgs[-1]["content"] = warning
201
+ yield (display_msgs, "", selected_label)
202
+ add_message(sid, "assistant", warning)
203
+ return
204
+
205
  client = get_client(model_choice)
206
  partial = ""
207
  try:
 
223
  display_msgs[-1]["content"] = err
224
  yield (display_msgs, "", selected_label)
225
 
226
+ def regenerate_cb(selected_label, system_message, max_tokens, temperature, top_p, model_choice, dataset_choice):
227
  sid = label_to_id(selected_label)
228
  if sid is None:
229
  return [], ""
 
245
  conn.close()
246
  msgs = get_messages(sid)
247
 
248
+ dataset = load_dataset_by_name(dataset_choice)
249
  api_messages = [{"role": "system", "content": system_message.strip()}] + msgs
250
  display_msgs = msgs + [{"role": "assistant", "content": ""}]
251
 
 
279
  labels, _ = list_sessions()
280
  default_selected = labels[0] if labels else None
281
 
282
+ with gr.Blocks(title="Backend-Focused LLaMA/Mistral CRUD Assistant", theme=gr.themes.Soft()) as demo:
283
  gr.HTML("""
284
  <style>
285
+ button { background-color: #22c55e !important; color: #ffffff !important; border: none !important; }
286
+ button:hover { background-color: #16a34a !important; }
287
+ button:focus { outline: 2px solid #166534 !important; outline-offset: 2px; }
 
 
 
 
 
 
 
 
 
288
  </style>
289
  """)
290
 
291
+ gr.Markdown("## 🗄️ LLaMA & Mistral Backend-Focused CRUD Automation — with Persistent History")
292
 
293
  with gr.Row():
294
  with gr.Column(scale=1, min_width=260):
 
312
  interactive=True
313
  )
314
 
315
+ gr.Markdown("### 📚 Dataset Selection")
316
+ dataset_choice = gr.Dropdown(
317
+ choices=DATASETS,
318
+ value=DATASETS[0],
319
+ label="Select a dataset",
320
+ interactive=True
321
+ )
322
+
323
  gr.Markdown("### ⚙️ Generation Settings")
324
  system_box = gr.Textbox(
325
  value=SYSTEM_DEFAULT,
326
  label="System message",
327
+ lines=5
328
  )
329
  max_tokens = gr.Slider(256, 4096, value=1200, step=16, label="Max tokens")
330
  temperature = gr.Slider(0.0, 2.0, value=0.25, step=0.05, label="Temperature")
 
338
  send_btn = gr.Button("Send ▶️", variant="primary")
339
  regen_btn = gr.Button("Regenerate 🔁", variant="secondary")
340
 
 
341
  refresh_btn.click(refresh_sessions_cb, outputs=session_list)
342
  new_btn.click(new_chat_cb, outputs=[session_list, chatbot, user_box])
343
  del_btn.click(delete_chat_cb, inputs=session_list, outputs=[session_list, chatbot])
 
345
 
346
  send_btn.click(
347
  send_cb,
348
+ inputs=[user_box, session_list, chatbot, system_box, max_tokens, temperature, top_p, model_choice, dataset_choice],
349
  outputs=[chatbot, user_box, session_list]
350
  )
351
 
352
  user_box.submit(
353
  send_cb,
354
+ inputs=[user_box, session_list, chatbot, system_box, max_tokens, temperature, top_p, model_choice, dataset_choice],
355
  outputs=[chatbot, user_box, session_list]
356
  )
357
 
358
  regen_btn.click(
359
  regenerate_cb,
360
+ inputs=[session_list, system_box, max_tokens, temperature, top_p, model_choice, dataset_choice],
361
  outputs=chatbot
362
  )
363