iamthewalrus67 committed
Commit d5bedae · 1 Parent(s): 77980fa

Initial commit

Files changed (7)
  1. .gitignore +2 -0
  2. analytics.py +81 -0
  3. app.py +443 -0
  4. requirements.txt +8 -0
  5. runtime.yaml +2 -0
  6. static/script.js +221 -0
  7. static/style.css +256 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .ignore
+ venv
analytics.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ import uuid
+ import datetime as dt
+ import sys
+ from pathlib import Path
+ from typing import Optional
+
+ from supabase import create_client, Client
+
+
+ def _utc_now_iso() -> str:
+     return dt.datetime.now(dt.timezone.utc).isoformat()
+
+
+ class AnalyticsLogger:
+     """
+     Simple Supabase logger for:
+     - Sessions (id: uuid, created_at: timestamptz)
+     - Chats (id: uuid, session_id: uuid, timestamp: timestamptz, user: text, answer: text)
+     """
+
+     def __init__(self):
+         url = os.getenv("SUPABASE_URL")
+         key = os.getenv("SUPABASE_KEY")
+         if not url or not key:
+             raise RuntimeError("Missing SUPABASE_URL or SUPABASE_KEY env var.")
+         self.client: Client = create_client(url, key)
+         self.session_id: Optional[str] = None
+
+     def start_session(self, model_id: str) -> str:
+         """
+         Creates a session row and returns the session UUID (string).
+         """
+         sid = str(uuid.uuid4())
+         payload = {"id": sid, "created_at": _utc_now_iso(), "model_id": model_id}
+         try:
+             self.client.table("Sessions").insert(payload).execute()
+             self.session_id = sid
+             return sid
+         except Exception as e:
+             print(f"[AnalyticsLogger] Failed to start session: {e}", file=sys.stderr)
+             raise e
+
+     def _upload_image(self, image_path: str) -> Optional[str]:
+         try:
+             with open(image_path, "rb") as img_file:
+                 image_name = f'{uuid.uuid4()}{Path(image_path).suffix}'
+                 response = self.client.storage.from_("Images").upload(image_name, img_file, {"cacheControl": "3600", "upsert": "true"})
+
+             return response.full_path
+         except Exception as e:
+             print(f"[AnalyticsLogger] Failed to upload image: {e}", file=sys.stderr)
+             return None
+
+     def log_interaction(self, user: str | tuple[str, str], answer: str, ts_iso: Optional[str] = None) -> None:
+         """
+         Inserts a single chat interaction.
+         """
+         if not self.session_id:
+             raise ValueError("Session not started. Call start_session() first.")
+         session_id = self.session_id
+
+         image_handle: str | None = None
+
+         if isinstance(user, tuple):  # (image_path, user_name)
+             image, user = user
+
+             image_handle = self._upload_image(image)
+
+         chat_payload = {
+             "id": str(uuid.uuid4()),
+             "session_id": session_id,
+             "timestamp": ts_iso or _utc_now_iso(),
+             "user": user,
+             "answer": answer,
+             "user_image_path": image_handle,
+         }
+         try:
+             self.client.table("Chats").insert(chat_payload).execute()
+         except Exception as e:
+             print(f"[AnalyticsLogger] Failed to log interaction: {e}", file=sys.stderr)
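For reference, a minimal usage sketch of `AnalyticsLogger` (not part of the commit; it assumes the `SUPABASE_URL` and `SUPABASE_KEY` environment variables are set and that the `Sessions`/`Chats` tables and the `Images` storage bucket exist in Supabase):

    from analytics import AnalyticsLogger

    logger = AnalyticsLogger()                      # reads SUPABASE_URL / SUPABASE_KEY
    session_id = logger.start_session("le-llm/manipulative-score-model")

    # Text-only interaction
    logger.log_interaction(user="Яка найвища гора в Україні?", answer="...")

    # Interaction with an attached image: pass (image_path, user_text)
    logger.log_interaction(user=("/tmp/example.jpg", "Опиши це зображення"), answer="...")  # hypothetical local file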
app.py ADDED
@@ -0,0 +1,443 @@
+ import os
+ import subprocess
+ import tempfile
+
+ # subprocess.run('pip install flash-attn==2.8.0 --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ import threading
+
+ # subprocess.check_call([os.sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
+
+ import spaces
+ import gradio as gr
+ import torch
+ from PIL.Image import Image
+ from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer, TextIteratorStreamer
+ from kernels import get_kernel
+ from typing import Any, Optional, Dict
+
+ # vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
+
+ # torch._dynamo.config.disable = True
+
+ # Login to HF to get access to the model weights
+ HF_LE_LLM_READ_TOKEN = os.environ.get('HF_LE_LLM_READ_TOKEN')
+
+ from huggingface_hub import login
+ login(token=HF_LE_LLM_READ_TOKEN)
+
+ # Constants
+ # MODEL_ID = "le-llm/lapa-v0.1-reasoning-only-32768"
+ # MODEL_ID = "le-llm/lapa-v0.1-instruct"
+ # MODEL_ID = "le-llm/lapa-v0.1-matt-instruction-5e06"
+ # MODEL_ID = "le-llm/lapa-v0.1-reprojected"
+ # MODEL_ID = "le-llm/lapa-v0.1.1-instruct"
+ MODEL_ID = "le-llm/manipulative-score-model"
+
+ MAX_TOKENS = 4096
+ TEMPERATURE = 0.7
+ TOP_P = 0.95
+
+ IMAGE_MAX_SIZE = 1024
+
+
+ def _begin_analytics_session():
+     # Called once per client on app load
+     pass
+     # _ = logger.start_session(MODEL_ID)
+
+ def load_model():
+     """Lazy-load model, tokenizer, and optional processor (for zeroGPU)."""
+     device = "cuda"  # if torch.cuda.is_available() else "cpu"
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+     processor = None
+     try:
+         processor = AutoProcessor.from_pretrained(MODEL_ID)
+     except Exception as err:  # pragma: no cover - informative fallback
+         print(f"Warning: AutoProcessor not available ({err}). Falling back to tokenizer.")
+
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_ID,
+         dtype=torch.bfloat16,  # if device == "cuda" else torch.float32,
+         device_map="auto",  # if device == "cuda" else None,
+         attn_implementation="flash_attention_2",  # "kernels-community/vllm-flash-attn3"
+     )  # .cuda()
+     print(f"Selected device:", device)
+     return model, tokenizer, processor, device
+
+
+ # Load model/tokenizer each request → allows zeroGPU to cold start & then release
+ model, tokenizer, processor, device = load_model()
+
+
+ def user(user_message, image_data: Image, history: list):
+     """Format user message with optional image."""
+     import io
+
+     user_message = user_message or ""
+     updated_history = list(history)
+     has_content = False
+
+     stripped_message = user_message.strip()
+
+     # If we have an image, save it to temp file for Gradio display
+     if image_data is not None:
+         image_data.thumbnail((IMAGE_MAX_SIZE, IMAGE_MAX_SIZE))
+
+         # Save to temp file for Gradio display
+         fd, tmp_path = tempfile.mkstemp(suffix=".jpg")
+         os.close(fd)
+         image_data.save(tmp_path, format="JPEG")
+
+         # Also encode as base64 for model processing (stored in metadata)
+         buffered = io.BytesIO()
+         image_data.save(buffered, format="JPEG")
+
+         # TODO do we leave that message?
+         text_content = stripped_message if stripped_message else "Опиши це зображення"
+
+         # Store both text and image in a single message with base64 in metadata
+         updated_history.append({
+             "role": "user",
+             "content": text_content
+         })
+         updated_history.append({
+             "role": "user",
+             "content": {
+                 "path": tmp_path,
+                 "alt_text": "User uploaded image"
+             },
+         })
+         has_content = True
+     elif stripped_message:
+         updated_history.append({"role": "user", "content": stripped_message})
+         has_content = True
+
+     if not has_content:
+         # Nothing to submit yet; keep inputs unchanged
+         return user_message, image_data, history
+
+     return "", None, updated_history
+
+
+ def append_example_message(x: gr.SelectData, history):
+     if x.value["text"] is not None:
+         history.append({"role": "user", "content": x.value["text"]})
+
+     return history
+
+
+ def _extract_text_from_content(content: Any) -> str | tuple[str, str]:
+     """Extract text from message content for logging."""
+     if isinstance(content, str):
+         return content
+     if isinstance(content, tuple) and len(content) == 2:
+         return content  # (image_path, user_text)
+
+     raise ValueError(f"Unsupported content type for text extraction: {content}")
+
+
+ def _clean_history_for_display(history: list[dict[str, Any]]) -> list[dict[str, Any]]:
+     """Remove internal metadata fields like _base64 before displaying in Gradio."""
+     cleaned = []
+     for message in history:
+         cleaned_message = {"role": message.get("role", "user")}
+         content = message.get("content")
+
+         if isinstance(content, str):
+             cleaned_message["content"] = content
+         elif isinstance(content, list):
+             cleaned_content = []
+             for item in content:
+                 if isinstance(item, dict):
+                     # Remove _base64 metadata
+                     cleaned_item = {k: v for k, v in item.items() if not k.startswith("_")}
+                     cleaned_content.append(cleaned_item)
+                 else:
+                     cleaned_content.append(item)
+             cleaned_message["content"] = cleaned_content
+         else:
+             cleaned_message["content"] = content
+
+         cleaned.append(cleaned_message)
+
+     return cleaned
+
+
+ @spaces.GPU
+ def bot(
+     history: list[dict[str, Any]]
+ ):
+     """Generate bot response with support for text and images."""
+
+     # Early return if no input
+     if not history:
+         return
+
+     # Extract last user message for logging
+     last_user_msg = next((msg for msg in reversed(history) if msg.get("role") == "user"), None)
+     user_message_text = _extract_text_from_content(last_user_msg.get("content")) if last_user_msg else ""
+     print('User message:', user_message_text)
+
+     # Check if any message contains images
+     has_images = any(
+         isinstance(msg.get("content"), tuple)
+         for msg in history
+     )
+
+     model_inputs = None
+
+     # Use processor if images are present
+     if processor is not None and has_images:
+         try:
+             processor_history = []
+             for msg in history:
+                 role = msg.get("role", "user")
+                 content = msg.get("content")
+
+                 if isinstance(content, str):
+                     processor_history.append({"role": role, "content": [{"type": "text", "text": content}]})
+                 elif isinstance(content, tuple):
+                     formatted_content = []
+                     tmp_path, _ = content
+                     image_input = {
+                         "type": "image",
+                         "url": f"{tmp_path}",
+                     }
+
+                     if processor_history[-1].get('role') == 'user':
+                         if isinstance(processor_history[-1].get('content'), str):
+                             previous_message = processor_history[-1].get('content')
+                             formatted_content.append({"type": "text", "text": previous_message})
+                             formatted_content.append(image_input)
+                             processor_history[-1]['content'] = formatted_content
+                         elif isinstance(processor_history[-1].get('content'), list):
+                             processor_history[-1]['content'].append(image_input)
+                     else:
+                         formatted_content.append(image_input)
+                         processor_history.append({"role": role, "content": formatted_content})
+
+             model_inputs = processor.apply_chat_template(
+                 processor_history,
+                 tokenize=True,
+                 return_dict=True,
+                 return_tensors="pt",
+                 add_generation_prompt=True,
+             ).to(model.device)
+             print("Using processor for vision input")
+         except Exception as exc:
+             print(f"Processor failed: {exc}")
+             model_inputs = None
+
+     # Fallback to tokenizer for text-only
+     if model_inputs is None:
+         # Convert to text-only format for tokenizer
+         text_history = []
+         for msg in history:
+             role = msg.get("role", "user")
+             content = msg.get("content")
+             text_content = _extract_text_from_content(content)
+             if text_content:
+                 text_history.append({"role": role, "content": text_content})
+
+         if text_history:
+             input_text = tokenizer.apply_chat_template(
+                 text_history,
+                 tokenize=False,
+                 add_generation_prompt=True,
+             )
+             if input_text and tokenizer.bos_token:
+                 input_text = input_text.replace(tokenizer.bos_token, "", 1)
+             model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
+             print("Using tokenizer for text-only input")
+
+     if model_inputs is None:
+         return
+
+     # Streamer setup
+     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+     # Run model.generate in background thread
+     generation_kwargs = dict(
+         **model_inputs,
+         max_new_tokens=MAX_TOKENS,
+         temperature=TEMPERATURE,
+         top_p=TOP_P,
+         top_k=64,
+         do_sample=True,
+         streamer=streamer,
+     )
+     thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     history.append({"role": "assistant", "content": ""})
+     # Yield tokens as they come in
+     for new_text in streamer:
+         history[-1]["content"] += new_text
+         yield _clean_history_for_display(history)
+
+     assistant_message = history[-1]["content"]
+     # Analytics logging is disabled: no `logger` is initialised in this file and no
+     # session is started above, so calling it here would raise a NameError.
+     # logger.log_interaction(user=user_message_text, answer=assistant_message)
+
+
+ # --- drop-in UI compatible with older Gradio versions ---
+ import os, tempfile, time
+ import gradio as gr
+
+ # Ukrainian-inspired theme with deep, muted colors reflecting unbeatable spirit:
+ THEME = gr.themes.Soft(
+     primary_hue="blue",     # Deep blue representing Ukrainian sky and resolve
+     secondary_hue="amber",  # Warm amber representing golden fields and determination
+     neutral_hue="stone",    # Earthy stone representing strength and foundation
+ )
+
+ # Load CSS from external file
+ def load_css():
+     try:
+         with open("static/style.css", "r", encoding="utf-8") as f:
+             return f.read()
+     except FileNotFoundError:
+         print("Warning: static/style.css not found")
+         return ""
+
+ CSS = load_css()
+
+ def _clear_chat():
+     return "", None, []
+
+ with gr.Blocks(theme=THEME, css=CSS, fill_height=True) as demo:
+     demo.load(fn=_begin_analytics_session, inputs=None, outputs=None)
+
+
+     # Header (no gr.Box to avoid version issues)
+     gr.HTML(
+         """
+         <div id="app-header">
+             <div class="app-title">✨ LAPA</div>
+             <div class="app-subtitle">LLM for Ukrainian Language</div>
+         </div>
+         """
+     )
+
+     with gr.Row(equal_height=True):
+         # Left side: Chat
+         with gr.Column(scale=7, elem_id="left-pane"):
+             with gr.Column(elem_id="chat-card"):
+                 chatbot = gr.Chatbot(
+                     type="messages",
+                     height=560,
+                     render_markdown=True,
+                     show_copy_button=True,
+                     show_label=False,
+                     # likeable=True,
+                     allow_tags=["think"],
+                     elem_id="chatbot",
+                     examples=[
+                         {"text": i}
+                         for i in [
+                             "хто тримає цей район?",
+                             "Напиши історію про Івасика-Телесика",
+                             "Яка найвища гора в Україні?",
+                             "Як звали батька Тараса Григоровича Шевченка?",
+                             "Яка з цих гір не знаходиться у Європі? Говерла, Монблан, Гран-Парадізо, Еверест",
+                             "Дай відповідь на питання\nЧому у качки жовті ноги?",
+                         ]
+                     ],
+                 )
+
+                 image_input = gr.Image(
+                     label="Attach image (optional)",
+                     type="pil",
+                     sources=["upload", "clipboard"],
+                     height=200,
+                     interactive=True,
+                     elem_id="image-input",
+                 )
+
+                 # ChatGPT-style input box with stop button
+                 with gr.Row(elem_id="chat-input-row"):
+                     msg = gr.Textbox(
+                         label=None,
+                         placeholder="Message… (Press Enter to send)",
+                         autofocus=True,
+                         lines=1,
+                         max_lines=6,
+                         container=False,
+                         show_label=False,
+                         elem_id="chat-input",
+                         elem_classes=["chat-input-box"]
+                     )
+                     stop_btn_visible = gr.Button(
+                         "⏹️",
+                         variant="secondary",
+                         elem_id="stop-btn-visible",
+                         elem_classes=["stop-btn-chat"],
+                         visible=False,
+                         size="sm"
+                     )
+
+                 # Hidden buttons for functionality
+                 with gr.Row(visible=True, elem_id="hidden-buttons"):
+                     send_btn = gr.Button("Send", variant="primary", elem_id="send-btn")
+                     stop_btn = gr.Button("Stop", variant="secondary", elem_id="stop-btn")
+                     clear_btn = gr.Button("Clear", variant="secondary", elem_id="clear-btn")
+
+             # export_btn = gr.Button("Export chat (.md)", variant="secondary", elem_classes=["rounded-btn","secondary-btn"])
+             # exported_file = gr.File(label="", interactive=False, visible=True)
+             gr.HTML('<div class="footer-tip">Shortcuts: Enter to send • Shift+Enter for new line</div>')
+
+     # Helper functions for managing UI state
+     def show_stop_button():
+         return gr.update(visible=True)
+
+     def hide_stop_button():
+         return gr.update(visible=False)
+
+     # Events (preserve your original handlers)
+     e1 = msg.submit(fn=user, inputs=[msg, image_input, chatbot], outputs=[msg, image_input, chatbot], queue=True).then(
+         fn=show_stop_button, inputs=None, outputs=stop_btn_visible
+     ).then(
+         fn=bot, inputs=chatbot, outputs=chatbot
+     ).then(
+         fn=hide_stop_button, inputs=None, outputs=stop_btn_visible
+     )
+
+     e2 = send_btn.click(fn=user, inputs=[msg, image_input, chatbot], outputs=[msg, image_input, chatbot], queue=True).then(
+         fn=show_stop_button, inputs=None, outputs=stop_btn_visible
+     ).then(
+         fn=bot, inputs=chatbot, outputs=chatbot
+     ).then(
+         fn=hide_stop_button, inputs=None, outputs=stop_btn_visible
+     )
+
+     e3 = chatbot.example_select(fn=append_example_message, inputs=[chatbot], outputs=[chatbot], queue=True).then(
+         fn=show_stop_button, inputs=None, outputs=stop_btn_visible
+     ).then(
+         fn=bot, inputs=chatbot, outputs=chatbot
+     ).then(
+         fn=hide_stop_button, inputs=None, outputs=stop_btn_visible
+     )
+
+     # Stop cancels running events (both buttons work)
+     stop_btn.click(fn=hide_stop_button, inputs=None, outputs=stop_btn_visible, cancels=[e1, e2, e3], queue=True)
+     stop_btn_visible.click(fn=hide_stop_button, inputs=None, outputs=stop_btn_visible, cancels=[e1, e2, e3], queue=True)
+
+     # Clear chat + input
+     clear_btn.click(fn=_clear_chat, inputs=None, outputs=[msg, image_input, chatbot])
+
+     # Export markdown
+     # export_btn.click(fn=_export_markdown, inputs=chatbot, outputs=exported_file)
+
+     # Load and inject external JavaScript
+     def load_javascript():
+         try:
+             with open("static/script.js", "r", encoding="utf-8") as f:
+                 return f"<script>{f.read()}</script>"
+         except FileNotFoundError:
+             print("Warning: static/script.js not found")
+             return ""
+
+     gr.HTML(load_javascript())
+
+ if __name__ == "__main__":
+     demo.queue().launch()
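The heart of `bot()` above is the background-thread streaming pattern from transformers: `model.generate` runs in a worker thread while a `TextIteratorStreamer` yields decoded text chunks back to the UI loop. A stripped-down sketch of that same pattern outside Gradio (assumes access to the gated model and the pinned transformers/torch versions from requirements.txt; not part of the commit):

    import threading
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    model_id = "le-llm/manipulative-score-model"  # gated: needs an HF token with read access
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")

    messages = [{"role": "user", "content": "Яка найвища гора в Україні?"}]
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
    ).to(model.device)

    # generate() runs in a worker thread; the streamer yields text as it is decoded
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    threading.Thread(
        target=model.generate,
        kwargs=dict(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.95, streamer=streamer),
    ).start()

    for chunk in streamer:
        print(chunk, end="", flush=True)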
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ transformers==4.56.0
+ huggingface_hub
+ gradio
+ torch==2.8.0
+ accelerate
+ spaces
+ kernels
+ supabase
runtime.yaml ADDED
@@ -0,0 +1,2 @@
+ hardware: "gpu"
+ accelerator: "zeroGPU"
static/script.js ADDED
@@ -0,0 +1,221 @@
+ (function () {
+   console.log("Keyboard shortcuts script loaded");
+
+   // --- Autoscroll control ---
+   let lastSendTs = 0;
+   let userPinnedScroll = false;
+   let chatContainer = null;
+   let chatObserver = null;
+
+   const now = () => Date.now();
+   const SEND_AUTOSCROLL_WINDOW_MS = 2500;
+   const NEAR_BOTTOM_PX = 120;
+
+   const getChatContainer = () => {
+     if (chatContainer && document.body.contains(chatContainer)) return chatContainer;
+     // Prefer explicit elem_id container
+     const root = document.getElementById('chatbot') || document.querySelector('#chatbot');
+     // Fallbacks: any visible gr-chatbot within left pane
+     const candidates = [];
+     if (root) candidates.push(root);
+     candidates.push(
+       document.querySelector('#left-pane .gr-chatbot'),
+       document.querySelector('.gr-chatbot'),
+       document.querySelector('#left-pane [data-testid="chatbot"]')
+     );
+     for (const el of candidates) {
+       if (!el) continue;
+       // Try common inner scroll area
+       let container = el.querySelector('[data-testid="bot"]') || el.querySelector('[data-testid="chatbot"]') || el;
+       // Walk down to the element that actually scrolls
+       const stack = [container];
+       while (stack.length) {
+         const cur = stack.shift();
+         if (!cur) continue;
+         const style = cur instanceof Element ? getComputedStyle(cur) : null;
+         const canScroll = style && (style.overflowY === 'auto' || style.overflowY === 'scroll');
+         if (canScroll && cur.scrollHeight > cur.clientHeight + 10) {
+           chatContainer = cur;
+           break;
+         }
+         if (cur.children && cur.children.length) stack.push(...cur.children);
+       }
+       if (!chatContainer) chatContainer = container;
+       if (chatContainer) break;
+     }
+     return chatContainer;
+   };
+
+   const isNearBottom = (el) => {
+     if (!el) return true;
+     const distance = el.scrollHeight - el.scrollTop - el.clientHeight;
+     return distance <= NEAR_BOTTOM_PX;
+   };
+
+   const scrollToBottom = (el) => {
+     if (!el) return;
+     el.scrollTo({ top: el.scrollHeight, behavior: 'auto' });
+   };
+
+   const attachScrollListener = () => {
+     const el = getChatContainer();
+     if (!el) return;
+     el.addEventListener('scroll', () => {
+       // If the user scrolls up away from bottom, pin the position
+       userPinnedScroll = !isNearBottom(el);
+     }, { passive: true });
+   };
+
+   const observeChat = () => {
+     const el = getChatContainer();
+     if (!el) return;
+     if (chatObserver) chatObserver.disconnect();
+     chatObserver = new MutationObserver(() => {
+       const withinSendWindow = now() - lastSendTs < SEND_AUTOSCROLL_WINDOW_MS;
+       const shouldScroll = withinSendWindow || (!userPinnedScroll && isNearBottom(el));
+       if (shouldScroll) scrollToBottom(el);
+     });
+     chatObserver.observe(el, { childList: true, subtree: true, characterData: true });
+   };
+
+   const send = () => {
+     // Try multiple selectors to find the send button
+     const selectors = [
+       '#send-btn',
+       'button[id="send-btn"]',
+       '.gr-button:contains("Send")',
+       'button:contains("Send")',
+       '#send-btn button',
+       '[data-testid*="send"]',
+       'button[variant="primary"]'
+     ];
+
+     let btn = null;
+     for (let selector of selectors) {
+       try {
+         if (selector.includes(':contains')) {
+           // Handle :contains selector manually
+           const buttons = document.querySelectorAll('button');
+           for (let button of buttons) {
+             if (button.textContent.trim() === 'Send') {
+               btn = button;
+               break;
+             }
+           }
+         } else {
+           btn = document.querySelector(selector);
+         }
+         if (btn) {
+           console.log("Found send button with selector:", selector);
+           break;
+         }
+       } catch (e) {
+         // Skip invalid selectors
+       }
+     }
+
+     if (btn) {
+       console.log("Clicking send button");
+       lastSendTs = now();
+       // When sending, allow autoscroll for initial response
+       userPinnedScroll = false;
+       // Ensure observers are up
+       setTimeout(() => { attachScrollListener(); observeChat(); }, 0);
+       btn.click();
+       return true;
+     } else {
+       console.log("Send button not found");
+       // Debug: log all buttons
+       const allButtons = document.querySelectorAll('button');
+       console.log("All buttons found:", allButtons);
+       return false;
+     }
+   };
+
+   const setupKeyboardShortcuts = () => {
+     console.log("Setting up keyboard shortcuts");
+     // Initialize observers once UI is present
+     setTimeout(() => { attachScrollListener(); observeChat(); }, 50);
+     setTimeout(() => { attachScrollListener(); observeChat(); }, 400);
+
+     document.addEventListener('keydown', (e) => {
+       const isCmdEnter = (e.metaKey || e.ctrlKey) && e.key === 'Enter';
+       const isEnter = e.key === 'Enter' && !e.shiftKey && !e.metaKey && !e.ctrlKey;
+       const isInputFocused = document.activeElement &&
+         (document.activeElement.matches('#chat-input textarea') ||
+          document.activeElement.matches('.chat-input-box textarea'));
+
+       if ((isCmdEnter || (isEnter && isInputFocused)) && isInputFocused) {
+         console.log("Send triggered");
+         e.preventDefault();
+         e.stopPropagation();
+
+         const success = send();
+         if (!success) {
+           console.log("Failed to send, trying again in 100ms");
+           setTimeout(send, 100);
+         }
+       }
+
+       if (e.key === 'Escape') {
+         const input = document.querySelector('#chat-input textarea') || document.querySelector('.chat-input-box textarea');
+         if (input) {
+           input.focus();
+           console.log("Focused input field");
+         }
+       }
+     }, true);
+
+     console.log("Keyboard shortcuts set up");
+   };
+
+   // Multiple attempts to set up shortcuts
+   const attempts = [100, 500, 1000, 2000];
+   attempts.forEach(delay => {
+     setTimeout(() => {
+       console.log(`Attempting setup after ${delay}ms`);
+       setupKeyboardShortcuts();
+     }, delay);
+   });
+
+   // Force light theme - disable dark mode switching
+   const forceTheme = () => {
+     // Remove any dark theme classes that might be added
+     document.documentElement.classList.remove('dark');
+     document.body.classList.remove('dark');
+
+     // Set light color scheme
+     document.documentElement.style.colorScheme = 'light';
+
+     // Override any theme preference that might be set
+     if (window.matchMedia) {
+       const darkModeQuery = window.matchMedia('(prefers-color-scheme: dark)');
+       // Override the matches property to always return false
+       Object.defineProperty(darkModeQuery, 'matches', {
+         value: false,
+         writable: false
+       });
+     }
+   };
+
+   // Apply theme override immediately and on any DOM changes
+   forceTheme();
+
+   // Watch for any changes and reapply theme override
+   const observer = new MutationObserver(() => {
+     forceTheme();
+   });
+
+   observer.observe(document.documentElement, {
+     attributes: true,
+     attributeFilter: ['class', 'data-theme', 'theme']
+   });
+
+   // Also set up immediately if DOM is ready
+   if (document.readyState !== 'loading') {
+     setupKeyboardShortcuts();
+   } else {
+     document.addEventListener('DOMContentLoaded', setupKeyboardShortcuts);
+   }
+
+ })();
static/style.css ADDED
@@ -0,0 +1,256 @@
+ /* Import Google Fonts for better typography */
+ @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700;800&family=Source+Sans+Pro:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500;600&display=swap');
+
+ /* Root font styles - Applied to entire page */
+ * {
+   font-family: 'Source Sans Pro', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', sans-serif;
+ }
+
+ /* Background - Light Ukrainian sky with warm golden undertones */
+ .gradio-container {
+   background: radial-gradient(1200px 600px at 20% 0%, #f1f5f9 0%, #e2e8f0 45%, #cbd5e1 100%);
+   font-family: 'Source Sans Pro', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', sans-serif;
+   -webkit-font-smoothing: antialiased;
+   -moz-osx-font-smoothing: grayscale;
+ }
+
+ /* Header - Transparent background, only text visible */
+ #app-header {
+   position: sticky; top: 0; z-index: 10;
+   background: transparent;
+   backdrop-filter: none;
+   padding: 18px 8px 10px 8px;
+   border-bottom: none;
+ }
+ #app-header .app-title {
+   font-family: 'Poppins', sans-serif;
+   font-weight: 700;
+   letter-spacing: -0.02em;
+   font-size: 40px;
+   background: linear-gradient(120deg, #60a5fa 0%, #fbbf24 20%, #3b82f6 40%);
+   -webkit-background-clip: text; background-clip: text; color: transparent;
+ }
+ #app-header .app-subtitle {
+   font-family: 'Source Sans Pro', sans-serif;
+   color: #64748b;
+   margin-top: 4px;
+   font-weight: 400;
+   font-size: 16px;
+   letter-spacing: 0.01em;
+ }
+
+ /* Chat card */
+ #chat-card .gr-chatbot {
+   background: linear-gradient(180deg, rgba(255,255,255,0.9) 0%, rgba(248,250,252,0.95) 100%);
+   border: 1px solid rgba(59,130,246,0.2);
+   box-shadow: 0 10px 40px rgba(0,0,0,0.1), inset 0 1px 0 rgba(245,158,11,0.15);
+   backdrop-filter: blur(8px);
+   border-radius: 18px;
+ }
+
+ /* Code blocks - Using JetBrains Mono for better readability */
+ #chat-card .prose pre {
+   font-family: 'JetBrains Mono', 'Fira Code', 'Monaco', 'Cascadia Code', 'SF Mono', 'Consolas', 'DejaVu Sans Mono', monospace !important;
+   border: 1px solid rgba(59,130,246,0.2);
+   background: #f8fafc !important;
+   font-size: 13px;
+   line-height: 1.5;
+   font-weight: 400;
+ }
+ #chat-card .prose code {
+   font-family: 'JetBrains Mono', 'Fira Code', 'Monaco', 'Cascadia Code', 'SF Mono', 'Consolas', 'DejaVu Sans Mono', monospace !important;
+   background: rgba(59,130,246,0.1);
+   border-radius: 6px;
+   padding: 0.1rem 0.35rem;
+   font-size: 13px;
+   font-weight: 500;
+ }
+
+ /* Chat message text styling */
+ .gr-chatbot .message {
+   font-family: 'Source Sans Pro', sans-serif;
+   font-size: 15px;
+   line-height: 1.65;
+   font-weight: 400;
+   letter-spacing: 0.01em;
+ }
+
+ .gr-chatbot .message p {
+   font-family: 'Source Sans Pro', sans-serif;
+   margin-bottom: 0.75em;
+ }
+
+ /* ChatGPT-style input */
+ #chat-input-row {
+   margin-top: 16px;
+   padding: 0 8px;
+   display: flex !important;
+   align-items: center !important;
+   gap: 6px !important;
+ }
+
+ .chat-input-box textarea {
+   border: none !important;
+   outline: none !important;
+   box-shadow: 0 2px 8px rgba(0,0,0,0.08) !important;
+   border-radius: 24px !important;
+   background: rgba(255,255,255,0.9) !important;
+   color: #334155 !important;
+   padding: 12px 16px !important;
+   font-family: 'Source Sans Pro', sans-serif !important;
+   font-size: 15px !important;
+   line-height: 1.5 !important;
+   font-weight: 400 !important;
+   letter-spacing: 0.01em !important;
+   resize: none !important;
+   transition: all 0.2s ease !important;
+ }
+
+ .chat-input-box textarea:focus {
+   box-shadow: 0 4px 12px rgba(59,130,246,0.25) !important;
+   background: rgba(255,255,255,1) !important;
+ }
+
+ .chat-input-box textarea::placeholder {
+   color: #94a3b8 !important;
+   opacity: 0.8 !important;
+ }
+
+ /* Hide container borders */
+ .chat-input-box {
+   border: none !important;
+   background: transparent !important;
+   box-shadow: none !important;
+   flex: 1 !important;
+   width: 100% !important;
+ }
+
+ /* Input wrapper for send button positioning */
+
+ /* Send indicator - only show when stop button is hidden */
+ .chat-input-box::after {
+   content: "↵";
+   position: absolute;
+   right: 16px;
+   top: 50%;
+   transform: translateY(-50%);
+   color: #64748b;
+   font-size: 16px;
+   pointer-events: none;
+   opacity: 0.6;
+   transition: opacity 0.2s ease;
+ }
+
+ .chat-input-box:hover::after {
+   opacity: 0.8;
+ }
+
+ /* Stop button styling */
+ .stop-btn-chat {
+   flex-shrink: 0 !important;
+   width: 45px !important;
+   min-width: 45px !important;
+   max-width: 45px !important;
+   height: 45px !important;
+   margin: 0 !important;
+   padding: 0 !important;
+   border-radius: 100% !important;
+ }
+
+ .stop-btn-chat button {
+   background: rgba(239, 68, 68, 0.1) !important;
+   border: 1px solid rgba(239, 68, 68, 0.3) !important;
+   color: #ef4444 !important;
+   border-radius: 50% !important;
+   padding: 6px !important;
+   min-width: 45px !important;
+   max-width: 45px !important;
+   width: 45px !important;
+   height: 45px !important;
+   font-size: 14px !important;
+   font-weight: 500 !important;
+   transition: all 0.2s ease !important;
+   display: flex !important;
+   align-items: center !important;
+   justify-content: center !important;
+   flex-shrink: 0 !important;
+   box-sizing: border-box !important;
+ }
+
+ .stop-btn-chat button:hover {
+   background: rgba(239, 68, 68, 0.15) !important;
+   border-color: rgba(239, 68, 68, 0.4) !important;
+   transform: translateY(-1px) !important;
+   box-shadow: 0 4px 8px rgba(239, 68, 68, 0.2) !important;
+ }
+
+ /* Hide send icon when stop button is visible */
+ #chat-input-row:has(.stop-btn-chat:not([style*="display: none"])) .chat-input-box::after {
+   display: none;
+ }
+
+ /* Right pane cards */
+ .side-card {
+   border: 1px solid rgba(59,130,246,0.2);
+   border-radius: 16px;
+   background: linear-gradient(180deg, rgba(255,255,255,0.8) 0%, rgba(248,250,252,0.9) 100%);
+   padding: 8px 10px;
+ }
+
+ .footer-tip {
+   font-family: 'Source Sans Pro', sans-serif;
+   color: #64748b;
+   font-size: 12.5px;
+   text-align: center;
+   margin-top: 6px;
+   font-weight: 400;
+   letter-spacing: 0.02em;
+ }
+
+ /* Hidden buttons row - make invisible without using visibility */
+ #hidden-buttons {
+   opacity: 0 !important;
+   height: 0 !important;
+   overflow: hidden !important;
+   margin: 0 !important;
+   padding: 0 !important;
+   pointer-events: none !important;
+ }
+
+ /* Force light theme - Disable dark mode switching */
+ @media (prefers-color-scheme: dark) {
+   .gradio-container {
+     background: radial-gradient(1200px 600px at 20% 0%, #f1f5f9 0%, #e2e8f0 45%, #cbd5e1 100%) !important;
+     color-scheme: light !important;
+   }
+
+   /* Ensure all elements use light theme colors */
+   .gradio-container * {
+     color-scheme: light !important;
+   }
+
+   /* Override any dark mode styles that might be applied */
+   .gradio-container .gr-chatbot {
+     background: linear-gradient(180deg, rgba(255,255,255,0.9) 0%, rgba(248,250,252,0.95) 100%) !important;
+     color: #334155 !important;
+   }
+
+   .gradio-container .chat-input-box textarea {
+     background: rgba(255,255,255,0.9) !important;
+     color: #334155 !important;
+   }
+
+   .gradio-container #app-header .app-subtitle {
+     color: #64748b !important;
+   }
+
+   /* Force light theme for all buttons and inputs */
+   .gradio-container button {
+     color-scheme: light !important;
+   }
+
+   .gradio-container input,
+   .gradio-container textarea {
+     color-scheme: light !important;
+   }
+ }