littlebird13 commited on
Commit
dacd46c
·
verified ·
1 Parent(s): ac3a74a

Upload folder using huggingface_hub

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. app.py +663 -0
  3. assets/qwen.png +3 -0
  4. config.py +188 -0
  5. requirements.txt +4 -0
  6. ui_components/thinking_button.py +27 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/qwen.png filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,663 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ from http import HTTPStatus
3
+ import os
4
+ import uuid
5
+ import time
6
+ import gradio as gr
7
+ from gradio_client import utils as client_utils
8
+ import modelscope_studio.components.antd as antd
9
+ import modelscope_studio.components.antdx as antdx
10
+ import modelscope_studio.components.base as ms
11
+ import modelscope_studio.components.pro as pro
12
+ from config import DEFAULT_THEME, DEFAULT_SYS_PROMPT, save_history, get_text, user_config, bot_config, welcome_config, markdown_config, upload_config, api_key, base_url, MODEL, THINKING_MODEL, bucket
13
+ from ui_components.logo import Logo
14
+ from ui_components.thinking_button import ThinkingButton
15
+
16
+ from openai import OpenAI
17
+
18
# OpenAI-compatible client pointed at the DashScope endpoint
# (api_key / base_url come from config.py, read from the environment).
client = OpenAI(
    api_key=api_key,
    base_url=base_url,
)
22
+
23
+
24
def encode_file_to_base64(file_path):
    """Return the file's contents as a ``data:<mime>;base64,...`` URL."""
    mime_type = client_utils.get_mimetype(file_path)
    with open(file_path, "rb") as fp:
        encoded = base64.b64encode(fp.read()).decode("utf-8")
    return f"data:{mime_type};base64,{encoded}"
29
+
30
+
31
def file_path_to_oss_url(file_path: str):
    """Upload a local file to OSS and return a signed GET URL.

    Remote ("http...") paths are returned unchanged. If the upload does
    not succeed, the original local path is returned as a fallback.
    """
    if file_path.startswith("http"):
        return file_path
    extension = file_path.split('.')[-1]
    object_name = f'studio-temp/Qwen3-VL-Demo/{uuid.uuid4()}.{extension}'
    upload = bucket.put_object_from_file(object_name, file_path)
    if upload.status != HTTPStatus.OK:
        return file_path
    # Signed URL is valid for one hour.
    return bucket.sign_url('GET', object_name, 60 * 60, slash_safe=True)
44
+
45
+
46
def format_history(history, oss_cache, sys_prompt=None):
    """Convert the chatbot history into OpenAI-style chat messages.

    Args:
        history: Chatbot message list. User entries carry a ``file`` item
            (list of URLs/paths) followed by a ``text`` item; assistant
            entries carry a list of typed content items.
        oss_cache: Mutable mapping of local file path -> uploaded OSS URL,
            reused across turns to avoid re-uploading the same file.
        sys_prompt: Optional system prompt; falls back to
            ``DEFAULT_SYS_PROMPT`` when not given.

    Returns:
        A list of ``{"role", "content"}`` dicts for the OpenAI-compatible
        chat completion API.
    """
    # Fix: sys_prompt was previously accepted but ignored —
    # DEFAULT_SYS_PROMPT was always used.
    messages = [{
        "role": "system",
        "content": sys_prompt if sys_prompt is not None else DEFAULT_SYS_PROMPT,
    }]
    for item in history:
        if item["role"] == "user":
            files = []
            # item["content"][0] is the file part, item["content"][1] the text.
            for file_path in item["content"][0]["content"]:
                if file_path.startswith("http"):
                    # Remote URLs are passed through as image content.
                    files.append({
                        "type": "image_url",
                        "image_url": {
                            "url": file_path
                        }
                    })
                elif os.path.exists(file_path):
                    # Fix: dict.get(key, default) evaluates the default
                    # eagerly, so the previous code re-uploaded the file on
                    # every call even on a cache hit. Upload only on miss.
                    file_url = oss_cache.get(file_path)
                    if file_url is None:
                        file_url = file_path_to_oss_url(file_path)
                    oss_cache[file_path] = file_url

                    # Fall back to an inline base64 data URL when the OSS
                    # upload failed (file_path_to_oss_url returned the path).
                    if not file_url.startswith("http"):
                        file_url = encode_file_to_base64(file_path=file_path)

                    mime_type = client_utils.get_mimetype(file_path)
                    if mime_type.startswith("image"):
                        files.append({
                            "type": "image_url",
                            "image_url": {
                                "url": file_url
                            }
                        })
                    elif mime_type.startswith("video"):
                        files.append({
                            "type": "video_url",
                            "video_url": {
                                "url": file_url
                            }
                        })

            messages.append({
                "role": "user",
                "content": files + [{
                    "type": "text",
                    "text": item["content"][1]["content"]
                }]
            })
        elif item["role"] == "assistant":
            # Keep only plain text parts (drops "tool"/thinking segments).
            contents = [{
                "type": "text",
                "text": content["content"]
            } for content in item["content"] if content["type"] == "text"]
            messages.append({
                "role": "assistant",
                "content": contents[0]["text"] if len(contents) > 0 else ""
            })
    return messages
107
+
108
+
109
class Gradio_Events:
    """Static event handlers wired to the Gradio/modelscope-studio UI.

    All handlers receive the shared ``state`` dict (conversation contexts,
    active conversation id, OSS URL cache) and return/yield component
    updates keyed by the UI components defined at module level
    (``chatbot``, ``state``, ``input``, ``conversations``, ...).
    """

    @staticmethod
    def submit(state_value):
        """Stream a chat completion for the active conversation.

        Appends a pending assistant message, streams reasoning/answer
        deltas from the API, and yields chatbot/state updates per chunk.
        """
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        enable_thinking = state_value["conversation_contexts"][
            state_value["conversation_id"]]["enable_thinking"]
        messages = format_history(history, state_value["oss_cache"])
        # Thinking mode routes to the dedicated reasoning model.
        model = THINKING_MODEL if enable_thinking else MODEL
        history.append({
            "role": "assistant",
            "content": [],
            "key": str(uuid.uuid4()),
            "loading": True,
            "header": "Qwen3-VL",
            "status": "pending"
        })

        # Show the pending assistant bubble immediately.
        yield {
            chatbot: gr.update(value=history),
            state: gr.update(value=state_value),
        }
        try:
            response = client.chat.completions.create(model=model,
                                                      messages=messages,
                                                      stream=True)
            start_time = time.time()
            reasoning_content = ""
            answer_content = ""
            is_thinking = False
            is_answering = False
            # contents[0]: thinking ("tool") segment; contents[1]: answer text.
            contents = [None, None]
            for chunk in response:
                if not chunk or (
                        not chunk.choices[0].delta.content and
                        (not hasattr(chunk.choices[0].delta,
                                     "reasoning_content")
                         or not chunk.choices[0].delta.reasoning_content)):
                    # Skip keep-alive/empty chunks.
                    pass
                else:
                    delta = chunk.choices[0].delta
                    if hasattr(
                            delta,
                            'reasoning_content') and delta.reasoning_content:
                        if not is_thinking:
                            # First reasoning token: open the thinking panel.
                            contents[0] = {
                                "type": "tool",
                                "content": "",
                                "options": {
                                    "title": get_text("Thinking...", "思考中..."),
                                    "status": "pending"
                                },
                                "copyable": False,
                                "editable": False
                            }
                            is_thinking = True
                        reasoning_content += delta.reasoning_content
                    if hasattr(delta, 'content') and delta.content:
                        if not is_answering:
                            # First answer token: close the thinking panel
                            # and record how long the thought phase took.
                            thought_cost_time = "{:.2f}".format(time.time() -
                                                                start_time)
                            if contents[0]:
                                contents[0]["options"]["title"] = get_text(
                                    f"End of Thought ({thought_cost_time}s)",
                                    f"已深度思考 (用时{thought_cost_time}s)")
                                contents[0]["options"]["status"] = "done"
                            contents[1] = {
                                "type": "text",
                                "content": "",
                            }

                            is_answering = True
                        answer_content += delta.content

                    # Re-render the accumulated segments after each chunk.
                    if contents[0]:
                        contents[0]["content"] = reasoning_content
                    if contents[1]:
                        contents[1]["content"] = answer_content
                    history[-1]["content"] = [
                        content for content in contents if content
                    ]

                    history[-1]["loading"] = False
                    yield {
                        chatbot: gr.update(value=history),
                        state: gr.update(value=state_value)
                    }
            print("model: ", model, "-", "reasoning_content: ",
                  reasoning_content, "\n", "content: ", answer_content)
            history[-1]["status"] = "done"
            cost_time = "{:.2f}".format(time.time() - start_time)
            history[-1]["footer"] = get_text(f"{cost_time}s",
                                             f"用时{cost_time}s")
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value),
            }
        except Exception as e:
            # Surface the error inline in the chat, then re-raise so
            # Gradio also reports it.
            print("model: ", model, "-", "Error: ", e)
            history[-1]["loading"] = False
            history[-1]["status"] = "done"
            history[-1]["content"] += [{
                "type":
                "text",
                "content":
                f'<span style="color: var(--color-red-500)">{str(e)}</span>'
            }]
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value)
            }
            raise e

    @staticmethod
    def add_message(input_value, thinking_btn_state_value, state_value):
        """Append the user's message (files + text) and stream a reply."""
        text = input_value["text"]
        files = input_value["files"]
        if not state_value["conversation_id"]:
            # First message: create a new conversation keyed by a random id.
            random_id = str(uuid.uuid4())
            history = []
            state_value["conversation_id"] = random_id
            state_value["conversation_contexts"][
                state_value["conversation_id"]] = {
                    "history": history
                }
            state_value["conversations"].append({
                "label": text,
                "key": random_id
            })

        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]

        # Snapshot the current thinking-toggle value for this conversation.
        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }

        history.append({
            "key":
            str(uuid.uuid4()),
            "role":
            "user",
            "content": [{
                "type": "file",
                "content": [f for f in files]
            }, {
                "type": "text",
                "content": text
            }]
        })
        # Lock the UI, stream the completion, then always unlock in finally.
        yield Gradio_Events.preprocess_submit(clear_input=True)(state_value)

        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def preprocess_submit(clear_input=True):
        """Return a handler that locks the UI before a submission starts."""

        def preprocess_submit_handler(state_value):
            history = state_value["conversation_contexts"][
                state_value["conversation_id"]]["history"]
            return {
                **({
                    input:
                    gr.update(value=None, loading=True) if clear_input else gr.update(loading=True),
                } if clear_input else {}),
                conversations:
                # Disable every conversation entry except the active one.
                gr.update(active_key=state_value["conversation_id"],
                          items=list(
                              map(
                                  lambda item: {
                                      **item,
                                      "disabled":
                                      True if item["key"] != state_value[
                                          "conversation_id"] else False,
                                  }, state_value["conversations"]))),
                add_conversation_btn:
                gr.update(disabled=True),
                clear_btn:
                gr.update(disabled=True),
                conversation_delete_menu_item:
                gr.update(disabled=True),
                chatbot:
                # Grey out message actions while the model is streaming.
                gr.update(value=history,
                          bot_config=bot_config(
                              disabled_actions=['edit', 'retry', 'delete']),
                          user_config=user_config(
                              disabled_actions=['edit', 'delete'])),
                state:
                gr.update(value=state_value),
            }

        return preprocess_submit_handler

    @staticmethod
    def postprocess_submit(state_value):
        """Unlock the UI after a submission finishes (or is cancelled)."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        return {
            input:
            gr.update(loading=False),
            conversation_delete_menu_item:
            gr.update(disabled=False),
            clear_btn:
            gr.update(disabled=False),
            conversations:
            gr.update(items=state_value["conversations"]),
            add_conversation_btn:
            gr.update(disabled=False),
            chatbot:
            gr.update(value=history,
                      bot_config=bot_config(),
                      user_config=user_config()),
            state:
            gr.update(value=state_value),
        }

    @staticmethod
    def cancel(state_value):
        """Stop the in-flight generation and mark the last message paused."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[-1]["loading"] = False
        history[-1]["status"] = "done"
        history[-1]["footer"] = get_text("Chat completion paused", "对话已暂停")
        return Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def delete_message(state_value, e: gr.EventData):
        """Remove the message at the event's index from the history."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index] + history[index + 1:]

        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = history

        return gr.update(value=state_value)

    @staticmethod
    def edit_message(state_value, chatbot_value, e: gr.EventData):
        """Persist an in-place edit and tag the message as "Edited"."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[index]["content"] = chatbot_value[index]["content"]
        if not history[index].get("edited"):
            history[index]["edited"] = True
            history[index]["footer"] = ((history[index]["footer"]) +
                                        " " if history[index].get("footer")
                                        else "") + get_text("Edited", "已编辑")
        return gr.update(value=state_value), gr.update(value=history)

    @staticmethod
    def regenerate_message(thinking_btn_state_value, state_value,
                           e: gr.EventData):
        """Drop the message at the event index and everything after it,
        then re-run generation from the truncated history."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index]

        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }

        yield Gradio_Events.preprocess_submit()(state_value)
        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def apply_prompt(e: gr.EventData, input_value):
        """Fill the input box from a clicked welcome-screen example."""
        input_value["text"] = e._data["payload"][0]["value"]["description"]
        input_value["files"] = e._data["payload"][0]["value"]["urls"]
        return gr.update(value=input_value)

    @staticmethod
    def new_chat(thinking_btn_state, state_value):
        """Clear the active conversation id so the next message starts a
        fresh conversation; re-enable thinking by default."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_id"] = ""
        thinking_btn_state["enable_thinking"] = True
        return gr.update(active_key=state_value["conversation_id"]), gr.update(
            value=None), gr.update(value=thinking_btn_state), gr.update(
                value=state_value)

    @staticmethod
    def select_conversation(thinking_btn_state_value, state_value,
                            e: gr.EventData):
        """Switch to the conversation picked in the sidebar."""
        active_key = e._data["payload"][0]
        if state_value["conversation_id"] == active_key or (
                active_key not in state_value["conversation_contexts"]):
            return gr.skip()
        state_value["conversation_id"] = active_key
        # Restore that conversation's thinking-toggle setting.
        thinking_btn_state_value["enable_thinking"] = state_value[
            "conversation_contexts"][active_key]["enable_thinking"]
        return gr.update(active_key=active_key), gr.update(
            value=state_value["conversation_contexts"][active_key]
            ["history"]), gr.update(value=thinking_btn_state_value), gr.update(
                value=state_value)

    @staticmethod
    def click_conversation_menu(state_value, e: gr.EventData):
        """Handle sidebar context-menu actions (currently only delete)."""
        conversation_id = e._data["payload"][0]["key"]
        operation = e._data["payload"][1]["key"]
        if operation == "delete":
            del state_value["conversation_contexts"][conversation_id]

            state_value["conversations"] = [
                item for item in state_value["conversations"]
                if item["key"] != conversation_id
            ]

            if state_value["conversation_id"] == conversation_id:
                # Deleting the active conversation also clears the chat view.
                state_value["conversation_id"] = ""
                return gr.update(
                    items=state_value["conversations"],
                    active_key=state_value["conversation_id"]), gr.update(
                        value=None), gr.update(value=state_value)
            else:
                return gr.update(
                    items=state_value["conversations"]), gr.skip(), gr.update(
                        value=state_value)
        # NOTE(review): unreachable for "delete"; kept for unknown operations.
        return gr.skip()

    @staticmethod
    def clear_conversation_history(state_value):
        """Empty the active conversation's history (keeps the conversation)."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = []
        return gr.update(value=None), gr.update(value=state_value)

    @staticmethod
    def update_browser_state(state_value):
        """Mirror conversations into gr.BrowserState for persistence."""
        return gr.update(value=dict(
            conversations=state_value["conversations"],
            conversation_contexts=state_value["conversation_contexts"]))

    @staticmethod
    def apply_browser_state(browser_state_value, state_value):
        """Restore conversations from the browser's persisted state on load."""
        state_value["conversations"] = browser_state_value["conversations"]
        state_value["conversation_contexts"] = browser_state_value[
            "conversation_contexts"]
        return gr.update(
            items=browser_state_value["conversations"]), gr.update(
                value=state_value)
470
+
471
+
472
+ css = """
473
+ .gradio-container {
474
+ padding: 0 !important;
475
+ }
476
+
477
+ .gradio-container > main.fillable {
478
+ padding: 0 !important;
479
+ }
480
+
481
+ #chatbot {
482
+ height: calc(100vh - 21px - 16px);
483
+ max-height: 1500px;
484
+ }
485
+
486
+ #chatbot .chatbot-conversations {
487
+ height: 100vh;
488
+ background-color: var(--ms-gr-ant-color-bg-layout);
489
+ padding-left: 4px;
490
+ padding-right: 4px;
491
+ }
492
+
493
+
494
+ #chatbot .chatbot-conversations .chatbot-conversations-list {
495
+ padding-left: 0;
496
+ padding-right: 0;
497
+ }
498
+
499
+ #chatbot .chatbot-chat {
500
+ padding: 32px;
501
+ padding-bottom: 0;
502
+ height: 100%;
503
+ }
504
+
505
+ @media (max-width: 768px) {
506
+ #chatbot .chatbot-chat {
507
+ padding: 10px;
508
+ }
509
+ }
510
+
511
+ #chatbot .chatbot-chat .chatbot-chat-messages {
512
+ flex: 1;
513
+ }
514
+ """
515
+
516
with gr.Blocks(css=css, fill_width=True) as demo:
    # Per-session app state: all conversations, the active conversation id,
    # and a cache of already-uploaded OSS file URLs.
    state = gr.State({
        "conversation_contexts": {},
        "conversations": [],
        "conversation_id": "",
        "oss_cache": {}
    })

    with ms.Application(), antdx.XProvider(
            theme=DEFAULT_THEME), ms.AutoLoading():
        with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
            # Left Column: sidebar (hidden on small screens via span=0)
            with antd.Col(md=dict(flex="0 0 260px", span=24, order=0),
                          span=0,
                          order=1,
                          elem_style=dict(width=0)):
                with ms.Div(elem_classes="chatbot-conversations"):
                    with antd.Flex(vertical=True,
                                   gap="small",
                                   elem_style=dict(height="100%")):
                        # Logo
                        Logo()

                        # New Conversation Button
                        with antd.Button(value=None,
                                         color="primary",
                                         variant="filled",
                                         block=True) as add_conversation_btn:
                            ms.Text(get_text("New Conversation", "新建对话"))
                            with ms.Slot("icon"):
                                antd.Icon("PlusOutlined")

                        # Conversations List (with a delete context menu)
                        with antdx.Conversations(
                                elem_classes="chatbot-conversations-list",
                        ) as conversations:
                            with ms.Slot('menu.items'):
                                with antd.Menu.Item(
                                        label="Delete", key="delete",
                                        danger=True
                                ) as conversation_delete_menu_item:
                                    with ms.Slot("icon"):
                                        antd.Icon("DeleteOutlined")
            # Right Column: chat area
            with antd.Col(flex=1, elem_style=dict(height="100%")):
                with antd.Flex(vertical=True,
                               gap="small",
                               elem_classes="chatbot-chat"):
                    # Chatbot
                    chatbot = pro.Chatbot(elem_classes="chatbot-chat-messages",
                                          height=0,
                                          markdown_config=markdown_config(),
                                          welcome_config=welcome_config(),
                                          user_config=user_config(),
                                          bot_config=bot_config())

                    # Input (text + image/video upload)
                    with pro.MultimodalInput(
                            placeholder=get_text("How can I help you today?",
                                                 "有什么我能帮助您的吗?"),
                            upload_config=upload_config()) as input:
                        with ms.Slot("prefix"):
                            with antd.Flex(gap=4,
                                           wrap=True,
                                           elem_style=dict(
                                               maxWidth='40vw',
                                               display="inline-flex")):
                                # Clear-history button
                                with antd.Button(value=None,
                                                 type="text") as clear_btn:
                                    with ms.Slot("icon"):
                                        antd.Icon("ClearOutlined")
                                # Thinking-mode toggle; returns its gr.State
                                thinking_btn_state = ThinkingButton()

    # Events Handler
    # Browser State Handler: persist conversations across page reloads
    if save_history:
        browser_state = gr.BrowserState(
            {
                "conversation_contexts": {},
                "conversations": [],
            },
            storage_key="qwen3_vl_demo_storage")
        state.change(fn=Gradio_Events.update_browser_state,
                     inputs=[state],
                     outputs=[browser_state])

        demo.load(fn=Gradio_Events.apply_browser_state,
                  inputs=[browser_state, state],
                  outputs=[conversations, state])

    # Conversations Handler
    add_conversation_btn.click(
        fn=Gradio_Events.new_chat,
        inputs=[thinking_btn_state, state],
        outputs=[conversations, chatbot, thinking_btn_state, state])
    conversations.active_change(
        fn=Gradio_Events.select_conversation,
        inputs=[thinking_btn_state, state],
        outputs=[conversations, chatbot, thinking_btn_state, state])
    conversations.menu_click(fn=Gradio_Events.click_conversation_menu,
                             inputs=[state],
                             outputs=[conversations, chatbot, state])
    # Chatbot Handler
    chatbot.welcome_prompt_select(fn=Gradio_Events.apply_prompt,
                                  inputs=[input],
                                  outputs=[input])

    chatbot.delete(fn=Gradio_Events.delete_message,
                   inputs=[state],
                   outputs=[state])
    chatbot.edit(fn=Gradio_Events.edit_message,
                 inputs=[state, chatbot],
                 outputs=[state, chatbot])

    # Retry re-runs generation from the truncated history.
    regenerating_event = chatbot.retry(fn=Gradio_Events.regenerate_message,
                                       inputs=[thinking_btn_state, state],
                                       outputs=[
                                           input, clear_btn,
                                           conversation_delete_menu_item,
                                           add_conversation_btn, conversations,
                                           chatbot, state
                                       ])

    # Input Handler
    submit_event = input.submit(fn=Gradio_Events.add_message,
                                inputs=[input, thinking_btn_state, state],
                                outputs=[
                                    input, clear_btn,
                                    conversation_delete_menu_item,
                                    add_conversation_btn, conversations,
                                    chatbot, state
                                ])
    # Cancel aborts both submit and retry streams.
    input.cancel(fn=Gradio_Events.cancel,
                 inputs=[state],
                 outputs=[
                     input, conversation_delete_menu_item, clear_btn,
                     conversations, add_conversation_btn, chatbot, state
                 ],
                 cancels=[submit_event, regenerating_event],
                 queue=False)

    clear_btn.click(fn=Gradio_Events.clear_conversation_history,
                    inputs=[state],
                    outputs=[chatbot, state])
660
+
661
+ if __name__ == "__main__":
662
+ demo.queue(default_concurrency_limit=100,
663
+ max_size=100).launch(ssr_mode=False, max_threads=100)
assets/qwen.png ADDED

Git LFS Details

  • SHA256: 50660a07fbe5578cc31d452aaf543eb24b8884ccd44c114a68acd18532d380cf
  • Pointer size: 131 Bytes
  • Size of remote file: 108 kB
config.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from modelscope_studio.components.pro.chatbot import ChatbotActionConfig, ChatbotBotConfig, ChatbotUserConfig, ChatbotWelcomeConfig, ChatbotMarkdownConfig
3
+ from modelscope_studio.components.pro.multimodal_input import MultimodalInputUploadConfig
4
+ import oss2
5
+ from oss2.credentials import EnvironmentVariableCredentialsProvider
6
+
7
# Oss

# Credentials are read from the environment:
# OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET
auth = oss2.ProviderAuthV4(EnvironmentVariableCredentialsProvider())

endpoint = os.getenv("OSS_ENDPOINT")

region = os.getenv("OSS_REGION")

bucket_name = os.getenv("OSS_BUCKET_NAME")

# Bucket used for temporary uploads of user files (see app.file_path_to_oss_url).
bucket = oss2.Bucket(auth, endpoint, bucket_name, region=region)

# Env
# True when running inside ModelScope studio — switches UI text to Chinese.
is_cn = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
api_key = os.getenv('API_KEY')
base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
24
+
25
+
26
def get_text(text: str, cn_text: str):
    """Pick the Chinese variant when running in the CN studio environment."""
    return cn_text if is_cn else text
30
+
31
+
32
# Save history in browser (gr.BrowserState) so conversations survive reloads.
save_history = True
# DashScope model ids: plain instruct vs. dedicated thinking model.
MODEL = "qwen3-vl-235b-a22b-instruct"
THINKING_MODEL = "qwen3-vl-235b-a22b-thinking"
36
+
37
+
38
+ # Chatbot Config
39
# Chatbot Config
def markdown_config():
    """Default markdown rendering config for chatbot messages."""
    return ChatbotMarkdownConfig()
41
+
42
+
43
def user_config(disabled_actions=None):
    """Config for user messages: copy/edit/delete actions.

    disabled_actions: action names to grey out (used while streaming).
    """
    return ChatbotUserConfig(
        class_names=dict(content="user-message-content"),
        actions=[
            "copy", "edit",
            # Delete asks for confirmation before removing the message.
            ChatbotActionConfig(
                action="delete",
                popconfirm=dict(title=get_text("Delete the message", "删除消息"),
                                description=get_text(
                                    "Are you sure to delete this message?",
                                    "确认删除该消息?"),
                                okButtonProps=dict(danger=True)))
        ],
        disabled_actions=disabled_actions)
57
+
58
+
59
def bot_config(disabled_actions=None):
    """Config for assistant messages: copy/edit/retry/delete actions.

    disabled_actions: action names to grey out (used while streaming).
    """
    return ChatbotBotConfig(actions=[
        "copy", "edit",
        # Retry regenerates from this point, discarding later messages.
        ChatbotActionConfig(
            action="retry",
            popconfirm=dict(
                title=get_text("Regenerate the message", "重新生成消息"),
                description=get_text(
                    "Regenerate the message will also delete all subsequent messages.",
                    "重新生成消息会删除所有后续消息。"),
                okButtonProps=dict(danger=True))),
        ChatbotActionConfig(action="delete",
                            popconfirm=dict(
                                title=get_text("Delete the message", "删除消息"),
                                description=get_text(
                                    "Are you sure to delete this message?",
                                    "确认删除该消息?"),
                                okButtonProps=dict(danger=True)))
    ],
                            avatar="./assets/qwen.png",
                            disabled_actions=disabled_actions)
80
+
81
+
82
def welcome_config():
    """Welcome screen shown before the first message: Qwen branding plus
    clickable example prompts, each with preset image URLs."""
    return ChatbotWelcomeConfig(
        variant="borderless",
        icon="./assets/qwen.png",
        title=get_text("Hello, I'm Qwen3-VL", "你好,我是 Qwen3-VL"),
        description=get_text(
            "Enter text and upload images or videos to get started.",
            "输入文本并上传图片或视频,开始对话吧。"),
        prompts=dict(
            title=get_text("How can I help you today?", "有什么我能帮助您的吗?"),
            styles={
                "list": {
                    "width": '100%',
                },
                "item": {
                    "flex": 1,
                },
            },
            items=[{
                "label":
                get_text("🤔 Logic Reasoning", "🤔 逻辑推理"),
                "children": [{
                    "urls": [
                        "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/r-1-1.png",
                        "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/r-1-2.png",
                        "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/r-1-3.png"
                    ],
                    "description":
                    get_text(
                        "Which one of these does the kitty seem to want to try first?",
                        "这只猫看起来要尝试先做什么?")
                }, {
                    "urls": [
                        "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/r-2.png",
                    ],
                    "description":
                    get_text(
                        "In the circuit, the diodes are ideal and the voltage source is Vs = 4 sin(ωt) V. Find the value measured on the ammeter.",
                        "电路中的 diodes 是理想的,电压源为 Vs = 4 sin(ωt) V。求电流表测量的数值。")
                }, {
                    "urls": [
                        "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/r-3.png"
                    ],
                    "description":
                    get_text(
                        "Which is the most popular Friday drink in Boston?\nAnswer the question using a single word or phrase.",
                        " Boston 的星期五饮料中最受欢迎的是什么?\n请用一个单词或短语回答该问题。")
                }]
            }, {
                "label":
                get_text("👨‍💻 Coding", "👨‍💻 编程"),
                "children": [
                    {
                        "urls": [
                            "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/c-1.png"
                        ],
                        "description":
                        get_text(
                            "Create the webpage using HTML and CSS based on my sketch design. Color it in dark mode.",
                            "基于我的草图设计,用 HTML 和 CSS 创建网页,并暗色模式下颜色。")
                    },
                    {
                        "urls": [
                            "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/c-2.png"
                        ],
                        "description":
                        get_text(
                            "Solve the problem using C++. Starter code:\nclass Solution {\npublic:\n    int countStableSubsequences(vector<int>& nums) {\n\n    }\n};",
                            "使用 C++ 解决问题。起始代码:\nclass Solution {\npublic:\n    int countStableSubsequences(vector<int>& nums) {\n\n    }\n};"
                        )
                    },
                    {
                        "urls": [
                            "https://misc-assets.oss-cn-beijing.aliyuncs.com/Qwen/Qwen3-VL-Demo/c-3.png"
                        ],
                        "description":
                        get_text("How to draw this plot using matplotlib?",
                                 "如何使用 matplotlib 绘制这张图?")
                    },
                ]
            }]),
    )
164
+
165
+
166
def upload_config():
    """Restrict uploads to images/videos and customize placeholders."""
    return MultimodalInputUploadConfig(
        accept="image/*,video/*",
        placeholder={
            "inline": {
                "title":
                "Upload files",
                "description":
                "Click or drag files to this area to upload images or videos"
            },
            "drop": {
                "title": "Drop files here",
            }
        })
180
+
181
+
182
# System prompt used when no per-conversation prompt is supplied.
DEFAULT_SYS_PROMPT = "You are a helpful and harmless assistant."

# Ant Design theme overrides (Qwen purple as primary color).
DEFAULT_THEME = {
    "token": {
        "colorPrimary": "#6A57FF",
    }
}
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ modelscope_studio
3
+ openai
4
+ oss2
ui_components/thinking_button.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import modelscope_studio.components.antd as antd
2
+ import modelscope_studio.components.base as ms
3
+ import gradio as gr
4
+ from config import get_text
5
+
6
+
7
def ThinkingButton():
    """Render a "Thinking" toggle button and return its gr.State.

    The state dict holds {"enable_thinking": bool}; clicking the button
    flips the flag, and the state.change callback re-styles the button
    ("solid" variant when enabled) to reflect the current value.
    """
    state = gr.State({"enable_thinking": False})
    with antd.Button(get_text("Thinking", "深度思考"),
                     shape="round",
                     color="primary") as thinking_btn:
        with ms.Slot("icon"):
            antd.Icon("SunOutlined")

    def toggle_thinking(state_value):
        # Flip the flag; apply_state_change then updates the button look.
        state_value["enable_thinking"] = not state_value["enable_thinking"]
        return gr.update(value=state_value)

    def apply_state_change(state_value):
        # "solid" variant marks thinking mode as enabled.
        return gr.update(
            variant="solid" if state_value["enable_thinking"] else "")

    state.change(fn=apply_state_change, inputs=[state], outputs=[thinking_btn])

    thinking_btn.click(fn=toggle_thinking, inputs=[state], outputs=[state])

    return state