ankandrew committed on
Commit
e63de23
·
verified ·
1 Parent(s): 9498443

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -6
app.py CHANGED
@@ -5,7 +5,7 @@ from typing import Iterator, Callable
5
 
6
  import gradio as gr
7
  import spaces
8
- from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
9
  from qwen_vl_utils import process_vision_info
10
 
11
  subprocess.run(
@@ -19,6 +19,8 @@ MODEL_NAMES = {
19
  "Qwen2.5-VL-7B-Instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
20
  "Qwen2.5-VL-3B-Instruct": "Qwen/Qwen2.5-VL-3B-Instruct",
21
  "Qwen2.5-VL-32B-Instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
 
 
22
  }
23
 
24
 
@@ -43,11 +45,18 @@ def run_inference(model_key, input_type, text, image, video, fps, system_prompt,
43
  Load the selected Qwen2.5-VL model and run inference on text, image, or video.
44
  """
45
  model_id = MODEL_NAMES[model_key]
46
- model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
47
- model_id,
48
- torch_dtype="auto",
49
- device_map="auto",
50
- )
 
 
 
 
 
 
 
51
  processor = AutoProcessor.from_pretrained(model_id)
52
 
53
  # Text-only inference
 
5
 
6
  import gradio as gr
7
  import spaces
8
+ from transformers import Qwen2_5_VLForConditionalGeneration, Qwen3VLMoeForConditionalGeneration, AutoProcessor
9
  from qwen_vl_utils import process_vision_info
10
 
11
  subprocess.run(
 
19
  "Qwen2.5-VL-7B-Instruct": "Qwen/Qwen2.5-VL-7B-Instruct",
20
  "Qwen2.5-VL-3B-Instruct": "Qwen/Qwen2.5-VL-3B-Instruct",
21
  "Qwen2.5-VL-32B-Instruct": "Qwen/Qwen2.5-VL-32B-Instruct",
22
+ "Qwen3-VL-4B-Instruct": "Qwen/Qwen3-VL-4B-Instruct",
23
+ "Qwen3-VL-8B-Instruct": "Qwen/Qwen3-VL-8B-Instruct",
24
  }
25
 
26
 
 
45
  Load the selected Qwen2.5-VL model and run inference on text, image, or video.
46
  """
47
  model_id = MODEL_NAMES[model_key]
48
+ if "Qwen3" in model_id:
49
+ model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
50
+ model_id,
51
+ torch_dtype="auto",
52
+ device_map="auto",
53
+ )
54
+ else:
55
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
56
+ model_id,
57
+ torch_dtype="auto",
58
+ device_map="auto",
59
+ )
60
  processor = AutoProcessor.from_pretrained(model_id)
61
 
62
  # Text-only inference