Spaces:
Running
on
Zero
Running
on
Zero
updated model loading
Browse files- app.py +2 -2
- requirements.txt +1 -0
app.py
CHANGED
|
@@ -9,7 +9,7 @@ from io import BytesIO
|
|
| 9 |
|
| 10 |
|
| 11 |
models = {
|
| 12 |
-
"Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
|
| 13 |
}
|
| 14 |
|
| 15 |
processors = {
|
|
@@ -28,7 +28,7 @@ def image_to_base64(image):
|
|
| 28 |
|
| 29 |
@spaces.GPU
|
| 30 |
def run_example(image, text_input, model_id="Qwen/Qwen2-VL-7B-Instruct"):
|
| 31 |
-
model = models[model_id].eval()
|
| 32 |
processor = processors[model_id]
|
| 33 |
|
| 34 |
messages = [
|
|
|
|
| 9 |
|
| 10 |
|
| 11 |
models = {
|
| 12 |
+
"Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", torch_dtype="auto", device_map="auto")
|
| 13 |
}
|
| 14 |
|
| 15 |
processors = {
|
|
|
|
| 28 |
|
| 29 |
@spaces.GPU
|
| 30 |
def run_example(image, text_input, model_id="Qwen/Qwen2-VL-7B-Instruct"):
|
| 31 |
+
model = models[model_id].eval()
|
| 32 |
processor = processors[model_id]
|
| 33 |
|
| 34 |
messages = [
|
requirements.txt
CHANGED
|
@@ -3,6 +3,7 @@ Pillow==10.3.0
|
|
| 3 |
Requests==2.31.0
|
| 4 |
torch
|
| 5 |
torchvision
|
|
|
|
| 6 |
git+https://github.com/huggingface/transformers.git@main
|
| 7 |
accelerate==0.30.0
|
| 8 |
qwen-vl-utils
|
|
|
|
| 3 |
Requests==2.31.0
|
| 4 |
torch
|
| 5 |
torchvision
|
| 6 |
+
transformers
|
| 7 |
git+https://github.com/huggingface/transformers.git@main
|
| 8 |
accelerate==0.30.0
|
| 9 |
qwen-vl-utils
|