Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -10,6 +10,8 @@ from gradio_client import Client, handle_file
 
 #client = Client("fffiloni/moondream2", hf_token=hf_token)
 
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
 cap_model = AutoModelForCausalLM.from_pretrained(
     "vikhyatk/moondream2",
     revision="2025-06-21",
@@ -33,8 +35,6 @@ def infer_cap(image):
     return result
 
 
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
 model_path = "meta-llama/Llama-2-7b-chat-hf"
 
 tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
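In effect, the commit moves the transformers import above the first use of AutoModelForCausalLM: in the previous revision the class was referenced around line 13 but only imported at line 36, which would raise a NameError at startup unless the name was already bound earlier in the file. Below is a minimal sketch of the resulting top of app.py after this change (assumptions: hf_token is defined earlier in the file, which the diff does not show, and the from_pretrained call is truncated in the hunk, so its closing paren here is illustrative):

from gradio_client import Client, handle_file
from transformers import AutoTokenizer, AutoModelForCausalLM

#client = Client("fffiloni/moondream2", hf_token=hf_token)

# The import now precedes the first use of AutoModelForCausalLM,
# so the captioning model can be constructed without a NameError.
cap_model = AutoModelForCausalLM.from_pretrained(
    "vikhyatk/moondream2",
    revision="2025-06-21",
)

model_path = "meta-llama/Llama-2-7b-chat-hf"

# Note: recent transformers releases deprecate use_auth_token in
# favor of token=; kept as-is here to match the Space's code.
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)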