use cpu and bigger models
- app.py +5 -3
- test.ipynb +0 -0
app.py CHANGED

@@ -20,11 +20,13 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Whisper: Speech-to-text
 model = whisper.load_model("base", device = device)
-model_med = whisper.load_model("small", device = device)
+#model_med = whisper.load_model("small", device = device)
+model_med = whisper.load_model("medium", device = device)
 
 #Roberta Q&A
-model_name = "deepset/tinyroberta-squad2"
-
+#model_name = "deepset/tinyroberta-squad2"
+model_name = "deepset/roberta-base-squad2"
+nlp = pipeline('question-answering', model=model_name, tokenizer=model_name, device = 0 if device == "cuda" else -1)
 
 #TTS
 tts_manager = ModelManager()
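For context, here is a minimal sketch of how the updated loading block could fit together in app.py, assuming the usual imports for openai-whisper, transformers, and Coqui TTS (the imports are not visible in this diff, so they are an assumption here). Note that the transformers pipeline takes an integer device index, so the added line maps "cuda" to GPU 0 and anything else to CPU (-1):

import torch
import whisper
from transformers import pipeline
from TTS.utils.manage import ModelManager  # assumed Coqui TTS import; not shown in the diff

# Pick GPU when available, otherwise fall back to CPU (from the hunk header)
device = "cuda" if torch.cuda.is_available() else "cpu"

# Whisper: speech-to-text; "base" for quick passes, "medium" for higher accuracy
model = whisper.load_model("base", device=device)
model_med = whisper.load_model("medium", device=device)

# Roberta Q&A; pipelines expect an integer device: 0 = first GPU, -1 = CPU
model_name = "deepset/roberta-base-squad2"
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name,
               device=0 if device == "cuda" else -1)

# TTS
tts_manager = ModelManager()

Keeping both the "base" and "medium" Whisper checkpoints loaded presumably lets the app trade speed against transcription accuracy at runtime.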
test.ipynb ADDED

File without changes