Update app.py
app.py CHANGED
@@ -93,9 +93,14 @@ WRITABLE_BASE = _pick_writable_base()
 LOCAL_BASE = WRITABLE_BASE / "my_app_cache" / "index"
 LOCAL_BASE.mkdir(parents=True, exist_ok=True)
 # Recreate the SAME embedding model used to build the index
+try:
+    import torch
+    _emb_device = "cuda" if torch.cuda.is_available() else "cpu"
+except Exception:
+    _emb_device = "cpu"
 emb = HuggingFaceEmbeddings(
     model_name="google/embeddinggemma-300m",
-    model_kwargs={"device":
+    model_kwargs={"device": _emb_device},
     encode_kwargs={"normalize_embeddings": True},
 )
 
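The added guard selects CUDA only when a working torch install reports an available GPU and falls back to CPU otherwise (including when torch itself cannot be imported), so the same app.py runs on GPU and CPU-only hosts. For context, a minimal sketch of how the recreated embeddings are typically consumed afterwards, assuming the index under LOCAL_BASE is a FAISS store built with the same embeddinggemma-300m model; the loader call below is illustrative and not part of this commit:

```python
# Illustrative usage only (assumption, not shown in this commit): load the
# prebuilt index with the SAME embedding model, then expose it as a retriever.
from langchain_community.vectorstores import FAISS

vectorstore = FAISS.load_local(
    str(LOCAL_BASE),                        # directory created above
    emb,                                    # embeddings running on _emb_device
    allow_dangerous_deserialization=True,   # FAISS persists its docstore via pickle
)
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
```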