Make LCM client adaptive.
app.py CHANGED
@@ -24,7 +24,22 @@ if not HF_TOKEN:
 API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
 headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 
-
+
+def configure_image_client():
+    url = "https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/?view=api"
+    try:
+        response = requests.get(url)
+        response.raise_for_status()
+        content_text = response.text
+        pattern = r'"root":"(https://latent-consistency-super-fast-lcm-lora-sd1-5.hf.space/.*?)"'
+        match = re.findall(pattern, content_text)[0]
+        return Client(match)
+    except requests.RequestException as e:
+        print(f"Error fetching URL content: {e}")
+        raise e
+
+
+client = configure_image_client()
 
 
 def init_speech_to_text_model() -> Pipeline:
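For context, the added configure_image_client() fetches the LCM Space's page, extracts the current "root" URL from its embedded config with a regex, and builds a client from it, so the image backend is resolved at startup instead of being pinned to a stale address. A minimal usage sketch follows, assuming Client comes from gradio_client (the diff does not show the import) and using a hypothetical api_name and argument list; the real endpoint signature would have to be read from the Space's /?view=api page.

# Minimal usage sketch (not part of the commit): calling the adaptive client
# from elsewhere in app.py. configure_image_client() is the function added in
# the diff above.
from gradio_client import Client  # assumed import; not shown in this hunk

image_client = configure_image_client()  # resolves the Space's current root URL

def generate_image(prompt: str):
    # predict() is the standard gradio_client call; "/predict" and the single
    # prompt argument are hypothetical placeholders for this Space's endpoint.
    return image_client.predict(prompt, api_name="/predict")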