Update app.py
app.py
CHANGED
@@ -58,26 +58,6 @@ def downsample_video(video_path):
     vidcap.release()
     return frames
 
-def progress_bar_html(label: str) -> str:
-    """
-    Returns an HTML snippet for a thin progress bar with a label.
-    The progress bar is styled as a light cyan animated bar.
-    """
-    return f'''
-    <div style="display: flex; align-items: center;">
-        <span style="margin-right: 10px; font-size: 14px;">{label}</span>
-        <div style="width: 110px; height: 5px; background-color: #B0E0E6; border-radius: 2px; overflow: hidden;">
-            <div style="width: 100%; height: 100%; background-color: #00FFFF; animation: loading 1.5s linear infinite;"></div>
-        </div>
-    </div>
-    <style>
-    @keyframes loading {{
-        0% {{ transform: translateX(-100%); }}
-        100% {{ transform: translateX(100%); }}
-    }}
-    </style>
-    '''
-
 @spaces.GPU
 def generate_image(text: str, image: Image.Image,
                     max_new_tokens: int = 1024,
@@ -113,7 +93,6 @@ def generate_image(text: str, image: Image.Image,
     thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""
-    yield progress_bar_html("Processing image with Cosmos-Reason1")
     for new_text in streamer:
         buffer += new_text
         buffer = buffer.replace("<|im_end|>", "")
@@ -167,7 +146,6 @@ def generate_video(text: str, video_path: str,
     thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
     thread.start()
     buffer = ""
-    yield progress_bar_html("Processing video with Cosmos-Reason1")
     for new_text in streamer:
         buffer += new_text
         buffer = buffer.replace("<|im_end|>", "")
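The first hunk only shows the tail of downsample_video (vidcap.release() and return frames); the rest of that helper is untouched by this commit. For context, below is a minimal sketch of what such a helper typically looks like in these Spaces. The function name, vidcap, and frames come from the diff; the OpenCV-based sampling strategy, the frame count, and the PIL-image-plus-timestamp return format are assumptions.

# Hypothetical sketch of a frame-sampling helper; only the names
# downsample_video, vidcap, and frames are taken from the diff above.
import cv2
import numpy as np
from PIL import Image

def downsample_video(video_path, num_frames=10):
    """Sample a fixed number of evenly spaced frames from a video file."""
    vidcap = cv2.VideoCapture(video_path)
    total = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vidcap.get(cv2.CAP_PROP_FPS) or 30.0
    frames = []
    # Pick evenly spaced frame indices across the clip.
    indices = np.linspace(0, max(total - 1, 0), num_frames, dtype=int)
    for idx in indices:
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
        ok, frame = vidcap.read()
        if not ok:
            continue
        # OpenCV decodes to BGR; convert to RGB before wrapping in a PIL image.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append((Image.fromarray(frame), round(float(idx) / fps, 2)))
    vidcap.release()
    return frames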
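The last two hunks drop the progress_bar_html yield but leave the threaded streaming loop around model_m.generate in place. Below is a minimal sketch of that pattern, assuming model_m and processor are a transformers vision-language model and its processor; the stream_generate wrapper, the processor.tokenizer argument, and the streamer options are illustrative and not taken from app.py.

# Hypothetical sketch of the Thread + TextIteratorStreamer pattern visible
# in the generate_image/generate_video hunks.
from threading import Thread
from transformers import TextIteratorStreamer

def stream_generate(model_m, processor, inputs, max_new_tokens=1024):
    # skip_prompt keeps the echoed prompt out of the streamed output.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)
    # generate() blocks, so it runs in a background thread while this
    # generator consumes partial text from the streamer and yields it to the UI.
    thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
    thread.start()
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        # Defensive cleanup of the end-of-turn marker, mirroring the diff.
        buffer = buffer.replace("<|im_end|>", "")
        yield buffer

With the progress-bar yield removed, the first value the caller receives is the first partial buffer from the streamer rather than an HTML placeholder.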