Fully rm results page
src/app.py +5 -14
src/app.py
CHANGED
@@ -8,10 +8,10 @@ from model_utils import calculate_memory, get_model

 def get_results(model_name: str, library: str, options: list, access_token: str):
     model = get_model(model_name, library, access_token)
-    try:
-        has_discussion = check_for_discussion(model_name)
-    except HfHubHTTPError:
-        has_discussion = True
+    # try:
+    #     has_discussion = check_for_discussion(model_name)
+    # except HfHubHTTPError:
+    #     has_discussion = True
     title = f"## Memory usage for '{model_name}'"
     data = calculate_memory(model, options)
     stages = {"model": [], "gradients": [], "optimizer": [], "step": []}
@@ -45,7 +45,6 @@ def get_results(model_name: str, library: str, options: list, access_token: str)
             gr.update(visible=True, value=pd.DataFrame(data)),
             gr.update(visible=True, value=out_explain),
             gr.update(visible=True, value=memory_values),
-            gr.update(visible=not has_discussion),
         ]
     else:
         return [
@@ -53,7 +52,6 @@ def get_results(model_name: str, library: str, options: list, access_token: str)
             gr.update(visible=True, value=pd.DataFrame(data)),
             gr.update(visible=False, value=""),
             gr.update(visible=False, value=pd.DataFrame()),
-            gr.update(visible=not has_discussion),
         ]


@@ -96,20 +94,13 @@ with gr.Blocks() as demo:
         access_token = gr.Textbox(label="API Token", placeholder="Optional (for gated models)")
     with gr.Row():
         btn = gr.Button("Calculate Memory Usage")
-        post_to_hub = gr.Button(
-            value="Report results in this model repo's discussions!\n(Will open in a new tab)", visible=False
-        )

     btn.click(
         get_results,
         inputs=[inp, library, options, access_token],
-        outputs=[out_text, out, out_explain, memory_values, post_to_hub],
+        outputs=[out_text, out, out_explain, memory_values],
         api_name=False,
     )

-    post_to_hub.click(lambda: gr.Button(visible=False), outputs=post_to_hub, api_name=False).then(
-        report_results, inputs=[inp, library, access_token]
-    )
-

 demo.launch()
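For context, a minimal self-contained sketch of what the wiring looks like after this commit. Only the btn.click call and its four-element outputs list mirror the diff above; the component choices (Radio, CheckboxGroup, Markdown, Dataframe), their labels, and the stand-in body of get_results are illustrative assumptions, not the Space's actual layout.

# Sketch of the post-commit wiring: the "Report results" button and its
# .then() chain are gone, and get_results feeds exactly four outputs.
import gradio as gr
import pandas as pd


def get_results(model_name: str, library: str, options: list, access_token: str):
    # Stand-in for the real get_results (which calls get_model / calculate_memory):
    # returns four updates matching the four outputs wired below.
    title = f"## Memory usage for '{model_name}'"
    data = [{"dtype": "float32", "Total Size": "N/A"}]  # placeholder rows
    return [
        gr.update(visible=True, value=title),
        gr.update(visible=True, value=pd.DataFrame(data)),
        gr.update(visible=False, value=""),
        gr.update(visible=False, value=pd.DataFrame()),
    ]


with gr.Blocks() as demo:
    # Hypothetical stand-ins for the Space's input components.
    inp = gr.Textbox(label="Model name")
    library = gr.Radio(["transformers", "timm"], label="Library")
    options = gr.CheckboxGroup(["float32", "float16"], label="dtypes")
    access_token = gr.Textbox(label="API Token", placeholder="Optional (for gated models)")
    # Hypothetical stand-ins for the output components.
    out_text = gr.Markdown(visible=False)
    out = gr.Dataframe(visible=False)
    out_explain = gr.Markdown(visible=False)
    memory_values = gr.Dataframe(visible=False)
    with gr.Row():
        btn = gr.Button("Calculate Memory Usage")

    # Post-commit: only this event handler remains; no post_to_hub button.
    btn.click(
        get_results,
        inputs=[inp, library, options, access_token],
        outputs=[out_text, out, out_explain, memory_values],
        api_name=False,
    )

demo.launch()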