Update Gradio_UI.py
Gradio_UI.py +10 -8
Gradio_UI.py
CHANGED
@@ -130,24 +130,26 @@ def stream_to_gradio(
     additional_args: Optional[dict] = None,
 ):
     """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
-    if not _is_package_available("gradio"):
+    if not is_package_available("gradio"):
         raise ModuleNotFoundError(
             "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
         )
     import gradio as gr
-
     total_input_tokens = 0
     total_output_tokens = 0
-
     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
         # Track tokens if model provides them
         if hasattr(agent.model, "last_input_token_count"):
-            total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
+            # Check if token counts are None before adding
+            input_tokens = agent.model.last_input_token_count or 0
+            output_tokens = agent.model.last_output_token_count or 0
+
+            total_input_tokens += input_tokens
+            total_output_tokens += output_tokens
+
             if isinstance(step_log, ActionStep):
-                step_log.input_token_count = agent.model.last_input_token_count
-                step_log.output_token_count = agent.model.last_output_token_count
-
+                step_log.input_token_count = input_tokens
+                step_log.output_token_count = output_tokens
         for message in pull_messages_from_step(
             step_log,
         ):
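For context on the change: the new comment notes that the token counts can be None, and when `last_input_token_count` is None the old in-place addition raises a TypeError (adding None to an int). The `or 0` fallback keeps the running totals as integers. Below is a minimal sketch of that behaviour; `DummyModel` is a hypothetical stand-in for `agent.model` when no usage was reported, not a real smolagents class.

# Hypothetical stand-in for agent.model when the last call reported no usage.
class DummyModel:
    last_input_token_count = None
    last_output_token_count = None

model = DummyModel()
total_input_tokens = 0

# Old behaviour: adding None to an int raises TypeError.
try:
    total_input_tokens += model.last_input_token_count
except TypeError:
    pass  # this is the failure the commit guards against

# New behaviour: fall back to 0 when the count is missing.
input_tokens = model.last_input_token_count or 0
total_input_tokens += input_tokens
print(total_input_tokens)  # -> 0

Note that `x or 0` also maps a reported count of 0 to 0, which is harmless here since adding zero leaves the totals unchanged.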