Add prompt logging and an opt-out option for users. Additional logging and timing info also added.
Files changed:
- .gitignore +2 -1
- app.py +99 -19
- log_chat.py +235 -0
- requirements.txt +3 -1
- styles.css +36 -2
- timer.py +114 -0
- utils.py +15 -9
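
In outline: app.py now stamps each fresh conversation with a chat_id, carries a per-session opt_out flag, and only calls log_chat() (which enqueues the record for a background uploader) when that flag is unset. A minimal sketch of the gating below; the state keys, model name, and temperature mirror the diff, while the log_chat stub is illustrative only:

from uuid import uuid4

def log_chat(**record):
    # Stub standing in for log_chat.log_chat, which enqueues the record
    # for a background worker that appends it to a HF dataset CSV.
    print("would log chat", record["chat_id"])

state = {"opt_out": False, "chat_id": uuid4().hex, "session": uuid4().hex}

if state["opt_out"] is not True:
    log_chat(chat_id=state["chat_id"], session_id=state["session"],
             model_name="Apriel-Nemotron-15b-Thinker", prompt="Hello",
             history=[{"role": "user", "content": "Hello"}],
             info={"temperature": 0.8, "stopped": False})
else:
    print("User opted out of chat history. Not logging chat.")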
    	
.gitignore CHANGED

@@ -1,3 +1,4 @@
 .idea/*
 __pycache__/
-
+/.run*/
+/train.csv
    	
app.py CHANGED

@@ -1,16 +1,19 @@
 import datetime
+from uuid import uuid4
 
 from openai import OpenAI
 import gradio as gr
 
 from theme import apriel
-from utils import COMMUNITY_POSTFIX_URL, get_model_config, log_message, check_format, models_config, …
+from utils import COMMUNITY_POSTFIX_URL, get_model_config, log_message, check_format, models_config, \
+    logged_event_handler, DEBUG_MODEL
+from log_chat import log_chat
 
 MODEL_TEMPERATURE = 0.8
 BUTTON_WIDTH = 160
+DEFAULT_OPT_OUT_VALUE = False
 
-DEFAULT_MODEL_NAME = "Apriel-Nemotron-15b-Thinker" if not …
-# DEFAULT_MODEL_NAME = "Apriel-5b"
+DEFAULT_MODEL_NAME = "Apriel-Nemotron-15b-Thinker" if not DEBUG_MODEL else "Apriel-5b"
 
 print(f"Gradio version: {gr.__version__}")
 
@@ -31,6 +34,13 @@ model_config = {}
 openai_client = None
 
 
+def app_loaded(state, request: gr.Request):
+    message_html = setup_model(DEFAULT_MODEL_NAME, intial=False)
+    state['session'] = request.session_hash if request else uuid4().hex
+    log_message(f"app_loaded() --> Session: {state['session']}")
+    return state, message_html
+
+
 def update_model_and_clear_chat(model_name):
     actual_model_name = model_name.replace("Model: ", "")
     desc = setup_model(actual_model_name)
@@ -75,10 +85,19 @@ def stop_chat(state):
     return state
 
 
+def toggle_opt_out(state, checkbox):
+    state["opt_out"] = checkbox
+    return state
+
+
 def run_chat_inference(history, message, state):
     global chat_start_count
     state["is_streaming"] = True
     state["stop_flag"] = False
+    error = None
+
+    if len(history) == 0:
+        state["chat_id"] = uuid4().hex
 
     # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn, session_state
     log_message(f"{'-' * 80}")
@@ -124,9 +143,21 @@ def run_chat_inference(history, message, state):
             )
         except Exception as e:
             print(f"Error: {e}")
+            error = str(e)
             yield ([{"role": "assistant",
                      "content": "😔 The model is unavailable at the moment. Please try again later."}],
                    INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state)
+            if state["opt_out"] is not True:
+                log_chat(chat_id=state["chat_id"],
+                         session_id=state["session"],
+                         model_name=model_config.get('MODEL_NAME'),
+                         prompt=message,
+                         history=history,
+                         info={"is_reasoning": model_config.get("REASONING"), "temperature": MODEL_TEMPERATURE,
+                               "stopped": True, "error": str(e)},
+                         )
+            else:
+                log_message(f"User opted out of chat history. Not logging chat.")
             return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
 
         if is_reasoning:
@@ -197,9 +228,21 @@ def run_chat_inference(history, message, state):
         check_format(history, "messages")
         yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
     finally:
+        if error is None:
+            log_message(f"chat_fn() --> Finished streaming. {chat_start_count} chats started.")
+            if state["opt_out"] is not True:
+                log_chat(chat_id=state["chat_id"],
+                         session_id=state["session"],
+                         model_name=model_config.get('MODEL_NAME'),
+                         prompt=message,
+                         history=history,
+                         info={"is_reasoning": model_config.get("REASONING"), "temperature": MODEL_TEMPERATURE,
+                               "stopped": state["stop_flag"]},
+                         )
+            else:
+                log_message(f"User opted out of chat history. Not logging chat.")
         state["is_streaming"] = False
         state["stop_flag"] = False
-        log_message(f"chat_fn() --> Finished streaming. {chat_start_count} chats started.")
        return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
 
 
@@ -211,7 +254,13 @@ with open('styles.css', 'r') as f:
     custom_css = f.read()
 
 with gr.Blocks(theme=theme, css=custom_css) as demo:
-    session_state = gr.State(value={…
+    session_state = gr.State(value={
+        "is_streaming": False,
+        "stop_flag": False,
+        "chat_id": None,
+        "session": None,
+        "opt_out": DEFAULT_OPT_OUT_VALUE,
+    })  # Store session state as a dictionary
 
     gr.HTML(f"""
         <style>
@@ -234,21 +283,22 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
                 min_width=400
             )
         with gr.Column(scale=4, min_width=0):
-            …
+            feedback_message_html = gr.HTML(description, elem_classes="model-message")
 
     chatbot = gr.Chatbot(
         type="messages",
-        height="calc(100dvh - …
+        height="calc(100dvh - 310px)",
         elem_classes="chatbot",
     )
 
     with gr.Row():
-        with gr.Column(scale=10, min_width=400…
-            …
-            …
-…
-…
-…
+        with gr.Column(scale=10, min_width=400):
+            with gr.Row():
+                user_input = gr.Textbox(
+                    show_label=False,
+                    placeholder="Type your message here and press Enter",
+                    container=False
+                )
         with gr.Column(scale=1, min_width=BUTTON_WIDTH * 2 + 20):
             with gr.Row():
                 with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="send-button-container"):
@@ -256,12 +306,28 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
                     stop_btn = gr.Button("Stop", variant="cancel", visible=False)
                 with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="clear-button-container"):
                     clear_btn = gr.ClearButton(chatbot, value="New Chat", variant="secondary")
+    with gr.Row():
+        with gr.Column(min_width=400, elem_classes="opt-out-container"):
+            with gr.Row():
+                gr.HTML(
+                    "We may use your chats to improve our AI. You may opt out if you don’t want your conversations saved.",
+                    elem_classes="opt-out-message")
+            with gr.Row():
+                opt_out_checkbox = gr.Checkbox(
+                    label="Don’t save my chat history for improvements or training",
+                    value=DEFAULT_OPT_OUT_VALUE,
+                    elem_classes="opt-out-checkbox",
+                    interactive=True,
+                    container=False
+                )
 
     gr.on(
         triggers=[send_btn.click, user_input.submit],
         fn=run_chat_inference,  # this generator streams results. do not use logged_event_handler wrapper
         inputs=[chatbot, user_input, session_state],
-        outputs=[chatbot, user_input, send_btn, stop_btn, clear_btn, session_state]
+        outputs=[chatbot, user_input, send_btn, stop_btn, clear_btn, session_state],
+        concurrency_limit=4,
+        api_name=False
     ).then(
         fn=chat_finished, inputs=None, outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn], queue=False)
 
@@ -272,22 +338,36 @@ with gr.Blocks(theme=theme, css=custom_css) as demo:
         inputs=None,
         outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn],
         queue=False,
-        show_progress='hidden'
+        show_progress='hidden',
+        api_name=False
     )
 
     stop_btn.click(
         fn=stop_chat,
         inputs=[session_state],
-        outputs=[session_state]
+        outputs=[session_state],
+        api_name=False
     )
 
+    opt_out_checkbox.change(fn=toggle_opt_out, inputs=[session_state, opt_out_checkbox], outputs=[session_state])
+
     # Ensure the model is reset to default on page reload
-    demo.load(…
+    demo.load(
+        fn=logged_event_handler(
+            log_msg="Browser session started",
+            event_handler=app_loaded
+        ),
+        inputs=[session_state],
+        outputs=[session_state, feedback_message_html],
+        queue=True,
+        api_name=False
+    )
 
     model_dropdown.change(
         fn=update_model_and_clear_chat,
         inputs=[model_dropdown],
-        outputs=[…
+        outputs=[feedback_message_html, chatbot],
+        api_name=False
     )
 
-demo.launch(ssr_mode=False, show_api=False)
+demo.queue(default_concurrency_limit=2).launch(ssr_mode=False, show_api=False)
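
A note on the launch change: demo.queue(default_concurrency_limit=2) enables the queue and caps each event handler at two concurrent runs by default, while the chat handler's own concurrency_limit=4 on gr.on() overrides that default for streaming (per Gradio's documented queue semantics); api_name=False keeps these handlers out of the auto-generated API surface.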
    	
log_chat.py ADDED

@@ -0,0 +1,235 @@
import csv
import os
import time
from datetime import datetime
from queue import Queue
import threading

import pandas as pd
from gradio import ChatMessage
from huggingface_hub import HfApi, hf_hub_download

from timer import Timer
from utils import log_warning, log_message

HF_TOKEN = os.environ.get("HF_TOKEN")
DATASET_REPO_ID = os.environ.get("APRIEL_PROMPT_DATASET")
CSV_FILENAME = "train.csv"


def log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> None:
    log_chat_queue.put((chat_id, session_id, model_name, prompt, history, info))


def _log_chat_worker():
    while True:
        chat_id, session_id, model_name, prompt, history, info = log_chat_queue.get()
        try:
            try:
                _log_chat(chat_id, session_id, model_name, prompt, history, info)
            except Exception as e:
                log_warning(f"Error logging chat: {e}")
        finally:
            log_chat_queue.task_done()


def _log_chat(chat_id: str, session_id: str, model_name: str, prompt: str, history: list[str], info: dict) -> bool:
    if DATASET_REPO_ID is None:
        log_warning("No dataset repo ID provided. Skipping logging of prompt.")
        return False
    if HF_TOKEN is None:
        log_warning("No HF token provided. Skipping logging of prompt.")
        return False

    log_timer = Timer('log_chat')
    log_timer.start()

    # Initialize HF API
    api = HfApi(token=HF_TOKEN)

    # Check if the dataset repo exists; if not, create it
    try:
        repo_info = api.repo_info(repo_id=DATASET_REPO_ID, repo_type="dataset")
        log_message(f"log_chat() --> Dataset repo found: {repo_info.id} private={repo_info.private}")
    except Exception:  # Create a new dataset if none exists
        log_message(f"log_chat() --> No dataset repo found, creating a new one...")
        api.create_repo(repo_id=DATASET_REPO_ID, repo_type="dataset", private=True)

    # Ensure messages are in the correct format
    messages = [
        {"role": item.role, "content": item.content,
         "type": "thought" if item.metadata else "completion"} if isinstance(
            item, ChatMessage) else item
        for item in history
        if isinstance(item, dict) and "role" in item and "content" in item or isinstance(item, ChatMessage)
    ]
    if len(messages) != len(history):
        log_warning("log_chat() --> Some messages in history are missing 'role' or 'content' keys.")

    user_messages_count = sum(1 for item in messages if isinstance(item, dict) and item.get("role") == "user")

    # These must match the keys in the new row
    expected_headers = ["timestamp", "chat_id", "turns", "prompt", "messages", "model", "session_id", "info"]
    # Prepare new data row
    new_row = {
        "timestamp": datetime.now().isoformat(),
        "chat_id": chat_id,
        "turns": user_messages_count,
        "prompt": prompt,
        "messages": messages,
        "model": model_name,
        "session_id": session_id,
        "info": info,
    }
    log_timer.add_step("Prepared new data row")

    # Try to download existing CSV with retry logic
    max_retries = 3
    retry_count = 0
    file_exists = False
    while retry_count < max_retries:
        try:
            csv_path = hf_hub_download(
                repo_id=DATASET_REPO_ID,
                filename=CSV_FILENAME,
                repo_type="dataset",
                token=HF_TOKEN  # Only needed if not already logged in
            )
            pd.read_csv(csv_path)
            file_exists = True
            log_message(f"log_chat() --> Downloaded existing CSV with {len(pd.read_csv(csv_path))} rows")
            break  # Success, exit the loop
        except Exception as e:
            retry_count += 1
            if retry_count < max_retries:
                retry_delay = 2 * retry_count  # Linear backoff: 2s, 4s
                log_warning(
                    f"log_chat() --> Download attempt {retry_count} failed: {e}. Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
            else:
                log_warning(f"log_chat() --> Failed to download CSV after {max_retries} attempts: {e}")
            file_exists = False

    log_timer.add_step(f"Downloaded existing CSV (attempts: {retry_count + 1})")

    # Handle the case where the CSV file does not exist or is invalid
    if file_exists and len(pd.read_csv(csv_path)) == 0:
        log_warning(f"log_chat() --> CSV {csv_path} exists but is empty, will create a new one.")
        dump_hub_csv()
        file_exists = False
    elif file_exists:
        # Check that the headers match our standard headers of "timestamp", "chat_id", "turns", ...
        existing_headers = pd.read_csv(csv_path).columns.tolist()
        if set(existing_headers) != set(expected_headers):
            log_warning(f"log_chat() --> CSV {csv_path} has unexpected headers: {existing_headers}. "
                        f"\nExpected {expected_headers}. "
                        f"Will create a new one.")
            dump_hub_csv()
            file_exists = False
        else:
            log_message(f"log_chat() --> CSV {csv_path} has expected headers: {existing_headers}")

    # Write out the new row to the CSV file (append isn't working in HF container, so recreate each time)
    log_message(f"log_chat() --> Writing CSV file, file_exists={file_exists}")
    try:
        with open(CSV_FILENAME, "w", newline="\n") as f:
            writer = csv.DictWriter(f, fieldnames=new_row.keys())
            writer.writeheader()  # Always write the header
            if file_exists:
                for _, row in pd.read_csv(csv_path).iterrows():
                    writer.writerow(row.to_dict())  # Write existing rows
            writer.writerow(new_row)  # Write the new row

        log_message("log_chat() --> Wrote out CSV with new row")
        # dump_local_csv()
    except Exception as e:
        log_warning(f"log_chat() --> Error writing to CSV: {e}")
        return False

    # Upload updated CSV
    api.upload_file(
        path_or_fileobj=CSV_FILENAME,
        path_in_repo=CSV_FILENAME,
        repo_id=DATASET_REPO_ID,
        repo_type="dataset",
        commit_message=f"Added new chat entry at {datetime.now().isoformat()}"
    )
    log_timer.add_step("Uploaded updated CSV")
    log_timer.end()
    log_message("log_chat() --> Finished logging chat")
    log_message(log_timer.formatted_result())

    return True


def dump_hub_csv():
    # Verify the file contents by loading it from the hub and printing it out
    try:
        csv_path = hf_hub_download(
            repo_id=DATASET_REPO_ID,
            filename=CSV_FILENAME,
            repo_type="dataset",
            token=HF_TOKEN  # Only needed if not already logged in
        )
        df = pd.read_csv(csv_path)
        print(df)
        if df.empty:
            # show raw contents of downloaded csv file
            print("Raw file contents:")
            with open(csv_path, 'r') as f:
                print(f.read())
    except Exception as e:
        print(f"Error loading CSV from hub: {e}")


def dump_local_csv():
    # Verify the file contents by loading it from the local file and printing it out
    try:
        df = pd.read_csv(CSV_FILENAME)
        print(df)
    except Exception as e:
        print(f"Error loading CSV from local file: {e}")


def test_log_chat():
    # Example usage
    chat_id = "12345"
    session_id = "67890"
    model_name = "Apriel-Model"
    prompt = "Hello"
    history = [{"role": "user", "content": prompt}, {"role": "assistant", "content": "Hi there!"}]
    prompt = "100 + 1"
    history = [{'role': 'user', 'content': prompt}, ChatMessage(
        content='Okay, that\'s a simple addition problem. , answer is 2.\n', role='assistant',
        metadata={'title': '🧠 Thought'}, options=[]),
               ChatMessage(content='\nThe result of adding 1 and 1 is:\n\n**2**\n', role='assistant', metadata={},
                           options=[])
               ]
    info = {"additional_info": "Some extra data"}

    log_message("Starting test_log_chat()")
    dump_hub_csv()
    log_chat(chat_id, session_id, model_name, prompt, history, info)
    log_message("log_chat 1 returned")
    log_chat(chat_id, session_id, model_name, prompt + " + 2", history, info)
    log_message("log_chat 2 returned")
    log_chat(chat_id, session_id, model_name, prompt + " + 3", history, info)
    log_message("log_chat 3 returned")
    log_chat(chat_id, session_id, model_name, prompt + " + 4", history, info)
    log_message("log_chat 4 returned")

    sleep_seconds = 10
    log_message(f"Sleeping {sleep_seconds} seconds to let it finish and log the result.")
    time.sleep(sleep_seconds)
    log_message("Finished sleeping.")
    dump_hub_csv()


# Create a queue for logging chat messages
log_chat_queue = Queue()

# Start the worker thread
threading.Thread(target=_log_chat_worker, daemon=True).start()

if __name__ == "__main__":
    test_log_chat()
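
log_chat.py deliberately decouples the request path from the slow hub round-trip: the public log_chat() only enqueues, and a daemon thread drains the queue one record at a time, so a slow or failing upload never blocks streaming. A minimal sketch of the same producer/consumer pattern (the sleep stands in for the download/rewrite/upload of train.csv):

import threading
import time
from queue import Queue

q = Queue()

def worker():
    while True:
        record = q.get()
        try:
            time.sleep(0.1)  # stands in for the hub download/rewrite/upload
            print("uploaded", record["chat_id"])
        finally:
            q.task_done()

threading.Thread(target=worker, daemon=True).start()

q.put({"chat_id": "abc123"})  # returns immediately; work happens in the background
q.join()  # demo only; the app never blocks on the queue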
    	
requirements.txt CHANGED

@@ -1,3 +1,5 @@
 huggingface_hub==0.28.1
 gradio==5.29.0
-openai
+openai~=1.78.0
+pandas~=2.2.3
+datasets~=2.14.4
    	
styles.css CHANGED

@@ -42,6 +42,16 @@ button.cancel:hover, .cancel[disabled] {
     color: var(--button-cancel-text-color-hover);
 }
 
+.opt-out-message {
+    top: 8px;
+}
+
+.opt-out-message .html-container, .opt-out-checkbox label {
+    font-size: 14px !important;
+    padding: 0 !important;
+    margin: 0 !important;
+    color: var(--neutral-400) !important;
+}
 
 @media (max-width: 800px) {
     .responsive-row {
@@ -59,7 +69,7 @@ button.cancel:hover, .cancel[disabled] {
     }
 
     .chatbot {
-        max-height: …
+        max-height: 800px;
     }
 }
 
@@ -79,6 +89,30 @@ button.cancel:hover, .cancel[disabled] {
     }
 
     .chatbot {
-        max-height: …
+        max-height: 360px;
+    }
+}
+
+@media (max-width: 1280px) {
+    .chatbot {
+        max-height: 900px;
+    }
+}
+
+@media (max-height: 932px) {
+    .chatbot {
+        max-height: calc(100dvh - 400px);
     }
 }
+
+@media (max-height: 1280px) {
+    .chatbot {
+        max-height: calc(100dvh - 400px);
+    }
+}
+
+@media (min-height: 1281px) {
+    .chatbot {
+        /*max-height: calc(100dvh - 400px);*/
+    }
+}
    	
timer.py ADDED

@@ -0,0 +1,114 @@
import time
import json


class Timer:
    def __init__(self, name=None):
        self.name = name
        self.start_time = None
        self.steps = []
        self.total_time = None

    def clear(self):
        self.start_time = None
        self.steps = []
        self.total_time = None

    def start(self):
        """Start the timer."""
        self.start_time = time.time()

    def is_running(self):
        return self.start_time is not None

    def add_step(self, step_name):
        """Add a step with its duration since the last step or start."""
        if self.start_time is None:
            self.start()

        current_time = time.time()
        if not self.steps:
            elapsed = current_time - self.start_time
        else:
            elapsed = current_time - self.steps[-1]['timestamp']

        self.steps.append({
            "step_name": step_name,
            "duration": round(elapsed, 4),
            "total_duration": round(current_time - self.start_time, 4),
            "timestamp": current_time
        })

    def end(self):
        """End the timer and calculate the total duration."""
        if self.start_time is None:
            raise RuntimeError("Timer has not been started.")

        if not self.steps:
            raise RuntimeError("No steps have been added.")

        self.total_time = time.time() - self.start_time

    def to_json(self):
        """Return a JSON of the timing steps."""
        if self.total_time is None:
            raise RuntimeError("Timer has not been ended.")

        output_steps = {}
        for step in self.steps:
            output_steps[step["step_name"]] = step["duration"]

        highlights = {"total_time": round(self.total_time, 4)}

        if self.name:
            highlights = {"name": self.name, **highlights}

        output = {
            **highlights,
            **output_steps
        }
        return output

    def to_json_str(self):
        """Return a human-readable JSON of the timing steps."""
        return json.dumps(self.to_json(), indent=4)

    def formatted_result(self):
        """Return a list of the steps, their duration, and total duration."""
        if self.total_time is None:
            raise RuntimeError("Timer has not been ended.")
        line_buffer = []
        if self.name:
            line_buffer.append(f"Timer: {self.name}")
        for step in self.steps:
            line_buffer.append(f"[{step['duration']:05.2f}s, {step['total_duration']:05.2f}s] {step['step_name']}")
        line_buffer.append(f"Total time: {self.total_time:.2f}s")
        return "\n".join(line_buffer)

    def log_formatted_result(self):
        print(self.formatted_result())


def example():
    # Example usage
    timer = Timer()
    timer.start()

    # Simulating some steps
    time.sleep(1)  # Simulate work for step 1
    timer.add_step("Step 1")

    time.sleep(2)  # Simulate work for step 2
    timer.add_step("Step 2")

    timer.end()

    # Print the timer output
    print(timer.formatted_result())
    print(timer.to_json_str())
| 111 | 
            +
             | 
| 112 | 
            +
             | 
| 113 | 
            +
            if __name__ == "__main__":
         | 
| 114 | 
            +
                example()
         | 
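Running example() prints something along these lines; the exact durations vary slightly from run to run, so the numbers below are illustrative only:

    [01.00s, 01.00s] Step 1
    [02.00s, 03.00s] Step 2
    Total time: 3.00s
    {
        "total_time": 3.0041,
        "Step 1": 1.0012,
        "Step 2": 2.0025
    }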
    	
        utils.py
    CHANGED
    
@@ -1,13 +1,14 @@
 import os
 import sys
+from functools import wraps
 from typing import Any, Literal

 from gradio import ChatMessage
 from gradio.components.chatbot import Message
-from functools import wraps

 COMMUNITY_POSTFIX_URL = "/discussions"
 DEBUG_MODE = False or os.environ.get("DEBUG_MODE") == "True"
+DEBUG_MODEL = False or os.environ.get("DEBUG_MODEL") == "True"

 models_config = {
     "Apriel-Nemotron-15b-Thinker": {
@@ -46,6 +47,10 @@ def log_message(message):
         print(f"≫≫≫ {message}")


+def log_warning(message):
+    print(f"‼️ {message}")
+
+
 # Gradio 5.0.1 had issues with checking the message formats.  5.29.0 does not!
 def check_format(messages: Any, type: Literal["messages", "tuples"] = "messages") -> None:
     if not DEBUG_MODE:
@@ -82,23 +87,24 @@ def check_format(messages: Any, type: Literal["messages", "tuples"] = "messages"
         )


+# Adds timing info for a gradio event handler (non-generator functions)
+def logged_event_handler(log_msg='', event_handler=None, log_timer=None, clear_timer=False):
     @wraps(event_handler)
     def wrapped_event_handler(*args, **kwargs):
         # Log before
+        if log_timer:
             if clear_timer:
+                log_timer.clear()
+            log_timer.add_step(f"Start: {log_msg}")
+        log_message(f"::: Before event: {log_msg}")

         # Call the original event handler
         result = event_handler(*args, **kwargs)

         # Log after
+        if log_timer:
+            log_timer.add_step(f"Completed: {log_msg}")
+        log_message(f"::: After event: {log_msg}")

         return result
