import os
import gradio as gr
from checks.status_check import is_endpoint_healthy
from checks.endpoint_utils import wake_endpoint


class ContentAgentUI:
    """
    Gradio UI that:
      - shows a minimal control panel first (status + Start button),
      - auto-initializes the agent on load if the endpoint is already healthy,
      - otherwise lets the user 'Start Agent' (wake -> health -> init),
      - reveals the main chat panel (with header, guidance, examples, footer)
        after init.
    """

    def __init__(self,
                 endpoint_uri: str,
                 is_healthy: bool,
                 health_message: str,
                 agent_initializer,
                 agent_type: str,
                 compute: str
                 ):
        """
        Args:
            endpoint_uri: URI of the LLM inference endpoint.
            is_healthy: last-known health of the endpoint (coerced to bool).
            health_message: human-readable health detail (may be empty).
            agent_initializer: callable (uri) -> agent (smolagents CodeAgent).
            agent_type: display name of the model/agent backing the app.
            compute: display description of the compute the endpoint runs on.
        """
        self.endpoint_uri = endpoint_uri
        self.is_healthy = bool(is_healthy)
        self.health_message = health_message or ""
        self.agent_initializer = agent_initializer  # callable: (uri) -> CodeAgent
        self.agent_type = agent_type or ""
        self.compute = compute or ""

        # Widget/state handles; populated in build()
        self.app: gr.Blocks | None = None
        self.status_box = None
        self.control_panel = None
        self.main_panel = None
        self.prompt = None
        self.reply = None
        self.agent_state = None
        self.examples_radio = None

    # ---------- helpers ----------

    def _create_user_guidance(self):
        """Render the static usage and 'about the technology' markdown sections."""
        gr.Markdown("""
        Please enter text below to get started.  Content Agent will try to determine whether the language is polite and uses the following classification:
        - `polite`
        - `somewhat polite`
        - `neutral`
        - `impolite`

        Classification Scores
        - Scoring runs from 0 to 1
        """)
        gr.Markdown(f"""
        Technology:
        - App is running `{self.agent_type}` text generation model.
        - Agent uses Intel's Polite Guard NLP library tool
        - Compute: {self.compute}
        - Content Agent's LLM runs on-demand rather than using resources for 24 hrs/day, 7 days a week
        """)

    def _initial_status_text(self) -> str:
        # Neutral placeholder; on_load sets the real status and may auto-init.
        return "Checking endpoint status…"

    def _load_examples(self) -> list[str]:
        """Load example prompts from ./examples/*.txt next to this module.

        Best-effort: a missing directory yields an empty list, and files that
        cannot be read are silently skipped.
        """
        ex_dir = os.path.join(os.path.dirname(__file__), "examples")
        out: list[str] = []
        if os.path.isdir(ex_dir):
            for name in sorted(os.listdir(ex_dir)):
                if name.lower().endswith(".txt"):
                    path = os.path.join(ex_dir, name)
                    try:
                        with open(path, "r", encoding="utf-8", errors="ignore") as f:
                            out.append(f.read())
                    except OSError:
                        # Unreadable example file: skip rather than break the UI.
                        pass
        return out

    # ---------- agent call ----------

    @staticmethod
    def _call_agent(text: str, agent) -> str:
        """Run the agent on `text`; return its reply, or a friendly message/error string."""
        try:
            if agent is None:
                return "Content Agent's LLM is sleeping and will need to be started. Click 'Start Agent'."
            return str(agent.run(text))  # smolagents.CodeAgent API
        except Exception as e:
            # Surface the failure in the reply box instead of crashing the UI.
            return f"Error: {e}"

    # ---------- UI build ----------

    def build(self) -> gr.Blocks:
        """Construct the Gradio Blocks app (idempotent: built once, then cached)."""
        if self.app is not None:
            return self.app

        examples = self._load_examples()

        with gr.Blocks() as demo:
            # global header (always visible)
            gr.Markdown("# Content Agent")

            # Control panel (shown first; may auto-hide on load)
            with gr.Group(visible=True) as self.control_panel:
                self.status_box = gr.Textbox(
                    label="Status",
                    value=self._initial_status_text(),
                    lines=8,
                    interactive=False,
                )
                start_btn = gr.Button("Start Agent")
                gr.Markdown("It may take up to 5 minutes to wake up the agent")

            # Main panel (hidden until agent is initialized)
            with gr.Group(visible=False) as self.main_panel:
                # English only
                str_input = "Content Input"
                str_placeholder = "Copy and paste your content for evaluation here..."
                str_submit = "Submit"
                str_output = "Content feedback"

                # Guidance / about
                self._create_user_guidance()

                # Chat controls
                self.agent_state = gr.State(None)
                self.prompt = gr.Textbox(label=str_input, placeholder=str_placeholder)
                self.reply = gr.Textbox(label=str_output, interactive=False, lines=12, max_lines=20)
                submit_btn = gr.Button(str_submit)

                # Use bound methods to submit content
                submit_btn.click(self._call_agent, inputs=[self.prompt, self.agent_state], outputs=self.reply)
                self.prompt.submit(self._call_agent, inputs=[self.prompt, self.agent_state], outputs=self.reply)

                # Examples (optional)
                gr.Markdown("### Try one of these examples")
                if examples:
                    gr.Markdown("examples found")
                    self.examples_radio = gr.Radio(choices=examples, label="Examples")
                    # fill the prompt when an example is picked
                    self.examples_radio.change(lambda ex: ex, inputs=self.examples_radio, outputs=self.prompt)
                else:
                    gr.Markdown("*No examples found.*")

                # Footer
                gr.Markdown("\n")

            # --- AUTO INIT ON LOAD IF HEALTHY ---
            def on_load():
                """Check endpoint health on page load; auto-init the agent if healthy.

                Returns (status_text, control_panel_update, main_panel_update, agent).
                """
                healthy, msg = is_endpoint_healthy(self.endpoint_uri)
                if healthy:
                    try:
                        agent = self.agent_initializer(self.endpoint_uri)
                        return (
                            f"Endpoint healthy ✅ — {msg}. Agent initialized.",
                            gr.update(visible=False),  # hide control panel
                            gr.update(visible=True),   # show main panel
                            agent,
                        )
                    except Exception as e:
                        return (
                            f"Agent init failed: {e}",
                            gr.update(visible=True),
                            gr.update(visible=False),
                            None,
                        )
                # not healthy → keep Start button path
                return (
                    f"The AI LLM is sleeping due to inactivity: {msg}\nClick 'Start Agent' to wake and initialize.",
                    gr.update(visible=True),
                    gr.update(visible=False),
                    None,
                )

            demo.load(
                on_load,
                inputs=None,
                outputs=[self.status_box, self.control_panel, self.main_panel, self.agent_state],
            )

            # --- MANUAL START (wake → health → init) ---
            def on_start():
                """Generator handler: stream progress while waking, health-checking,
                and initializing the agent. Yields the same 4-tuple shape as on_load.
                """
                lines: list[str] = []

                def push(s: str):
                    # Append a progress line and emit the accumulated log.
                    lines.append(s)
                    return ("\n".join(lines), gr.update(), gr.update(), None)

                # Wake with progress
                yield push("Waking endpoint… (this can take several minutes for cold starts)")
                ok, err = wake_endpoint(self.endpoint_uri, max_wait=600, poll_every=5.0, log=lines.append)
                yield ("\n".join(lines), gr.update(), gr.update(), None)  # flush all logs
                if not ok:
                    yield push(f"[Server message] {err or 'wake failed'}")
                    return

                # Health → init
                yield push("Endpoint awake ✅. Checking health…")
                healthy, msg = is_endpoint_healthy(self.endpoint_uri)
                if not healthy:
                    yield push(f"[Server message] {msg}")
                    return

                yield push("Initializing agent…")
                try:
                    agent = self.agent_initializer(self.endpoint_uri)
                except Exception as e:
                    yield push(f"Agent init failed: {e}")
                    return

                yield ("Agent initialized ✅", gr.update(visible=False), gr.update(visible=True), agent)

            start_btn.click(
                on_start,
                inputs=None,
                outputs=[self.status_box, self.control_panel, self.main_panel, self.agent_state],
            )

        self.app = demo
        return self.app

    # ---------- public API ----------

    def launch(self, **kwargs):
        """Build (if needed) and launch the Gradio app, forwarding kwargs to launch()."""
        return self.build().launch(**kwargs)