import os
import gradio as gr
import requests
import inspect
import pandas as pd
from dotenv import load_dotenv
from typing import TypedDict, Annotated, List
# ==============================================================================
# PART 1: YOUR AGENT'S LOGIC AND DEFINITION
# This section replaces the placeholder BasicAgent from the course template.
# ==============================================================================
# LangChain and LangGraph imports
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_experimental.tools import PythonREPLTool
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
# Load API keys from .env file or Space secrets
load_dotenv()
hf_token = os.getenv("HF_TOKEN")
tavily_api_key = os.getenv("TAVILY_API_KEY")
# Set the Tavily API key for the tool to use
if tavily_api_key:
os.environ["TAVILY_API_KEY"] = tavily_api_key
else:
print("Warning: TAVILY_API_KEY not found. Web search tool will not work.")
# --- Define Agent Tools ---
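# Two tools: Tavily web search for up-to-date information, and a Python REPL
# for calculations. Note that PythonREPLTool executes arbitrary Python locally.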
tools = [
TavilySearchResults(max_results=3, description="A search engine for finding up-to-date information on the web."),
PythonREPLTool()
]
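# ToolNode executes whichever tool calls the model emits and feeds the results
# back into the graph state as tool messages.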
tool_node = ToolNode(tools)
# --- Configure the LLM "Brain" ---
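# Note: Meta-Llama-3-8B-Instruct is a gated repository, so the account behind
# HF_TOKEN must have accepted the Llama 3 license for the endpoint to respond.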
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
SYSTEM_PROMPT = """You are a highly capable AI agent. Your mission is to accurately answer complex questions.
**Instructions:**
1. **Analyze:** Read the question to understand what is being asked.
2. **Plan:** Think step-by-step. Break the problem into smaller tasks. Decide which tool is best for each task.
3. **Execute:** Call ONE tool at a time.
4. **Observe & Reason:** After getting a tool's result, observe it. Decide if you have the final answer or if you need to use another tool.
5. **Final Answer:** Once confident, provide a clear, direct, and concise final answer."""
llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    huggingfacehub_api_token=hf_token,
    temperature=0,
    max_new_tokens=2048,
)
# HuggingFaceEndpoint is a plain text-completion LLM and has no bind_tools();
# wrap it in ChatHuggingFace to get a chat model with tool-calling support.
chat_model = ChatHuggingFace(llm=llm)
llm_with_tools = chat_model.bind_tools(tools)
# --- Build the LangGraph Agent ---
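# The shared graph state is a growing list of messages; the Annotated reducer
# below concatenates each node's output onto the history instead of replacing it.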
class AgentState(TypedDict):
messages: Annotated[List[BaseMessage], lambda x, y: x + y]
def agent_node(state):
    # Pass the full conversation (system prompt, question, and any tool results)
    # so the model keeps the original question in context after each tool call.
    messages = [SystemMessage(content=SYSTEM_PROMPT)] + state["messages"]
    response = llm_with_tools.invoke(messages)
    return {"messages": [response]}
def should_continue(state):
if state["messages"][-1].tool_calls:
return "tools"
return END
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)
workflow.set_entry_point("agent")
# should_continue returns either "tools" or the END sentinel, so the path map
# must key on END rather than the string "end".
workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
workflow.add_edge("tools", "agent")
# Compile the graph into a runnable app
compiled_agent_app = workflow.compile()
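# Minimal local sanity check, kept commented out so it never runs on Space
# startup; the question string is only an illustrative placeholder.
# if __name__ == "__main__":
#     state = compiled_agent_app.invoke(
#         {"messages": [HumanMessage(content="Who wrote 'The Old Man and the Sea'?")]},
#         {"recursion_limit": 15},
#     )
#     print(state["messages"][-1].content)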
# --- The BasicAgent class that replaces the template's placeholder ---
class BasicAgent:
def __init__(self):
# Check for API keys during initialization
if not hf_token or not tavily_api_key:
raise ValueError("HF_TOKEN or TAVILY_API_KEY not set. Please add them to your Space secrets.")
print("LangGraph Agent initialized successfully.")
def __call__(self, question: str) -> str:
print(f"Agent received question (first 80 chars): {question[:80]}...")
try:
inputs = {"messages": [HumanMessage(content=question)]}
final_response = ""
for s in compiled_agent_app.stream(inputs, {"recursion_limit": 15}):
if "agent" in s and s["agent"]["messages"][-1].content:
final_response = s["agent"]["messages"][-1].content
if not final_response:
final_response = "Agent finished but did not produce a clear final answer."
print(f"Agent returning final answer (first 80 chars): {final_response[:80]}...")
return final_response
except Exception as e:
print(f"An error occurred in agent execution: {e}")
return f"Error: {e}"
# ==============================================================================
# PART 2: THE GRADIO TEST HARNESS UI
# The rest of this file follows the course template (with its error handling condensed).
# ==============================================================================
# --- Constants ---
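# Scoring service for the course; the harness below calls its /questions and
# /submit endpoints.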
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
Fetches all questions, runs the BasicAgent on them, submits all answers,
and displays the results.
"""
    # (This function follows the course template.)
# --- Determine HF Space Runtime URL and Repo URL ---
space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
if profile:
username= f"{profile.username}"
print(f"User logged in: {username}")
else:
print("User not logged in.")
return "Please Login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
# 1. Instantiate Agent (this now calls YOUR agent class from above)
try:
agent = BasicAgent()
except Exception as e:
print(f"Error instantiating agent: {e}")
return f"Error initializing agent: {e}", None
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(agent_code)
# 2. Fetch Questions
print(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
print("Fetched questions list is empty.")
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(questions_data)} questions.")
except Exception as e:
print(f"An unexpected error occurred fetching questions: {e}")
return f"An unexpected error occurred fetching questions: {e}", None
# 3. Run your Agent
results_log = []
answers_payload = []
print(f"Running agent on {len(questions_data)} questions...")
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
print(f"Skipping item with missing task_id or question: {item}")
continue
try:
# This line now calls your __call__ method in your new BasicAgent
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
print(f"Error running agent on task {task_id}: {e}")
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
if not answers_payload:
print("Agent did not produce any answers to submit.")
return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
# 4. Prepare Submission
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
print(status_update)
# 5. Submit
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
print("Submission successful.")
results_df = pd.DataFrame(results_log)
return final_status, results_df
except Exception as e:
status_message = f"An unexpected error occurred during submission: {e}"
print(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
gr.Markdown("# GAIA Agent Evaluation Runner")
gr.Markdown(
"""
1. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
2. Click 'Run Evaluation & Submit All Answers' to run your custom agent and see the score.
"""
)
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
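    # No inputs are wired here: because run_and_submit_all accepts a
    # gr.OAuthProfile argument, Gradio injects the logged-in user's profile
    # automatically (or None when the user is not logged in).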
run_button.click(
fn=run_and_submit_all,
outputs=[status_output, results_table]
)
if __name__ == "__main__":
demo.launch(debug=True, share=False)