Files changed (1)
  1. app.py +124 -6
app.py CHANGED
@@ -1,23 +1,141 @@
  import os
  import gradio as gr
  import requests
- import inspect
  import pandas as pd
+ from typing import TypedDict
+ from langgraph.graph import StateGraph, END

  # (Keep Constants as is)
  # --- Constants ---
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+ # Using a free Hugging Face model via Inference API (no auth required for public models)
+ HF_API_URL = "https://api-inference.huggingface.co/models"
+ HF_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"
+
+ # --- State Definition for LangGraph ---
+ class AgentState(TypedDict):
+     question: str
+     answer: str

  # --- Basic Agent Definition ---
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
  class BasicAgent:
      def __init__(self):
-         print("BasicAgent initialized.")
+         print("BasicAgent initialized with LangGraph and Hugging Face API.")
+         self.api_url = f"{HF_API_URL}/{HF_MODEL_ID}"
+         print(f"Using Hugging Face Inference API: {self.api_url}")
+
+         # Build LangGraph workflow
+         self.workflow = self._build_graph()
+
+     def _build_graph(self):
+         """Build the LangGraph workflow for answering questions."""
+         workflow = StateGraph(AgentState)
+
+         # Add node: process question
+         workflow.add_node("process_question", self._process_question)
+
+         # Set entry point
+         workflow.set_entry_point("process_question")
+
+         # Connect to end
+         workflow.add_edge("process_question", END)
+
+         # Compile the graph
+         return workflow.compile()
+
+     def _call_hf_api(self, prompt: str) -> str:
+         """Call Hugging Face Inference API directly (free, no auth required for public models)."""
+         try:
+             # Format prompt for Mistral instruct model
+             formatted_prompt = f"<s>[INST] You are a helpful AI assistant. Answer the question concisely and accurately. Provide only the answer without any additional text like 'FINAL ANSWER' or explanations.\n\nQuestion: {prompt} [/INST]"
+
+             payload = {
+                 "inputs": formatted_prompt,
+                 "parameters": {
+                     "max_new_tokens": 512,
+                     "temperature": 0.7,
+                     "return_full_text": False
+                 }
+             }
+
+             response = requests.post(
+                 self.api_url,
+                 json=payload,
+                 headers={"Content-Type": "application/json"},
+                 timeout=30
+             )
+
+             if response.status_code == 200:
+                 result = response.json()
+                 # Extract generated text (the HF API returns a list containing a dict)
+                 generated_text = ""
+                 if isinstance(result, list) and len(result) > 0:
+                     if isinstance(result[0], dict):
+                         generated_text = result[0].get("generated_text", "")
+                     else:
+                         generated_text = str(result[0])
+                 elif isinstance(result, dict):
+                     generated_text = result.get("generated_text", result.get("text", ""))
+                 else:
+                     generated_text = str(result)
+
+                 # Clean the response
+                 answer = generated_text.strip()
+                 # Strip "FINAL ANSWER:" if present (as per the requirements)
+                 answer_upper = answer.upper()
+                 if "FINAL ANSWER:" in answer_upper:
+                     parts = answer.split("FINAL ANSWER:", 1)
+                     if len(parts) > 1:
+                         answer = parts[1].strip()
+                 elif "FINAL ANSWER" in answer_upper:
+                     parts = answer.split("FINAL ANSWER", 1)
+                     if len(parts) > 1:
+                         answer = parts[1].strip()
+
+                 return answer
+             elif response.status_code == 503:
+                 # Model is still loading on the HF side; report a transient error
+                 error_msg = "Model is loading, please try again in a moment."
+                 print(f"Warning: {error_msg}")
+                 return error_msg
+             else:
+                 error_msg = f"API returned status {response.status_code}: {response.text[:200]}"
+                 print(f"Error: {error_msg}")
+                 return f"Error: {error_msg}"
+
+         except requests.exceptions.Timeout:
+             return "Error: Request to Hugging Face API timed out."
+         except requests.exceptions.RequestException as e:
+             return f"Error: Failed to connect to Hugging Face API - {str(e)}"
+         except Exception as e:
+             return f"Error: Unexpected error - {str(e)}"
+
+     def _process_question(self, state: AgentState) -> AgentState:
+         """Process the question and generate an answer using the Hugging Face API."""
+         question = state["question"]
+         print(f"Processing question: {question[:100]}...")
+
+         # Call the Hugging Face API
+         answer = self._call_hf_api(question)
+         print(f"Generated answer (first 100 chars): {answer[:100]}...")
+
+         return {"question": question, "answer": answer}
+
      def __call__(self, question: str) -> str:
+         """Main entry point for the agent."""
          print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
+
+         # Run the LangGraph workflow
+         try:
+             initial_state = {"question": question, "answer": ""}
+             result = self.workflow.invoke(initial_state)
+             answer = result.get("answer", "No answer generated.")
+             print(f"Agent returning answer: {answer[:100]}...")
+             return answer
+         except Exception as e:
+             print(f"Error in agent workflow: {e}")
+             return f"Error processing question: {str(e)}"

  def run_and_submit_all( profile: gr.OAuthProfile | None):
      """
 
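A note on the 503 branch in _call_hf_api above: the hosted model can take a while to spin up, and the code returns an error string on the first 503 rather than retrying. A minimal retry helper, as a sketch only, assuming the same endpoint, payload shape, and requests dependency as the diff (post_with_retry, max_retries, and wait_seconds are illustrative names, not part of app.py):

import time
import requests

def post_with_retry(url: str, payload: dict, max_retries: int = 3, wait_seconds: float = 10.0) -> requests.Response:
    """POST to the Inference API, retrying while the model is loading (HTTP 503). Illustrative helper, not part of app.py."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, json=payload, headers=headers, timeout=30)
    for _ in range(max_retries):
        if response.status_code != 503:
            break
        # 503 means the model is still warming up; back off before the next attempt
        time.sleep(wait_seconds)
        response = requests.post(url, json=payload, headers=headers, timeout=30)
    return response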
@@ -91,7 +209,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
          print("Agent did not produce any answers to submit.")
          return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

-     # 4. Prepare Submission
+     # 4. Prepare Submission
      submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
      status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
      print(status_update)
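For a quick check of the new agent outside the Gradio UI, a smoke test along these lines should work, assuming langgraph is installed and the Inference API is reachable (the sample question is illustrative):

from app import BasicAgent  # assumes the file above is saved as app.py

agent = BasicAgent()
# __call__ runs the compiled graph: entry point -> process_question -> END
print(agent("What is the capital of France?"))  # illustrative sample question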