Romain Fayoux committed on
Commit 3ac0a19 · 1 Parent(s): 7658a4e

Added file reading

Files changed (3):
  1. app.py +37 -21
  2. multi_agent.py +3 -3
  3. requirements.txt +1 -0
app.py CHANGED
@@ -2,6 +2,7 @@ import os
 import gradio as gr
 import requests
 import pandas as pd
+import json
 from phoenix.otel import register
 from openinference.instrumentation.smolagents import SmolagentsInstrumentor
 from llm_only_agent import LLMOnlyAgent
@@ -40,6 +41,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None, limit: int | None):

     api_url = DEFAULT_API_URL
     questions_url = f"{api_url}/questions"
+    files_url = f"{api_url}/files"
     submit_url = f"{api_url}/submit"

     # 1. Instantiate Agent ( modify this part to create your agent)
@@ -53,37 +55,51 @@ def run_and_submit_all( profile: gr.OAuthProfile | None, limit: int | None):
     print(agent_code)

     # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
-    try:
-        response = requests.get(questions_url, timeout=15)
-        response.raise_for_status()
-        questions_data = response.json()
-        if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
-        print(f"Fetched {len(questions_data)} questions.")
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching questions: {e}")
-        return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
-    except Exception as e:
-        print(f"An unexpected error occurred fetching questions: {e}")
-        return f"An unexpected error occurred fetching questions: {e}", None
+    # Fetch questions locally in test mode
+    task_ids = []
+    if task_ids:
+        print("Fetching questions from local file")
+        with open("data/questions.json", "r") as f:
+            questions_data = json.load(f)
+        questions_data = [q for q in questions_data if q['task_id'] in task_ids]
+    # Otherwise fetch from Hugging Face API
+    else:
+        print(f"Fetching questions from: {questions_url}")
+        try:
+            response = requests.get(questions_url, timeout=15)
+            response.raise_for_status()
+            questions_data = response.json()
+            if not questions_data:
+                print("Fetched questions list is empty.")
+                return "Fetched questions list is empty or invalid format.", None
+            print(f"Fetched {len(questions_data)} questions.")
+        except requests.exceptions.RequestException as e:
+            print(f"Error fetching questions: {e}")
+            return f"Error fetching questions: {e}", None
+        except requests.exceptions.JSONDecodeError as e:
+            print(f"Error decoding JSON response from questions endpoint: {e}")
+            print(f"Response text: {response.text[:500]}")
+            return f"Error decoding server response for questions: {e}", None
+        except Exception as e:
+            print(f"An unexpected error occurred fetching questions: {e}")
+            return f"An unexpected error occurred fetching questions: {e}", None

     # 3. Run your Agent
     results_log = []
     answers_payload = []
     # Limit for test purposes
-    limit = 2
+    limit = None
     if limit is not None:
         questions_data = questions_data[:limit]
     print(f"Running agent on {len(questions_data)} questions...")
     for item in questions_data:
         task_id = item.get("task_id")
-        question_text = item.get("question")
+        file_name = item.get("file_name")
+        if file_name != "":
+            file_path = f"{files_url}/{task_id}"
+            question_text = item.get("question") + " The mentioned file can be downloaded from the following link: " + file_path
+        else:
+            question_text = item.get("question")
         if not task_id or question_text is None:
             print(f"Skipping item with missing task_id or question: {item}")
             continue
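
For reference, a minimal sketch of the new file handling pulled out of the diff above. Only the f"{files_url}/{task_id}" URL pattern and the file_name field come from the commit; the helper names and the download step are illustrative assumptions about how a downstream agent would actually read the attachment.

import requests

def build_question_text(item: dict, files_url: str) -> str:
    # Mirror of the new app.py logic: append a download link when the task ships a file.
    question = item.get("question", "")
    if item.get("file_name"):
        question += f" The mentioned file can be downloaded from the following link: {files_url}/{item['task_id']}"
    return question

def download_task_file(files_url: str, task_id: str, dest: str) -> str:
    # Hypothetical helper (not in the commit): fetch the attachment so the agent can open it locally.
    response = requests.get(f"{files_url}/{task_id}", timeout=15)
    response.raise_for_status()
    with open(dest, "wb") as f:
        f.write(response.content)
    return dest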
multi_agent.py CHANGED
@@ -1,4 +1,3 @@
-from pydoc import describe
 import re
 from smolagents import AgentMemory, CodeAgent, InferenceClientModel, FinalAnswerTool, WebSearchTool
 from collections.abc import Callable
@@ -17,7 +16,7 @@ class MultiAgent:

         # WEB AGENT
         self.web_agent = CodeAgent(
-            model=InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct", provider="together", max_tokens=8096),
+            model=InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct", provider="nebius", max_tokens=8096),
             tools=[WikipediaSearchTool(), WebSearchTool(), VisitWebpageTool()],
             name="web_agent",
             description="A code agent that can search the web and visit webpages",
@@ -27,13 +26,14 @@ class MultiAgent:

         # MANAGER AGENT
         self.manager_agent = CodeAgent(
-            model=InferenceClientModel("deepseek-ai/DeepSeek-R1", provider="together", max_tokens=8096),
+            model=InferenceClientModel("deepseek-ai/DeepSeek-R1", provider="nebius", max_tokens=8096),
             tools=[FinalAnswerTool()],
             managed_agents=[self.web_agent],
             instructions=self.instructions,
             additional_authorized_imports=[
                 "pandas",
                 "numpy",
+                "chess"
             ],
             planning_interval=5,
             verbosity_level=2,
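
For context, a minimal usage sketch of the updated configuration. It assumes MultiAgent can be constructed without arguments and that HF_TOKEN is set so InferenceClientModel can reach the "nebius" provider; neither detail is shown in the diff, and the chess question is only an example of why "chess" was added to additional_authorized_imports.

from multi_agent import MultiAgent

agents = MultiAgent()  # assumed no-arg constructor; the real signature is not shown in this diff

# Because "chess" is now an authorized import, code that the manager agent
# generates (e.g. `import chess; chess.Board(fen)`) is allowed to execute
# inside the CodeAgent's Python interpreter.
answer = agents.manager_agent.run(
    "In the standard chess starting position, how many legal first moves does White have?"
)
print(answer)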
requirements.txt CHANGED
@@ -7,3 +7,4 @@ wikipedia-api
 markdownify
 requests
 smolagents[telemetry,toolkit]
+chess
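
A quick sanity check of the new dependency (the chess package on PyPI is python-chess, which the agents can now import); the moves below are only an example.

import chess

board = chess.Board()             # standard starting position
print(board.legal_moves.count())  # 20 legal first moves for White
board.push_san("e4")              # play 1. e4
print(board.fen())                # FEN string after the move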