mgbam committed
Commit 8c14ec2 · verified · 1 Parent(s): 526d2dc

Update app.py

Files changed (1)
  1. app.py +8 -20
app.py CHANGED
@@ -1,21 +1,6 @@
 """
 app.py – Enterprise SQL Agent (Gradio + smolagents + MCP)
-
-Provider priority
-──────────────────
-1. OpenAI → set OPENAI_API_KEY (override model with OPENAI_MODEL, default = gpt-4o)
-2. Gemini → set GOOGLE_API_KEY (override model with GOOGLE_MODEL, default = gemini-pro)
-3. Hugging Face Inference fallback
-   • HF_MODEL_ID  (default = microsoft/Phi-3-mini-4k-instruct)
-   • HF_API_TOKEN (only if the repo is gated)
-
-File layout
-────────────
-app.py
-mcp_server.py
-connectors/
-   └─ salesforce_connector.py
-requirements.txt
+HubSpot Integration Only
 """
 
 import os, pathlib, json, pprint, gradio as gr
@@ -25,13 +10,13 @@ from smolagents.models import LiteLLMModel, InferenceClientModel
 
 # ───────────────────────── 1. Choose base LLM ──────────────────────────
 OPENAI_KEY = os.getenv("OPENAI_API_KEY")
-OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")  # stable id
+OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
 
 GEMINI_KEY = os.getenv("GOOGLE_API_KEY")
 GEM_MODEL = os.getenv("GOOGLE_MODEL", "gemini-pro")
 
 HF_MODEL_ID = os.getenv("HF_MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")
-HF_TOKEN = os.getenv("HF_API_TOKEN")  # optional
+HF_TOKEN = os.getenv("HF_API_TOKEN")
 
 if OPENAI_KEY:
     BASE_MODEL = LiteLLMModel(model_id=f"openai/{OPENAI_MODEL}", api_key=OPENAI_KEY)
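Only the OpenAI branch of the provider chain is visible in this hunk; the Gemini and Hugging Face fallbacks described in the removed docstring sit in the collapsed lines below it. A minimal sketch of how that chain presumably continues, assuming LiteLLM's "gemini/" model-id prefix for Google models and InferenceClientModel's token parameter for gated repos (both branches are reconstructions, not the committed code):

    # Hedged reconstruction of the full fallback chain; only the OpenAI branch
    # appears in the diff. The "gemini/" prefix and the token= argument are
    # assumptions about the unchanged code, not the committed lines.
    import os
    from smolagents.models import LiteLLMModel, InferenceClientModel

    OPENAI_KEY = os.getenv("OPENAI_API_KEY")
    OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
    GEMINI_KEY = os.getenv("GOOGLE_API_KEY")
    GEM_MODEL = os.getenv("GOOGLE_MODEL", "gemini-pro")
    HF_MODEL_ID = os.getenv("HF_MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")
    HF_TOKEN = os.getenv("HF_API_TOKEN")

    if OPENAI_KEY:
        BASE_MODEL = LiteLLMModel(model_id=f"openai/{OPENAI_MODEL}", api_key=OPENAI_KEY)
    elif GEMINI_KEY:
        # LiteLLM addresses Google AI Studio models with a "gemini/" prefix.
        BASE_MODEL = LiteLLMModel(model_id=f"gemini/{GEM_MODEL}", api_key=GEMINI_KEY)
    else:
        # Hugging Face Inference fallback; the token matters only for gated repos.
        BASE_MODEL = InferenceClientModel(model_id=HF_MODEL_ID, token=HF_TOKEN)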
@@ -50,8 +35,11 @@ SERVER_PATH = pathlib.Path(__file__).with_name("mcp_server.py")
 def respond(message: str, history: list):
     """Prompt → CodeAgent → MCP tools → string reply."""
     params = StdioServerParameters(command="python", args=[str(SERVER_PATH)])
-    with MCPClient(params) as tools:
-        answer = CodeAgent(tools=tools, model=BASE_MODEL).run(message)
+    try:
+        with MCPClient(params) as tools:
+            answer = CodeAgent(tools=tools, model=BASE_MODEL).run(message)
+    except Exception as e:
+        answer = f"Error while querying tools: {e}"
 
     # ensure plain-text output
     if not isinstance(answer, str):
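The try/except added in this hunk keeps the Gradio callback from crashing when the MCP subprocess or the agent run fails; the error text is returned as the chat reply instead. Since respond(message, history) matches gr.ChatInterface's callback signature, the UI wiring at the bottom of the file presumably looks like the sketch below (the title string and the launch call are illustrative assumptions):

    # Hedged sketch of how respond() is presumably exposed as a chat UI further
    # down the file. gr.ChatInterface passes (message, history) to its callback,
    # matching respond's signature above; respond is defined earlier in app.py.
    import gradio as gr

    demo = gr.ChatInterface(fn=respond, title="Enterprise SQL Agent")

    if __name__ == "__main__":
        demo.launch()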