adds-azure-openai (#14)
- feat: adds langchain-openai (007e1866704a0417d432df3ff84fda15f04af60d)
- feat: adds langchain-openai (f02ee48522bd188e0d993610fb40ac20fcdcf0f0)
- feat: adds langchain-openai (b31e7ff8be8e34d2eecd450933f523c277975fda)
Files changed:
- pyproject.toml +1 -0
- requirements-dev.txt +2 -0
- requirements.txt +2 -0
- tdagent/grchat.py +183 -24
- uv.lock +52 -0
pyproject.toml CHANGED
@@ -20,6 +20,7 @@ dependencies = [
     "langchain-aws>=0.2.24",
     "langchain-huggingface>=0.2.0",
     "langchain-mcp-adapters>=0.1.1",
+    "langchain-openai>=0.3.19",
     "langgraph>=0.4.7",
     "openai>=1.84.0",
 ]
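The pin above is the only manual edit in this commit; the matching changes in requirements-dev.txt, requirements.txt, and uv.lock below are regenerated artifacts (with uv, typically by rerunning uv lock and uv export after touching pyproject.toml).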
requirements-dev.txt CHANGED
@@ -52,6 +52,7 @@ langchain-aws==0.2.24
 langchain-core==0.3.63
 langchain-huggingface==0.2.0
 langchain-mcp-adapters==0.1.1
+langchain-openai==0.3.19
 langgraph==0.4.7
 langgraph-checkpoint==2.0.26
 langgraph-prebuilt==0.2.2
@@ -140,6 +141,7 @@ starlette==0.46.2
 sympy==1.14.0
 tenacity==9.1.2
 threadpoolctl==3.6.0
+tiktoken==0.9.0
 tokenizers==0.21.1
 toml==0.10.2
 tomli==2.2.1 ; python_full_version <= '3.11'
requirements.txt CHANGED
@@ -45,6 +45,7 @@ langchain-aws==0.2.24
 langchain-core==0.3.63
 langchain-huggingface==0.2.0
 langchain-mcp-adapters==0.1.1
+langchain-openai==0.3.19
 langgraph==0.4.7
 langgraph-checkpoint==2.0.26
 langgraph-prebuilt==0.2.2
@@ -117,6 +118,7 @@ starlette==0.46.2
 sympy==1.14.0
 tenacity==9.1.2
 threadpoolctl==3.6.0
+tiktoken==0.9.0
 tokenizers==0.21.1
 tomli==2.2.1 ; python_full_version <= '3.11'
 tomlkit==0.13.2
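Both requirements exports gain the same two pins: langchain-openai itself, plus tiktoken, which the lockfile below shows langchain-openai pulling in for client-side token counting. A quick sanity check that the new pins resolve (assumes a fresh environment with the requirements above installed):

```python
# Sanity check for the new pins; both imports come from this commit's additions.
from langchain_openai import AzureChatOpenAI  # shipped by langchain-openai==0.3.19
import tiktoken  # transitive dependency of langchain-openai

# tiktoken does local token counting for OpenAI-family models.
enc = tiktoken.get_encoding("cl100k_base")
print(len(enc.encode("hello world")))  # -> 2 tokens
```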
tdagent/grchat.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import os
 from collections import OrderedDict
 from collections.abc import Mapping, Sequence
 from types import MappingProxyType
@@ -14,6 +15,7 @@ from langchain_aws import ChatBedrock
 from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
 from langchain_mcp_adapters.client import MultiServerMCPClient
+from langchain_openai import AzureChatOpenAI
 from langgraph.prebuilt import create_react_agent
 from openai import OpenAI
 from openai.types.chat import ChatCompletion
@@ -77,6 +79,14 @@ MODEL_OPTIONS = OrderedDict(  # Initialize with tuples to preserve options order
             #     ),
             },
         ),
+        (
+            "Azure OpenAI",
+            {
+                "GPT-4o": ("ggpt-4o-global-standard"),
+                "GPT-4o Mini": ("o4-mini"),
+                "GPT-4.5 Preview": ("gpt-4.5-preview"),
+            },
+        ),
     ),
 )
@@ -128,12 +138,15 @@ def create_bedrock_llm(
 def create_hf_llm(
     hf_model_id: str,
     huggingfacehub_api_token: str | None = None,
+    temperature: float = 0.8,
+    max_tokens: int = 512,
 ) -> tuple[ChatHuggingFace | None, str]:
     """Create a LangGraph Hugging Face agent."""
     try:
         llm = HuggingFaceEndpoint(
             model=hf_model_id,
-            temperature=
+            temperature=temperature,
+            max_new_tokens=max_tokens,
             task="text-generation",
             huggingfacehub_api_token=huggingfacehub_api_token,
         )
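With the two new keyword arguments, the sampling settings chosen in the UI now reach the Hugging Face endpoint instead of being fixed at construction time; max_tokens is forwarded as the endpoint's max_new_tokens. A hypothetical call (model id and token are placeholders):

```python
# Placeholder model id and token; mirrors the updated create_hf_llm signature.
llm, error = create_hf_llm(
    "HuggingFaceH4/zephyr-7b-beta",
    huggingfacehub_api_token="hf_...",
    temperature=0.2,
    max_tokens=256,  # becomes max_new_tokens on the endpoint
)
if llm is None:
    print(f"failed: {error}")
```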
@@ -166,6 +179,34 @@ def create_openai_llm(
     return llm, ""


+def create_azure_llm(
+    model_id: str,
+    api_version: str,
+    endpoint: str,
+    token_id: str,
+    temperature: float = 0.8,
+    max_tokens: int = 512,
+) -> tuple[AzureChatOpenAI | None, str]:
+    """Create a LangGraph Azure OpenAI agent."""
+    try:
+        os.environ["AZURE_OPENAI_ENDPOINT"] = endpoint
+        os.environ["AZURE_OPENAI_API_KEY"] = token_id
+        if "o4-mini" in model_id:
+            kwargs = {"max_completion_tokens": max_tokens}
+        else:
+            kwargs = {"max_tokens": max_tokens}
+        llm = AzureChatOpenAI(
+            azure_deployment=model_id,
+            api_key=token_id,
+            api_version=api_version,
+            temperature=temperature,
+            **kwargs,
+        )
+    except Exception as e:  # noqa: BLE001
+        return None, str(e)
+    return llm, ""
+
+
 #### UI functionality ####
 async def gr_connect_to_bedrock(  # noqa: PLR0913
     model_id: str,
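The new helper follows the pattern of the existing create_* factories: set credentials, build the chat model, and return an (llm, error) pair instead of raising. The o4-mini branch exists because the o-series endpoints expect max_completion_tokens rather than max_tokens. Stripped of the wrapper, the construction amounts to the sketch below (endpoint, key, and deployment values are placeholders):

```python
# Minimal sketch of what create_azure_llm builds; all credentials are placeholders.
import os

from langchain_openai import AzureChatOpenAI

os.environ["AZURE_OPENAI_ENDPOINT"] = "https://my-resource.openai.azure.com"

llm = AzureChatOpenAI(
    azure_deployment="gpt-4.5-preview",  # deployment name, as listed in MODEL_OPTIONS
    api_key="<azure-api-key>",
    api_version="2024-12-01-preview",    # the default the UI ships with
    temperature=0.8,
    max_tokens=512,  # for "o4-mini" deployments the helper swaps this for max_completion_tokens
)
print(llm.invoke("ping").content)
```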
@@ -230,11 +271,18 @@ async def gr_connect_to_hf(
     model_id: str,
     hf_access_token_textbox: str | None,
     mcp_servers: Sequence[MutableCheckBoxGroupEntry] | None,
+    temperature: float = 0.8,
+    max_tokens: int = 512,
 ) -> str:
     """Initialize Hugging Face agent."""
     global llm_agent  # noqa: PLW0603

-    llm, error = create_hf_llm(
+    llm, error = create_hf_llm(
+        model_id,
+        hf_access_token_textbox,
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )

     if llm is None:
         return f"❌ Connection failed: {error}"
@@ -260,6 +308,51 @@ async def gr_connect_to_hf(
     return "✅ Successfully connected to Hugging Face!"


+async def gr_connect_to_azure(
+    model_id: str,
+    azure_endpoint: str,
+    api_key: str,
+    api_version: str,
+    mcp_servers: Sequence[MutableCheckBoxGroupEntry] | None,
+    temperature: float = 0.8,
+    max_tokens: int = 512,
+) -> str:
+    """Initialize Hugging Face agent."""
+    global llm_agent  # noqa: PLW0603
+
+    llm, error = create_azure_llm(
+        model_id,
+        api_version=api_version,
+        endpoint=azure_endpoint,
+        token_id=api_key,
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    if llm is None:
+        return f"❌ Connection failed: {error}"
+    tools = []
+    if mcp_servers:
+        client = MultiServerMCPClient(
+            {
+                server.name.replace(" ", "-"): {
+                    "url": server.value,
+                    "transport": "sse",
+                }
+                for server in mcp_servers
+            },
+        )
+        tools = await client.get_tools()
+
+    llm_agent = create_react_agent(
+        model=llm,
+        tools=tools,
+        prompt=SYSTEM_MESSAGE,
+    )
+
+    return "✅ Successfully connected to Azure OpenAI!"
+
+
 async def gr_connect_to_nebius(
     model_id: str,
     nebius_access_token_textbox: str,
@@ -334,6 +427,9 @@ def toggle_model_fields(
     dict[str, Any],
     dict[str, Any],
     dict[str, Any],
+    dict[str, Any],
+    dict[str, Any],
+    dict[str, Any],
 ]:  # ignore: F821
     """Toggle visibility of model fields based on the selected provider."""
     # Update model choices based on the selected provider
@@ -351,6 +447,8 @@ def toggle_model_fields(
     # Visibility settings for fields specific to each provider
     is_aws = provider == "AWS Bedrock"
     is_hf = provider == "HuggingFace"
+    is_azure = provider == "Azure OpenAI"
+    # is_nebius = provider == "Nebius"
     return (
         model_pretty,
         gr.update(visible=is_aws, interactive=is_aws),
@@ -358,43 +456,62 @@ def toggle_model_fields(
         gr.update(visible=is_aws, interactive=is_aws),
         gr.update(visible=is_aws, interactive=is_aws),
         gr.update(visible=is_hf, interactive=is_hf),
+        gr.update(visible=is_azure, interactive=is_azure),
+        gr.update(visible=is_azure, interactive=is_azure),
+        gr.update(visible=is_azure, interactive=is_azure),
     )


 async def update_connection_status(  # noqa: PLR0913
     provider: str,
-
+    model_id: str,
     mcp_list_state: Sequence[MutableCheckBoxGroupEntry] | None,
     aws_access_key_textbox: str,
     aws_secret_key_textbox: str,
     aws_session_token_textbox: str,
     aws_region_dropdown: str,
     hf_token: str,
+    azure_endpoint: str,
+    azure_api_token: str,
+    azure_api_version: str,
     temperature: float,
     max_tokens: int,
 ) -> str:
     """Update the connection status based on the selected provider and model."""
-    if not provider or not
+    if not provider or not model_id:
         return "❌ Please select a provider and model."
-
-    model_id = MODEL_OPTIONS.get(provider, {}).get(pretty_model)
     connection = "❌ Invalid provider"
-    if
-
+    if provider == "AWS Bedrock":
+        connection = await gr_connect_to_bedrock(
+            model_id,
+            aws_access_key_textbox,
+            aws_secret_key_textbox,
+            aws_session_token_textbox,
+            aws_region_dropdown,
+            mcp_list_state,
+            temperature,
+            max_tokens,
+        )
+    elif provider == "HuggingFace":
+        connection = await gr_connect_to_hf(
+            model_id,
+            hf_token,
+            mcp_list_state,
+            temperature,
+            max_tokens,
+        )
+    elif provider == "Azure OpenAI":
+        connection = await gr_connect_to_azure(
+            model_id,
+            azure_endpoint,
+            azure_api_token,
+            azure_api_version,
+            mcp_list_state,
+            temperature,
+            max_tokens,
+        )
+    elif provider == "Nebius":
+        connection = await gr_connect_to_nebius(model_id, hf_token, mcp_list_state)

     return connection
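update_connection_status is the single entry point wired to the connect action, so every provider's credentials travel through it and each branch picks out the ones it needs. A hypothetical invocation of the new Azure branch outside Gradio (every credential below is a placeholder):

```python
# Exercising the Azure branch directly; all credentials are placeholders.
import asyncio

status = asyncio.run(
    update_connection_status(
        "Azure OpenAI",
        "gpt-4.5-preview",                       # model_id
        None,                                    # no MCP servers selected
        "", "", "", "",                          # AWS fields, unused for this provider
        "",                                      # hf_token, unused here
        "https://my-resource.openai.azure.com",  # azure_endpoint
        "<azure-api-key>",                       # azure_api_token
        "2024-12-01-preview",                    # azure_api_version
        0.7,                                     # temperature
        512,                                     # max_tokens
    )
)
print(status)  # "✅ Successfully connected to Azure OpenAI!" on success
```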
@@ -468,13 +585,39 @@ with (
             placeholder="Enter your Hugging Face Access Token",
             visible=False,
         )
+        azure_endpoint = gr.Textbox(
+            label="Azure OpenAI Endpoint",
+            type="text",
+            placeholder="Enter your Azure OpenAI Endpoint",
+            visible=False,
+        )
+        azure_api_token = gr.Textbox(
+            label="Azure Access Token",
+            type="password",
+            placeholder="Enter your Azure OpenAI Access Token",
+            visible=False,
+        )
+        azure_api_version = gr.Textbox(
+            label="Azure OpenAI API Version",
+            type="text",
+            placeholder="Enter your Azure OpenAI API Version",
+            value="2024-12-01-preview",
+            visible=False,
+        )

     with gr.Accordion("🧠 Model Configuration", open=True):
         model_display_id = gr.Dropdown(
-            label="Select Model
+            label="Select Model from the list",
             choices=[],
             visible=False,
         )
+        model_id_textbox = gr.Textbox(
+            label="Model ID",
+            type="text",
+            placeholder="Enter the model ID",
+            visible=False,
+            interactive=True,
+        )
         model_provider.change(
             toggle_model_fields,
             inputs=[model_provider],
@@ -485,8 +628,21 @@ with (
                 aws_session_token_textbox,
                 aws_region_dropdown,
                 hf_token,
+                azure_endpoint,
+                azure_api_token,
+                azure_api_version,
             ],
         )
+        model_display_id.change(
+            lambda x, y: gr.update(
+                value=MODEL_OPTIONS.get(y, {}).get(x),
+                visible=True,
+            )
+            if x
+            else model_id_textbox.value,
+            inputs=[model_display_id, model_provider],
+            outputs=[model_id_textbox],
+        )
         # Initialize the temperature and max tokens based on model specifications
         temperature = gr.Slider(
             label="Temperature",
@@ -510,13 +666,16 @@ with (
             update_connection_status,
             inputs=[
                 model_provider,
-
+                model_id_textbox,
                 mcp_list.state,
                 aws_access_key_textbox,
                 aws_secret_key_textbox,
                 aws_session_token_textbox,
                 aws_region_dropdown,
                 hf_token,
+                azure_endpoint,
+                azure_api_token,
+                azure_api_version,
                 temperature,
                 max_tokens,
             ],
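The new model_display_id.change handler is what keeps the dropdown and the free-form Model ID textbox in sync: picking a pretty name looks up the concrete deployment id in MODEL_OPTIONS and writes it into the textbox, while leaving the textbox editable for ids that are not in the list. A self-contained sketch of the same wiring, with the option map trimmed to one illustrative entry:

```python
# Standalone demo of the dropdown -> textbox sync added above (illustrative subset).
import gradio as gr

OPTIONS = {"GPT-4o Mini": "o4-mini"}  # stand-in for one provider's MODEL_OPTIONS entry

with gr.Blocks() as demo:
    pretty = gr.Dropdown(label="Select Model from the list", choices=list(OPTIONS))
    model_id = gr.Textbox(label="Model ID", interactive=True)
    pretty.change(
        lambda name: gr.update(value=OPTIONS.get(name), visible=True),
        inputs=[pretty],
        outputs=[model_id],
    )

demo.launch()
```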
uv.lock CHANGED
@@ -1048,6 +1048,20 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/30/68/13405f252b38a8e1bd7ef345907a4e0eda535c2ca36fe4d6821fc7e9f5de/langchain_mcp_adapters-0.1.1-py3-none-any.whl", hash = "sha256:81594b265d824012040ebd24056fbdb5aabf0b46f780e369ed132421e3411e4d", size = 12100, upload-time = "2025-05-20T14:20:34.236Z" },
 ]

+[[package]]
+name = "langchain-openai"
+version = "0.3.19"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "langchain-core" },
+    { name = "openai" },
+    { name = "tiktoken" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5c/aa/4622a8c722f7bbd5261c8492d805165d845bc3212eca16b156fa48ce5626/langchain_openai-0.3.19.tar.gz", hash = "sha256:2ff103f272d01694aef650dfe0dc64525481b89f7f9e61f5e3ef8eb21da9f5fe", size = 544254, upload-time = "2025-06-02T17:57:29.34Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2d/30/1dd3bebaccdce52afc0139f4733b31d01986b42d53e4ac1a919eca9531d8/langchain_openai-0.3.19-py3-none-any.whl", hash = "sha256:0ffb9eb86e1d25909a8e406e7e1fead3bd2e8d74a7e6daa74bacf2c6971e8b99", size = 64463, upload-time = "2025-06-02T17:57:27.938Z" },
+]
+
 [[package]]
 name = "langgraph"
 version = "0.4.7"
@@ -2853,6 +2867,7 @@ dependencies = [
     { name = "langchain-aws" },
     { name = "langchain-huggingface" },
     { name = "langchain-mcp-adapters" },
+    { name = "langchain-openai" },
     { name = "langgraph" },
     { name = "openai" },
 ]
@@ -2880,6 +2895,7 @@ requires-dist = [
     { name = "langchain-aws", specifier = ">=0.2.24" },
     { name = "langchain-huggingface", specifier = ">=0.2.0" },
     { name = "langchain-mcp-adapters", specifier = ">=0.1.1" },
+    { name = "langchain-openai", specifier = ">=0.3.19" },
     { name = "langgraph", specifier = ">=0.4.7" },
     { name = "openai", specifier = ">=1.84.0" },
 ]
@@ -2916,6 +2932,42 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" },
 ]

+[[package]]
+name = "tiktoken"
+version = "0.9.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "regex" },
+    { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991, upload-time = "2025-02-14T06:03:01.003Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/64/f3/50ec5709fad61641e4411eb1b9ac55b99801d71f1993c29853f256c726c9/tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382", size = 1065770, upload-time = "2025-02-14T06:02:01.251Z" },
+    { url = "https://files.pythonhosted.org/packages/d6/f8/5a9560a422cf1755b6e0a9a436e14090eeb878d8ec0f80e0cd3d45b78bf4/tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108", size = 1009314, upload-time = "2025-02-14T06:02:02.869Z" },
+    { url = "https://files.pythonhosted.org/packages/bc/20/3ed4cfff8f809cb902900ae686069e029db74567ee10d017cb254df1d598/tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd", size = 1143140, upload-time = "2025-02-14T06:02:04.165Z" },
+    { url = "https://files.pythonhosted.org/packages/f1/95/cc2c6d79df8f113bdc6c99cdec985a878768120d87d839a34da4bd3ff90a/tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de", size = 1197860, upload-time = "2025-02-14T06:02:06.268Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/6c/9c1a4cc51573e8867c9381db1814223c09ebb4716779c7f845d48688b9c8/tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990", size = 1259661, upload-time = "2025-02-14T06:02:08.889Z" },
+    { url = "https://files.pythonhosted.org/packages/cd/4c/22eb8e9856a2b1808d0a002d171e534eac03f96dbe1161978d7389a59498/tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4", size = 894026, upload-time = "2025-02-14T06:02:12.841Z" },
+    { url = "https://files.pythonhosted.org/packages/4d/ae/4613a59a2a48e761c5161237fc850eb470b4bb93696db89da51b79a871f1/tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e", size = 1065987, upload-time = "2025-02-14T06:02:14.174Z" },
+    { url = "https://files.pythonhosted.org/packages/3f/86/55d9d1f5b5a7e1164d0f1538a85529b5fcba2b105f92db3622e5d7de6522/tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348", size = 1009155, upload-time = "2025-02-14T06:02:15.384Z" },
+    { url = "https://files.pythonhosted.org/packages/03/58/01fb6240df083b7c1916d1dcb024e2b761213c95d576e9f780dfb5625a76/tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33", size = 1142898, upload-time = "2025-02-14T06:02:16.666Z" },
+    { url = "https://files.pythonhosted.org/packages/b1/73/41591c525680cd460a6becf56c9b17468d3711b1df242c53d2c7b2183d16/tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136", size = 1197535, upload-time = "2025-02-14T06:02:18.595Z" },
+    { url = "https://files.pythonhosted.org/packages/7d/7c/1069f25521c8f01a1a182f362e5c8e0337907fae91b368b7da9c3e39b810/tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336", size = 1259548, upload-time = "2025-02-14T06:02:20.729Z" },
+    { url = "https://files.pythonhosted.org/packages/6f/07/c67ad1724b8e14e2b4c8cca04b15da158733ac60136879131db05dda7c30/tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb", size = 893895, upload-time = "2025-02-14T06:02:22.67Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/e5/21ff33ecfa2101c1bb0f9b6df750553bd873b7fb532ce2cb276ff40b197f/tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03", size = 1065073, upload-time = "2025-02-14T06:02:24.768Z" },
+    { url = "https://files.pythonhosted.org/packages/8e/03/a95e7b4863ee9ceec1c55983e4cc9558bcfd8f4f80e19c4f8a99642f697d/tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210", size = 1008075, upload-time = "2025-02-14T06:02:26.92Z" },
+    { url = "https://files.pythonhosted.org/packages/40/10/1305bb02a561595088235a513ec73e50b32e74364fef4de519da69bc8010/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794", size = 1140754, upload-time = "2025-02-14T06:02:28.124Z" },
+    { url = "https://files.pythonhosted.org/packages/1b/40/da42522018ca496432ffd02793c3a72a739ac04c3794a4914570c9bb2925/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22", size = 1196678, upload-time = "2025-02-14T06:02:29.845Z" },
+    { url = "https://files.pythonhosted.org/packages/5c/41/1e59dddaae270ba20187ceb8aa52c75b24ffc09f547233991d5fd822838b/tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2", size = 1259283, upload-time = "2025-02-14T06:02:33.838Z" },
+    { url = "https://files.pythonhosted.org/packages/5b/64/b16003419a1d7728d0d8c0d56a4c24325e7b10a21a9dd1fc0f7115c02f0a/tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16", size = 894897, upload-time = "2025-02-14T06:02:36.265Z" },
+    { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919, upload-time = "2025-02-14T06:02:37.494Z" },
+    { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877, upload-time = "2025-02-14T06:02:39.516Z" },
+    { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095, upload-time = "2025-02-14T06:02:41.791Z" },
+    { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649, upload-time = "2025-02-14T06:02:43Z" },
+    { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465, upload-time = "2025-02-14T06:02:45.046Z" },
+    { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669, upload-time = "2025-02-14T06:02:47.341Z" },
+]
+
 [[package]]
 name = "tokenizers"
 version = "0.21.1"