# "Spaces: Sleeping / Sleeping" — HuggingFace Space status banner captured
# during extraction; not part of the source file.
| """ | |
| Jade Code IDE - HuggingFace Space Backend v3.1 | |
| Modo Agêntico Melhorado + Context-Aware + Multi-Provider | |
| """ | |
| import os | |
| import sys | |
| import io | |
| import json | |
| import base64 | |
| import traceback | |
| import re | |
| from contextlib import redirect_stdout, redirect_stderr | |
| from typing import Optional, List, Dict | |
| from fastapi import FastAPI, HTTPException, Header | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from pydantic import BaseModel | |
| import gradio as gr | |
| from providers import get_provider, list_all_providers | |
| from tools import AGENT_TOOLS, ToolExecutor, parse_tool_calls, get_response_content, has_tool_calls | |
# ============== App Setup ==============
app = FastAPI(title="Jade Code IDE API", version="3.1")
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is an
# over-permissive CORS configuration (browsers reject credentialed wildcard
# responses, and Starlette cannot honor both) — confirm whether credentials
# are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# ============== Request Models ==============
class ChatRequest(BaseModel):
    """Payload for the chat endpoint: user message plus project context."""
    message: str
    provider: str = "groq"  # provider key resolved via providers.get_provider
    model: str = "llama-3.3-70b-versatile"
    # {filename: content}; mutable defaults are safe here — pydantic copies
    # field defaults per instance.
    files: Optional[Dict[str, str]] = {}
    # Name of the file currently open in the editor (prioritized in context).
    current_file: Optional[str] = None
    # When True the backend attempts tool-calling ("agentic") mode first.
    agentic: bool = True
    # Prior chat turns as OpenAI-style {"role": ..., "content": ...} dicts.
    history: Optional[List[Dict]] = []
class RunRequest(BaseModel):
    """Payload for the code-execution endpoint."""
    code: str  # raw Python source to execute server-side
class CompleteRequest(BaseModel):
    """Payload for the LLM autocomplete endpoint."""
    prefix: str  # code before the cursor
    suffix: str = ""  # code after the cursor
    language: str = "python"
    provider: str = "groq"
    model: str = "llama-3.3-70b-versatile"
# ============== Helper: Build Context ==============
def build_file_context(files: Dict[str, str], current_file: Optional[str], max_chars: int = 8000) -> str:
    """Build a markdown project summary to inject into the system prompt.

    Emits a file listing, the full (truncated to 4000 chars) content of the
    file open in the editor, then 500-char previews of the remaining files
    until ``max_chars`` of content has been accumulated; files beyond the
    budget are only counted.

    Args:
        files: Mapping of filename -> file content.
        current_file: Name of the file open in the editor, if any.
        max_chars: Soft budget for accumulated content size.

    Returns:
        Markdown string, or "" when there are no files.
    """
    if not files:
        return ""
    context_parts = []
    total_chars = 0
    # Project file listing, marking the file open in the editor.
    context_parts.append(f"📁 **PROJETO ({len(files)} arquivos)**")
    context_parts.append("```")
    for name in files.keys():
        is_current = " ← [ABERTO]" if name == current_file else ""
        context_parts.append(f" {name}{is_current}")
    context_parts.append("```\n")
    # Full content of the open file gets top priority.
    if current_file and current_file in files:
        content = files[current_file]
        if len(content) > 4000:
            content = content[:4000] + f"\n... [truncado, {len(content)} chars total]"
        context_parts.append(f"📄 **ARQUIVO ABERTO: {current_file}**")
        context_parts.append(f"```python\n{content}\n```\n")
        total_chars += len(content)
    # Short previews of the remaining files, within the max_chars budget.
    others = [(name, content) for name, content in files.items() if name != current_file]
    for index, (name, content) in enumerate(others):
        if total_chars > max_chars:
            # BUGFIX: the skipped-file count was previously derived from
            # len(context_parts) (number of markdown fragments emitted) and
            # reported a wrong number; count the files actually skipped.
            context_parts.append(f"_... mais {len(others) - index} arquivos não mostrados_")
            break
        # Small preview only; note the real size when truncated.
        preview = content[:500] if len(content) > 500 else content
        if len(content) > 500:
            preview += f"\n... [{len(content)} chars total]"
        context_parts.append(f"📄 **{name}**")
        ext = name.split('.')[-1] if '.' in name else 'text'
        lang = {'py': 'python', 'js': 'javascript', 'md': 'markdown', 'json': 'json'}.get(ext, ext)
        context_parts.append(f"```{lang}\n{preview}\n```\n")
        total_chars += len(preview)
    return "\n".join(context_parts)
# ============== System Prompt ==============
# Base system prompt (Portuguese, matching the product's user base). The
# project-file context built by build_file_context() is appended to this at
# request time. Runtime text — do not translate.
SYSTEM_PROMPT = """Você é **CodeJade**, um assistente de programação integrado a uma IDE web.
## Suas Capacidades:
- Você tem acesso COMPLETO ao projeto do usuário (arquivos listados abaixo)
- Você pode ver o arquivo que está aberto no editor
- Você ajuda a escrever código, debugar, explicar e melhorar
## Regras:
1. LEIA o contexto do projeto antes de responder
2. Seja técnico e direto
3. Use blocos ```python para código
4. Se for modificar código, mostre apenas as partes que mudam
5. Considere TODO o projeto, não só o arquivo aberto
## Formato de Resposta:
- Para explicações: texto normal com formatação markdown
- Para código: blocos de código com linguagem especificada
- Para bugs: mostre o problema E a solução
"""
# ============== Endpoints ==============
# NOTE(review): no @app.get("/") decorator is visible on this function —
# route registration may have been lost in extraction; confirm.
async def root():
    """Health-check / landing endpoint with the API version banner."""
    payload = {
        "status": "ok",
        "message": "Jade Code IDE API v3.1 - Context-Aware Mode",
        "version": "3.1",
    }
    return payload
# NOTE(review): no route decorator visible — confirm @app.get registration.
async def get_providers():
    """List every LLM provider known to the backend."""
    available = list_all_providers()
    return {"providers": available}
# NOTE(review): no route decorator visible — confirm @app.get registration.
async def get_models(provider: str):
    """Return the model list for one provider; 400 on an unknown provider name."""
    try:
        # A placeholder key is enough to instantiate the provider for listing.
        backend = get_provider(provider, "dummy")
        models = backend.list_models()
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    return {"models": models}
# NOTE(review): no @app.post(...) decorator is visible on this function —
# route registration may have been lost in extraction; confirm.
async def chat(req: ChatRequest, x_api_key: str = Header(..., alias="X-API-Key")):
    """
    Chat with full project context.

    Works with or without model tool-call support: when the tools call fails,
    falls back to a plain completion that relies on the inlined file context.
    """
    # Cheap sanity check only; real key validation happens at the provider.
    if not x_api_key or len(x_api_key) < 10:
        raise HTTPException(status_code=401, detail="API Key inválida")
    try:
        provider = get_provider(req.provider, x_api_key)
        # Build the markdown project context for the system prompt.
        file_context = build_file_context(req.files, req.current_file)
        # System prompt with context appended.
        system_with_context = SYSTEM_PROMPT
        if file_context:
            system_with_context += f"\n\n---\n\n# Contexto do Projeto\n\n{file_context}"
        # Assemble the message list.
        messages = [{"role": "system", "content": system_with_context}]
        # Only the last 6 history turns, to bound prompt size.
        for msg in (req.history or [])[-6:]:
            messages.append(msg)
        # Current user message.
        messages.append({"role": "user", "content": req.message})
        # Try a tool-enabled call first (if agentic mode is on).
        tool_results = []
        if req.agentic:
            try:
                tool_executor = ToolExecutor(req.files)
                response = provider.chat(messages, req.model, tools=AGENT_TOOLS)
                # Tool-call loop: execute requested tools, feed results back.
                if has_tool_calls(response):
                    for _ in range(3):  # max 3 iterations to avoid loops
                        tool_calls = parse_tool_calls(response)
                        if not tool_calls:
                            break
                        # Echo the assistant's tool-call message back first,
                        # as the chat protocol requires.
                        assistant_msg = response["choices"][0]["message"]
                        messages.append(assistant_msg)
                        for tc in tool_calls:
                            result = tool_executor.execute(tc["name"], tc["args"])
                            # Keep a truncated transcript for the client UI.
                            tool_results.append({"tool": tc["name"], "result": result[:300]})
                            messages.append({
                                "role": "tool",
                                "tool_call_id": tc["id"],
                                "content": result
                            })
                        response = provider.chat(messages, req.model, tools=AGENT_TOOLS)
                        if not has_tool_calls(response):
                            break
                content = get_response_content(response)
                return {
                    "response": content,
                    "tool_calls": tool_results if tool_results else None
                }
            except Exception as tool_error:
                # Fallback: retry without tools (model likely lacks support).
                # NOTE(review): this broad except also swallows unrelated
                # provider errors and silently retries — confirm intended.
                print(f"Tool call failed, falling back: {tool_error}")
                response = provider.chat(messages, req.model)
                content = get_response_content(response)
                return {
                    "response": content,
                    "tool_calls": None,
                    "note": "Modelo não suporta tools, usando contexto direto"
                }
        else:
            # Non-agentic mode: plain completion with inlined context.
            response = provider.chat(messages, req.model)
            content = get_response_content(response)
            return {"response": content}
    except Exception as e:
        traceback.print_exc()
        error_msg = str(e)
        # Map auth-looking failures to 401 for the client.
        if "401" in error_msg or "authentication" in error_msg.lower():
            raise HTTPException(status_code=401, detail="API Key inválida para este provider")
        raise HTTPException(status_code=500, detail=f"Erro: {error_msg}")
# NOTE(review): no @app.post(...) decorator is visible on this function —
# route registration may have been lost in extraction; confirm.
async def run_code(req: RunRequest):
    """Execute user-supplied Python code, capturing stdout, stderr and plots.

    Returns a dict with ``output`` (captured stdout), ``error`` (captured
    stderr or a formatted exception, else None) and ``image`` (base64 PNG of
    the current matplotlib figure, if any).

    SECURITY: this exec()s untrusted input by design (IDE sandbox); it must
    only run inside an isolated container such as a HF Space.
    """
    code = req.code
    f_out = io.StringIO()
    f_err = io.StringIO()
    image_base64 = None
    # BUGFIX: matplotlib was imported unguarded, so on hosts without it every
    # run — even plain print() — failed with ImportError. It is now optional,
    # like numpy and pandas below.
    plt = None
    try:
        import matplotlib
        matplotlib.use('Agg')  # headless backend: render to buffers, no display
        import matplotlib.pyplot as plt
    except ImportError:
        plt = None
    try:
        exec_globals = {
            "__builtins__": __builtins__,
            "__name__": "__main__",
        }
        if plt is not None:
            exec_globals["plt"] = plt
        # Convenience pre-imports; silently skipped when unavailable.
        try:
            import numpy as np
            exec_globals["np"] = np
            exec_globals["numpy"] = np
        except ImportError:
            pass
        try:
            import pandas as pd
            exec_globals["pd"] = pd
        except ImportError:
            pass
        with redirect_stdout(f_out), redirect_stderr(f_err):
            exec(code, exec_globals)
        # Capture any figure the user code produced as a base64 PNG.
        if plt is not None and plt.get_fignums():
            buf = io.BytesIO()
            plt.savefig(buf, format='png', dpi=100, bbox_inches='tight',
                        facecolor='#09090b', edgecolor='none')
            buf.seek(0)
            image_base64 = base64.b64encode(buf.read()).decode('utf-8')
            plt.close('all')
        return {
            "output": f_out.getvalue(),
            "error": f_err.getvalue() or None,
            "image": image_base64
        }
    except Exception as e:
        # Close figures on failure too, so they don't leak across requests.
        if plt is not None:
            plt.close('all')
        return {
            "output": f_out.getvalue(),
            "error": f"❌ {type(e).__name__}: {str(e)}",
            "image": None
        }
# NOTE(review): no @app.post(...) decorator is visible on this function —
# route registration may have been lost in extraction; confirm.
async def autocomplete(req: CompleteRequest, x_api_key: str = Header(..., alias="X-API-Key")):
    """LLM-backed code completion: returns Monaco-style suggestion objects."""
    if not x_api_key or len(x_api_key) < 10:
        raise HTTPException(status_code=401, detail="API Key inválida")
    try:
        llm = get_provider(req.provider, x_api_key)
        # Fill-in-the-middle prompt: bounded slices of code around the cursor.
        prompt = f"""Complete o código. Responda APENAS JSON:
ANTES DO CURSOR:
```{req.language}
{req.prefix[-400:]}
```
DEPOIS:
```{req.language}
{req.suffix[:150] if req.suffix else ''}
```
JSON (max 5):
[{{"label": "nome", "insertText": "código", "detail": "desc", "kind": "Function"}}]"""
        reply = llm.chat(
            messages=[
                {"role": "system", "content": "Responda APENAS JSON válido."},
                {"role": "user", "content": prompt}
            ],
            model=req.model
        )
        text = get_response_content(reply) or ""
        # Extract the first JSON array from the (possibly chatty) reply.
        found = re.search(r'\[.*\]', text, re.DOTALL)
        if not found:
            return {"suggestions": []}
        return {"suggestions": json.loads(found.group(0))}
    except Exception as exc:
        # Autocomplete is best-effort: never fail the editor, just report.
        return {"suggestions": [], "error": str(exc)}
# ============== Gradio UI ==============
def gradio_chat(message, provider, model, api_key):
    """Minimal chat handler for the built-in Gradio test UI (no file context)."""
    if not api_key:
        return "❌ Coloque sua API Key"
    try:
        backend = get_provider(provider, api_key)
        reply = backend.chat(
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": message}
            ],
            model=model
        )
        return get_response_content(reply) or "Sem resposta"
    except Exception as exc:
        return f"❌ Erro: {str(exc)}"
# Small diagnostic UI mounted at /ui so the Space has a visible front page.
with gr.Blocks(title="Jade Code IDE API") as demo:
    gr.Markdown("# 🟢 Jade Code IDE - API Backend v3.1")
    gr.Markdown("**Context-Aware Mode** | Multi-Provider | API Docs: `/docs`")
    with gr.Row():
        provider_dd = gr.Dropdown(
            choices=["groq", "cerebras", "openrouter"],
            value="groq",
            label="Provider"
        )
        # NOTE(review): model choices are static here and not refreshed when
        # the provider changes — confirm whether that is intended.
        model_dd = gr.Dropdown(
            choices=["llama-3.3-70b-versatile"],
            value="llama-3.3-70b-versatile",
            label="Model"
        )
    api_key_input = gr.Textbox(label="API Key", type="password")
    msg_input = gr.Textbox(label="Mensagem", lines=3)
    send_btn = gr.Button("Enviar")
    output = gr.Textbox(label="Resposta", lines=10)
    send_btn.click(
        gradio_chat,
        inputs=[msg_input, provider_dd, model_dd, api_key_input],
        outputs=output
    )
# Mount the Gradio demo onto the FastAPI app under /ui.
app = gr.mount_gradio_app(app, demo, path="/ui")
| if __name__ == "__main__": | |
| import uvicorn | |
| uvicorn.run(app, host="0.0.0.0", port=7860) | |