# CoderAI / app.py
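"""CoderAI: a Gradio chat app that streams coding help from
code-focused models (Qwen Coder, DeepSeek Coder, CodeLlama)
via the Hugging Face Inference API."""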
import os
import gradio as gr
from huggingface_hub import InferenceClient
import random

def get_coding_samples():
    """Diverse coding challenges"""
    return [
        "Write a function to reverse a linked list",
        "Implement binary search in Python",
        "Create a REST API endpoint for user authentication",
        "Write a SQL query to find duplicate records",
        "Build a React component for a todo list",
        "Implement quicksort algorithm",
        "Parse JSON and handle errors gracefully",
        "Create a decorator for timing function execution",
        "Write regex to validate email addresses",
        "Implement an LRU cache in Python",
        "Build a simple web scraper with BeautifulSoup",
        "Create a custom hook in React",
        "Write unit tests for a calculator function",
        "Implement depth-first search for a graph",
        "Build a CLI tool with argparse",
        "Create a database migration script",
        "Write a function to detect palindromes",
        "Implement JWT token authentication",
        "Build a responsive navbar with Tailwind CSS",
        "Create a rate limiter middleware",
    ]

def create_coder_system_message():
    """System prompt for coding assistance"""
    return """You are CoderAI, an expert programming assistant.

**Your Approach:**
1. Understand the requirements clearly
2. Provide clean, working code with comments
3. Explain key concepts and design decisions
4. Suggest best practices and optimizations
5. Include error handling where appropriate

**Code Quality:**
- Write readable, maintainable code
- Follow language conventions and style guides
- Use meaningful variable names
- Add docstrings/comments for complex logic
- Consider edge cases and error handling

**Format:**
- Use markdown code blocks with language tags
- Explain code sections when helpful
- Provide usage examples
- Mention dependencies if needed

Be practical, efficient, and educational."""

def respond(message, history, system_message, max_tokens, temperature, top_p, model_choice):
    """Streaming response with error handling"""
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        yield "❌ **Error:** HF_TOKEN not found. Set it in the environment or in Spaces secrets."
        return

    # Map friendly names to Hub model IDs; fall back to Qwen Coder.
    models = {
        "DeepSeek Coder": "deepseek-ai/deepseek-coder-33b-instruct",
        "CodeLlama": "codellama/CodeLlama-34b-Instruct-hf",
        "Qwen Coder": "Qwen/Qwen2.5-Coder-32B-Instruct",
    }
    client = InferenceClient(
        model=models.get(model_choice, models["Qwen Coder"]),
        token=hf_token,
    )

    # Normalize history into OpenAI-style chat messages.
    messages = [{"role": "system", "content": system_message}]
    for msg in history:
        if isinstance(msg, dict):
            role = msg.get("role", "user")
            content = msg.get("content", "")
        else:
            role = "user"
            content = str(msg)
        if content:
            messages.append({"role": role, "content": content})
    messages.append({"role": "user", "content": message})

    try:
        response_text = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            # Accumulate deltas and yield the running text for live updates.
            if chunk.choices[0].delta.content:
                response_text += chunk.choices[0].delta.content
                yield response_text
    except Exception as e:
        error_msg = (
            f"❌ **Error:** {str(e)}\n\n"
            "Try:\n- Checking your HF_TOKEN\n- Simplifying the request\n- Using a different model"
        )
        yield error_msg
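
# Illustrative sketch (not part of the app): because `respond` is a plain
# generator, it can be exercised outside Gradio, assuming HF_TOKEN is set
# and the chosen model is available on the Inference API:
#
#   for partial in respond(
#       "Write a function to detect palindromes",  # prompt
#       [],                                        # empty history
#       create_coder_system_message(), 512, 0.2, 0.9, "Qwen Coder",
#   ):
#       print(partial)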

def get_random_sample():
    """Get a random coding challenge"""
    return random.choice(get_coding_samples())

# Gradio Interface
with gr.Blocks(title="💻 CoderAI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 💻 **CoderAI**\n*Your AI Programming Assistant*")

    chatbot = gr.Chatbot(
        height=500,
        type="messages",
        label="💬 Conversation",
        show_copy_button=True,
    )
    msg = gr.Textbox(
        placeholder="Ask a coding question or describe what you need...",
        show_label=False,
        scale=4,
    )

    with gr.Row():
        submit = gr.Button("🚀 Code", variant="primary", scale=1)
        clear = gr.Button("🗑️ Clear", variant="secondary", scale=1)
        sample = gr.Button("🎲 Random", variant="secondary", scale=1)

    with gr.Accordion("⚙️ Advanced Settings", open=False):
        model_dropdown = gr.Dropdown(
            choices=["Qwen Coder", "DeepSeek Coder", "CodeLlama"],
            value="Qwen Coder",
            label="Model Selection",
        )
        temp_slider = gr.Slider(0.1, 1.0, value=0.2, step=0.1, label="Temperature")
        tokens_slider = gr.Slider(512, 4096, value=2048, step=256, label="Max Tokens")
        top_p_slider = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")

    with gr.Accordion("💡 Help & Examples", open=False):
        gr.Markdown("""
**Tips:**
- Be specific about language and requirements
- Mention if you need comments or tests
- Ask for explanations of complex parts
- Request specific design patterns or styles

**What I can help with:**
- Writing functions and classes
- Debugging code
- Code review and optimization
- Algorithm implementation
- API design and database queries
- Testing and documentation
""")

    gr.Examples(
        examples=[
            ["Write a Python function to check if a string is a palindrome"],
            ["Create a React component for a searchable dropdown"],
            ["Implement a binary tree traversal in JavaScript"],
            ["Write a SQL query to find the top 5 customers by revenue"],
            ["Build a simple Flask API with error handling"],
            ["Create a custom validation decorator in Python"],
        ],
        inputs=msg,
    )

    system_msg = gr.State(create_coder_system_message())

    def chat_response(message, history, sys_msg, max_tok, temp, top_p, model):
        """Handle chat with streaming"""
        if not message.strip():
            # Yield (not return) so Gradio still receives an update.
            yield history, ""
            return
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": ""})
        # Slice off both new entries: `respond` appends the fresh user
        # message itself, so passing it again would duplicate it.
        for response in respond(message, history[:-2], sys_msg, max_tok, temp, top_p, model):
            history[-1]["content"] = response
            yield history, ""

    def clear_chat():
        return [], ""
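
    # Wire the events: submitting the textbox or clicking "🚀 Code" streams
    # the reply; "🗑️ Clear" resets the chat; "🎲 Random" fills in a sample.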
    msg.submit(
        chat_response,
        [msg, chatbot, system_msg, tokens_slider, temp_slider, top_p_slider, model_dropdown],
        [chatbot, msg],
    )
    submit.click(
        chat_response,
        [msg, chatbot, system_msg, tokens_slider, temp_slider, top_p_slider, model_dropdown],
        [chatbot, msg],
    )
    clear.click(clear_chat, outputs=[chatbot, msg])
    sample.click(get_random_sample, outputs=msg)


if __name__ == "__main__":
    demo.launch()
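
# To run locally (assumes a Hugging Face token with Inference API access):
#   HF_TOKEN=hf_xxx python app.py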