"""Minimal Gradio UI wrapping a local Qwen2.5-Coder text-generation pipeline."""
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

MODEL_ID = "Qwen/Qwen2.5-Coder-7B"

# Lazily-constructed pipeline: loading a 7B model takes minutes and lots of
# RAM, so we defer it until the first request instead of paying at import time.
_pipe = None


def _get_pipe():
    """Return the shared text-generation pipeline, building it on first use."""
    global _pipe
    if _pipe is None:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
        _pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return _pipe


def code_agent(prompt):
    """Generate a code completion for *prompt*.

    Args:
        prompt: Free-form text/code entered in the UI.

    Returns:
        The model's completion only (the prompt is not echoed back).
        Empty/whitespace-only prompts return "" without invoking the model.
    """
    if not prompt or not prompt.strip():
        return ""
    output = _get_pipe()(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.2,  # low temperature: mostly-deterministic code output
        # Default pipeline behavior prepends the prompt to "generated_text";
        # we want just the new tokens.
        return_full_text=False,
    )
    return output[0]["generated_text"]


# Kept at module level so `gradio <file>` reload mode can find `demo`.
demo = gr.Interface(fn=code_agent, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guarded so importing this module does not start a web server.
    demo.launch()