# Hugging Face Space: Code Refactor with Qwen
# (NOTE(review): "Spaces / Sleeping / Sleeping" status-badge text from the
# scraped Space page was converted into this comment so the file parses.)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# ---- Model setup -----------------------------------------------------------
# Hugging Face Hub id of the code-generation checkpoint used by this Space.
MODEL_NAME = "Qwen/Qwen2.5-Coder-1.5B"

# Load the tokenizer and causal-LM weights once at import time so every
# Gradio request reuses the same in-memory model instead of reloading it.
# trust_remote_code=True allows the repo's custom modeling code to run —
# acceptable here only because the model id is a fixed, known constant.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Define the refactor function used as the Gradio handler.
def refactor_code(message, code):
    """Refactor *code* according to the instruction in *message*.

    Parameters
    ----------
    message : str
        Natural-language instruction describing the desired refactor.
    code : str
        The source code to refactor.

    Returns
    -------
    str
        Only the newly generated text (the prompt is not echoed back),
        decoded without special tokens.
    """
    prompt = f"{message}\n\nCode:\n{code}"
    # Truncate so oversized inputs don't overflow the model's context window.
    inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True)
    # Pass the attention mask explicitly — generate() without it warns and can
    # mis-handle padded inputs. Inference only, so skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=200,
        )
    # Previously the full prompt was included in the returned text; slice it
    # off so the UI shows only the model's refactored output.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
# ---- Gradio interface ------------------------------------------------------
# Two text inputs (instruction + code) feed refactor_code; its string result
# is rendered in a plain text output component.
interface = gr.Interface(
    fn=refactor_code,
    inputs=[
        gr.Textbox(label="Message (Instruction)"),
        gr.Textbox(label="Code", lines=15),
    ],
    outputs="text",
    title="Code Refactor with Qwen Model",
    description="Provide an instruction and code to refactor. The model will return the updated code.",
)

# Launch the app. share=True requests a public gradio.live tunnel; it is a
# no-op on Hugging Face Spaces (which serve the app themselves) but lets the
# script be demoed when run locally.
interface.launch(share=True)