Spaces: Runtime error
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
import torch
import os

# Login with token from HF Space Secrets (Settings → Secrets)
login(os.environ["HF_TOKEN"])

model_id = "UBC-NLP/NileChat-3B"

# Load tokenizer and model with authentication
tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ["HF_TOKEN"])
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    token=os.environ["HF_TOKEN"]
)

# Prompt → reply function. gr.ChatInterface calls its fn with two positional
# arguments (message, history); accepting only one raises a TypeError at runtime.
def chat(message, history):
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    output = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        top_p=0.95,
        temperature=0.8
    )
    # Decode only the newly generated tokens; string-replacing the prompt out
    # of the full decode is brittle when tokenization alters whitespace.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

# Gradio chat interface
gr.ChatInterface(chat, title="NileCoach 🇪🇬 Masry LLM Agent").launch()
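
If the NileChat-3B tokenizer ships a chat template (an assumption; check the model card), it is more idiomatic to build the prompt with apply_chat_template and carry the running conversation, rather than tokenizing the raw message string. A sketch, with chat_templated as a hypothetical drop-in replacement for chat above:

# Hypothetical variant of chat() that applies the model's chat template,
# assuming the tokenizer defines one (verify on the NileChat-3B model card).
def chat_templated(message, history):
    # With type="messages", Gradio passes history as a list of
    # {"role": ..., "content": ...} dicts, so it concatenates directly.
    messages = list(history) + [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # append the assistant-turn marker
        return_tensors="pt",
    ).to(model.device)
    output = model.generate(
        input_ids,
        max_new_tokens=256,
        do_sample=True,
        top_p=0.95,
        temperature=0.8,
    )
    # Return only the tokens generated after the templated prompt
    return tokenizer.decode(
        output[0][input_ids.shape[-1]:], skip_special_tokens=True
    ).strip()

gr.ChatInterface(chat_templated, type="messages",
                 title="NileCoach 🇪🇬 Masry LLM Agent").launch()

This gives the model multi-turn context for free, since the full history is re-templated on every call.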
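
A Spaces "Runtime error" can also come from a missing dependency rather than the app code; device_map="auto" in particular requires the accelerate package at import time. A minimal requirements.txt sketch for this Space (the package list is an assumption, version pins omitted):

# requirements.txt — assumed dependency list for this Space
gradio
transformers
torch
accelerate  # needed for device_map="auto"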