Spaces: Running on Zero
import gradio as gr
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
)
import os
from threading import Thread
import spaces
import subprocess
| print("\n=== Environment Setup ===") | |
| if torch.cuda.is_available(): | |
| print(f"GPU detected: {torch.cuda.get_device_name(0)}") | |
| try: | |
| subprocess.run( | |
| "pip install flash-attn --no-build-isolation", | |
| shell=True, | |
| check=True, | |
| ) | |
| print("✅ flash-attn installed successfully") | |
| except subprocess.CalledProcessError as e: | |
| print("⚠️ flash-attn installation failed:", e) | |
| else: | |
| print("⚙️ CPU detected — skipping flash-attn installation") | |
| # Disable flash-attn references safely | |
| os.environ["DISABLE_FLASH_ATTN"] = "1" | |
| os.environ["FLASH_ATTENTION_SKIP_CUDA_BUILD"] = "TRUE" | |
| try: | |
| from transformers.utils import import_utils | |
| if "flash_attn" not in import_utils.PACKAGE_DISTRIBUTION_MAPPING: | |
| import_utils.PACKAGE_DISTRIBUTION_MAPPING["flash_attn"] = "flash-attn" | |
| except Exception as e: | |
| print("⚠️ Patch skipped:", e) | |
| token = os.environ["HF_TOKEN"] | |
| model = AutoModelForCausalLM.from_pretrained( | |
| "microsoft/phi-4", | |
| token=token, | |
| trust_remote_code=True, | |
| torch_dtype=torch.bfloat16 | |
| ) | |
| tok = AutoTokenizer.from_pretrained("microsoft/phi-4", token=token) | |
terminators = [
    tok.eos_token_id,
]

if torch.cuda.is_available():
    device = torch.device("cuda")
    print(f"Using GPU: {torch.cuda.get_device_name(device)}")
else:
    device = torch.device("cpu")
    print("Using CPU")

model = model.to(device)
# Chat callback. The @spaces.GPU decorator requests ZeroGPU hardware for each call
# (this is what the otherwise-unused `spaces` import is for).
@spaces.GPU
def chat(message, history, temperature, do_sample, max_tokens):
    # Rebuild the conversation in the chat-template format expected by the tokenizer.
    conversation = []
    for item in history:
        conversation.append({"role": "user", "content": item[0]})
        if item[1] is not None:
            conversation.append({"role": "assistant", "content": item[1]})
    conversation.append({"role": "user", "content": message})
    prompt = tok.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    model_inputs = tok([prompt], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(
        tok, timeout=20.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=do_sample,  # honor the "Sampling" checkbox instead of hard-coding True
        temperature=temperature,
        eos_token_id=terminators,
    )
    if temperature == 0:
        generate_kwargs["do_sample"] = False
    # Run generation in a background thread and stream partial output to the UI.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        yield partial_text
    yield partial_text
demo = gr.ChatInterface(
    fn=chat,
    examples=[["Write me a poem about Machine Learning."]],
    additional_inputs_accordion=gr.Accordion(
        label="⚙️ Parameters", open=False, render=False
    ),
    additional_inputs=[
        gr.Slider(
            minimum=0, maximum=1, step=0.1, value=0.9, label="Temperature", render=False
        ),
        gr.Checkbox(label="Sampling", value=True, render=False),
        gr.Slider(
            minimum=128,
            maximum=4096,
            step=1,
            value=512,
            label="Max new tokens",
            render=False,
        ),
    ],
    stop_btn="Stop Generation",
    title="Chat With LLMs",
    description="Now Running [microsoft/phi-4](https://huggingface.co/microsoft/phi-4)",
)
demo.launch()
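
A minimal requirements.txt sketch for this Space, listing only the packages the script imports (flash-attn is installed at runtime above, and the `spaces` package is preinstalled on ZeroGPU hardware; version pins are omitted since none appear in the original):

gradio
spaces
torch
transformers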