import math
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoTokenizer, AutoModel
import gradio as gr
from PIL import Image
# === Distribute model layers across multiple GPUs ===
def split_model(model_path):
    from transformers import AutoConfig

    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers
    # Count GPU 0 as half a GPU: it also hosts the vision encoder,
    # embeddings, and output head, so it gets fewer LLM layers.
    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
    num_layers_per_gpu = [num_layers_per_gpu] * world_size
    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for _ in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    # Keep all non-transformer modules together on GPU 0.
    device_map['vision_model'] = 0
    device_map['mlp1'] = 0
    device_map['language_model.model.tok_embeddings'] = 0
    device_map['language_model.model.embed_tokens'] = 0
    device_map['language_model.output'] = 0
    device_map['language_model.model.norm'] = 0
    device_map['language_model.model.rotary_emb'] = 0
    device_map['language_model.lm_head'] = 0
    # Pin the last LLM layer to GPU 0 as well, next to the output head.
    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
    return device_map
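# Worked example (a sketch, assuming 2 GPUs and the 48 hidden layers that
# InternVL3-14B's LLM config reports): num_layers_per_gpu starts at
# ceil(48 / 1.5) = 32, GPU 0 is then halved to 16, so the loop places
# layers 0-15 on GPU 0 and 16-47 on GPU 1; the final override moves
# layer 47 back to GPU 0 alongside the vision tower and embeddings.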
# === Model path ===
model_path = "OpenGVLab/InternVL3-14B"
device_map = split_model(model_path)
# === Load model and tokenizer ===
# Note: this remote-code checkpoint is driven via model.chat() with manually
# preprocessed pixel_values (see infer below), so no AutoProcessor is loaded.
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    trust_remote_code=True,
    device_map=device_map
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
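# Optional sanity check: models dispatched with a device_map expose the final
# placement as model.hf_device_map; print it to confirm the vision tower and
# embeddings landed on GPU 0 as intended.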
# === Inference function ===
# A minimal single-tile version of the model card's preprocessing: ImageNet-
# normalized 448x448 input, no dynamic tiling of high-resolution images.
transform = T.Compose([
    T.Resize((448, 448), interpolation=InterpolationMode.BICUBIC),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

def infer(image: Image.Image, prompt: str):
    pixel_values = transform(image.convert("RGB")).unsqueeze(0).to(torch.bfloat16).cuda()
    question = f"<image>\n{prompt}"
    answer = model.chat(tokenizer, pixel_values, question, dict(max_new_tokens=512))
    return answer
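# The model card's full pipeline additionally tiles large images with its
# dynamic_preprocess helper (up to 12 tiles plus a thumbnail) before stacking
# them into pixel_values; the single-tile transform above trades some detail
# on high-resolution inputs for brevity. Quick smoke test (example.jpg is a
# hypothetical local file):
#   print(infer(Image.open("example.jpg"), "Describe this image."))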
# === Gradio interface ===
gr.Interface(
    fn=infer,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Your Prompt", placeholder="Ask a question about the image...")
    ],
    outputs="text",
    title="InternVL3-14B Multimodal Demo",
    description="Upload an image and ask a question. InternVL3-14B will answer using vision + language."
).launch()
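# On Hugging Face Spaces the bare launch() above is sufficient; for self-hosted
# use, launch(server_name="0.0.0.0", server_port=7860) exposes the app on the
# host's network interfaces (both are standard Gradio launch arguments).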