import re

import torch
import torch.nn as nn
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel


class IdentityMap(nn.Module):
    """Pass-through projector: returns the vision features unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x, *args, **kwargs):
        return x

    @property
    def config(self):
        return {"mm_projector_type": "identity"}


class SimpleResBlock(nn.Module):
    """Pre-norm residual MLP: normalizes the input, then adds an MLP branch onto it."""

    def __init__(self, channels):
        super().__init__()
        self.pre_norm = nn.LayerNorm(channels)
        self.proj = nn.Sequential(
            nn.Linear(channels, channels), nn.GELU(), nn.Linear(channels, channels)
        )

    def forward(self, x):
        x = self.pre_norm(x)
        return x + self.proj(x)


class DownSampleBlock(nn.Module):
    """Reduces the ViT token count 4x by folding each 2x2 patch neighbourhood
    into the channel dimension."""

    def forward(self, x):
        vit_embeds = x
        # Tokens arrive as (batch, seq, channels); recover the square patch grid.
        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.flat_square(vit_embeds)
        # Flatten the smaller grid back into a token sequence.
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        return vit_embeds

    def flat_square(self, x):
        n, w, h, c = x.size()
        # Pad odd spatial dimensions with zeros so both halve cleanly.
        if w % 2 == 1:
            x = torch.concat([x, torch.zeros((n, 1, h, c), dtype=x.dtype).to(x.device)], dim=1).contiguous()
            n, w, h, c = x.size()
        if h % 2 == 1:
            x = torch.concat([x, torch.zeros((n, w, 1, c), dtype=x.dtype).to(x.device)], dim=2).contiguous()
            n, w, h, c = x.size()
        # Merge pairs along one spatial axis, transpose, then merge pairs along the
        # other: (n, w, h, c) -> (n, h/2, w/2, 4c).
        x = x.view(n, w, int(h / 2), int(c * 2))
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(n, int(h / 2), int(w / 2), int(c * 4))
        return x
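

# A minimal sanity check for DownSampleBlock (illustrative sizes and a hypothetical
# helper name, not part of the original file): a 24x24 grid of 1024-dim ViT tokens
# (576 tokens) should come out as a 12x12 grid of 4096-dim tokens (144 tokens).
def _demo_downsample_shapes():
    tokens = torch.randn(2, 576, 1024)  # (batch, 24*24 patches, channels)
    out = DownSampleBlock()(tokens)
    assert out.shape == (2, 144, 4096)  # 4x fewer tokens, 4x wider channels
    return out.shape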


class MultimodalProjectorConfig(PretrainedConfig):
    model_type = "v2l_projector"

    def __init__(self, mm_projector_type: str = None, **kwargs):
        super().__init__()
        self.mm_projector_type = mm_projector_type


class MultimodalProjector(PreTrainedModel):
    """Projects vision-tower features into the language model's embedding space.

    The architecture is selected by mm_projector_cfg.mm_projector_type.
    """

    config_class = MultimodalProjectorConfig

    def __init__(
        self, mm_projector_cfg: MultimodalProjectorConfig, config: PretrainedConfig
    ):
        super().__init__(mm_projector_cfg)
        mm_projector_type = mm_projector_cfg.mm_projector_type
        if mm_projector_type == "identity":
            self.layers = IdentityMap()
        elif mm_projector_type == "linear":
            self.layers = nn.Linear(config.mm_hidden_size, config.hidden_size)
        elif mm_projector_type == "mlp_downsample":
            # 4x token reduction, then a two-layer GELU MLP into the LLM width.
            self.layers = nn.Sequential(
                DownSampleBlock(),
                nn.LayerNorm(config.mm_hidden_size * 4),
                nn.Linear(config.mm_hidden_size * 4, config.hidden_size),
                nn.GELU(),
                nn.Linear(config.hidden_size, config.hidden_size),
            )
        else:
            # "mlp{N}x_gelu": an N-layer MLP with GELU between the linear layers.
            mlp_gelu_match = re.match(r"^mlp(\d+)x_gelu$", mm_projector_type)
            if mlp_gelu_match:
                mlp_depth = int(mlp_gelu_match.group(1))
                modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
                for _ in range(1, mlp_depth):
                    modules.append(nn.GELU())
                    modules.append(nn.Linear(config.hidden_size, config.hidden_size))
                self.layers = nn.Sequential(*modules)
            else:
                raise ValueError(f"Unknown projector type: {mm_projector_type}")

    def forward(self, x, *args, **kwargs):
        return self.layers(x)
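

# Example of the regex-named variant (added note; the depth counts the Linear layers):
# mm_projector_type="mlp2x_gelu" builds
#   nn.Sequential(
#       nn.Linear(config.mm_hidden_size, config.hidden_size),
#       nn.GELU(),
#       nn.Linear(config.hidden_size, config.hidden_size),
#   )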
| AutoConfig.register("v2l_projector", MultimodalProjectorConfig) | |
| AutoModel.register(MultimodalProjectorConfig, MultimodalProjector) |
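

# Minimal usage sketch (illustrative sizes and hypothetical config values, not part
# of the original file): build an "mlp_downsample" projector and push a batch of
# ViT tokens through it.
if __name__ == "__main__":
    llm_cfg = PretrainedConfig(mm_hidden_size=1024, hidden_size=2048)
    proj_cfg = MultimodalProjectorConfig(mm_projector_type="mlp_downsample")
    projector = MultimodalProjector(proj_cfg, llm_cfg)

    vit_tokens = torch.randn(2, 576, 1024)  # batch of 24x24 patch grids
    out = projector(vit_tokens)
    print(out.shape)  # torch.Size([2, 144, 2048])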