Commit b1b2476
Parent(s): 7515eca

big renaming

Files changed:
- README.md +68 -0
- config.json +5 -5
- configuration_img2html.py → configuration_vmistral.py +14 -14
- image_processing_idefics.py +168 -0
- modeling_img2html.py → modeling_vmistral.py +21 -33
- processing_idefics.py +414 -0
README.md
CHANGED
@@ -19,6 +19,74 @@ It is based on a very early checkpoint of our forthcoming vision-language founda

 This is very much an alpha version. The goal is to kick off an effort to develop improved models capable of converting a website screenshot into actual code.

+# Code snippet
+
+```python
+import torch
+
+from PIL import Image
+from transformers import AutoModelForCausalLM, AutoProcessor
+
+from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
+from transformers.image_transforms import resize, to_channel_dimension_format
+
+DEVICE = torch.device("cuda")
+PROCESSOR = AutoProcessor.from_pretrained(
+    "HuggingFaceM4/VLM_WebSight_finetuned",
+    token=API_TOKEN,
+)
+MODEL = AutoModelForCausalLM.from_pretrained(
+    "HuggingFaceM4/VLM_WebSight_finetuned",
+    token=API_TOKEN,
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16,
+).to(DEVICE)
+image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
+BOS_TOKEN = PROCESSOR.tokenizer.bos_token
+BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
+
+
+def convert_to_rgb(image):
+    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
+    # for transparent images. The call to `alpha_composite` handles this case
+    if image.mode == "RGB":
+        return image
+
+    image_rgba = image.convert("RGBA")
+    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
+    alpha_composite = Image.alpha_composite(background, image_rgba)
+    alpha_composite = alpha_composite.convert("RGB")
+    return alpha_composite
+
+# The processor is the same as the Idefics processor except for the BILINEAR interpolation,
+# so this is a hack in order to redefine ONLY the transform method
+def custom_transform(x):
+    x = convert_to_rgb(x)
+    x = to_numpy_array(x)
+    x = resize(x, (960, 960), resample=PILImageResampling.BILINEAR)
+    x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
+    x = PROCESSOR.image_processor.normalize(
+        x,
+        mean=PROCESSOR.image_processor.image_mean,
+        std=PROCESSOR.image_processor.image_std
+    )
+    x = to_channel_dimension_format(x, ChannelDimension.FIRST)
+    x = torch.tensor(x)
+    return x
+
+inputs = PROCESSOR.tokenizer(
+    f"{BOS_TOKEN}<fake_token_around_image>{'<image>' * image_seq_len}<fake_token_around_image>",
+    return_tensors="pt",
+    add_special_tokens=False,
+)
+inputs["pixel_values"] = PROCESSOR.image_processor([image], transform=custom_transform)
+inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
+generated_ids = MODEL.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_length=4096)
+generated_text = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+print(generated_text)
+```
+
 # Model Details

 - **Developed by:** Hugging Face
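The README snippet added above uses two names it never defines: `API_TOKEN` (a Hugging Face access token) and `image` (the screenshot to convert). A minimal setup sketch under those assumptions; the environment variable name and the file path are hypothetical, not part of the commit:

```python
import os

from PIL import Image

# Assumption: a Hugging Face access token is exported as HF_TOKEN in the environment.
API_TOKEN = os.environ["HF_TOKEN"]

# Assumption: the website screenshot to convert is a local PNG file.
image = Image.open("screenshot.png")
```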
config.json
CHANGED
@@ -6,12 +6,12 @@
   "alpha_type": "float",
   "alphas_initializer_range": 0.0,
   "architectures": [
-    "Img2HTMLForVisionText2Text"
+    "VMistralForVisionText2Text"
   ],
   "attention_dropout": 0.0,
   "auto_map": {
-    "AutoConfig": "configuration_img2html.Img2HTMLConfig",
-    "AutoModelForCausalLM": "modeling_img2html.Img2HTMLForVisionText2Text"
+    "AutoConfig": "configuration_vmistral.VMistralConfig",
+    "AutoModelForCausalLM": "modeling_vmistral.VMistralForVisionText2Text"
   },
   "bos_token_id": 1,
   "cross_layer_interval": 1,
@@ -27,7 +27,7 @@
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 32768,
-  "model_type": "img2html",
+  "model_type": "vmistral",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
@@ -52,7 +52,7 @@
   "hidden_size": 1152,
   "image_size": 960,
   "intermediate_size": 4304,
-  "model_type": "img2html",
+  "model_type": "vmistral",
   "num_attention_heads": 16,
   "num_hidden_layers": 27,
   "patch_size": 14
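The `auto_map` entries are what let this repository ship its own modeling code: when a checkpoint is loaded with `trust_remote_code=True`, the `Auto*` classes resolve to the classes named here instead of classes bundled with `transformers`. A brief sketch of the loading path these entries enable (the checkpoint name is taken from this repository; the dtype choice mirrors the README snippet and is otherwise an assumption):

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

# "AutoConfig" in auto_map points at configuration_vmistral.VMistralConfig.
config = AutoConfig.from_pretrained(
    "HuggingFaceM4/VLM_WebSight_finetuned", trust_remote_code=True
)

# "AutoModelForCausalLM" in auto_map points at modeling_vmistral.VMistralForVisionText2Text.
model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceM4/VLM_WebSight_finetuned",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
```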
configuration_img2html.py → configuration_vmistral.py
RENAMED
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" Img2HTML model configuration"""
+""" VMistral model configuration"""
 from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging

@@ -20,14 +20,14 @@ from transformers.utils import logging
 logger = logging.get_logger(__name__)

 MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "HuggingFaceM4/
+    "HuggingFaceM4/VLM_WebSight_finetuned": "https://huggingface.co/HuggingFaceM4/VLM_WebSight_finetuned/resolve/main/config.json",
 }


-class Img2HTMLVisionConfig(PretrainedConfig):
+class VMistralVisionConfig(PretrainedConfig):
     r"""
     """
-    model_type = "img2html"
+    model_type = "vmistral"

     def __init__(
         self,
@@ -63,7 +63,7 @@ class Img2HTMLVisionConfig(PretrainedConfig):
         self._flash_attn_2_enabled = _flash_attn_2_enabled


-class Img2HTMLPerceiverConfig(PretrainedConfig):
+class VMistralPerceiverConfig(PretrainedConfig):
     r"""
     TThis is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate an
     Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
@@ -89,7 +89,7 @@ class Img2HTMLPerceiverConfig(PretrainedConfig):
         qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
             Whether or not to use qk layer norms in perceiver
     """
-    model_type = "img2html"
+    model_type = "vmistral"

     def __init__(
         self,
@@ -109,7 +109,7 @@ class Img2HTMLPerceiverConfig(PretrainedConfig):
         super().__init__(**kwargs)


-class Img2HTMLConfig(PretrainedConfig):
+class VMistralConfig(PretrainedConfig):
     r"""
     This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate an
     Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
@@ -201,7 +201,7 @@ class Img2HTMLConfig(PretrainedConfig):
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
-    model_type = "img2html"
+    model_type = "vmistral"
     is_composition = False

     def __init__(
@@ -280,17 +280,17 @@ class Img2HTMLConfig(PretrainedConfig):
         self.attention_dropout = attention_dropout

         if perceiver_config is None:
-            self.perceiver_config = Img2HTMLPerceiverConfig()
+            self.perceiver_config = VMistralPerceiverConfig()
         elif isinstance(perceiver_config, dict):
-            self.perceiver_config = Img2HTMLPerceiverConfig(**perceiver_config)
-        elif isinstance(perceiver_config, Img2HTMLPerceiverConfig):
+            self.perceiver_config = VMistralPerceiverConfig(**perceiver_config)
+        elif isinstance(perceiver_config, VMistralPerceiverConfig):
             self.perceiver_config = perceiver_config

         if vision_config is None:
-            self.vision_config = Img2HTMLVisionConfig()
+            self.vision_config = VMistralVisionConfig()
         elif isinstance(vision_config, dict):
-            self.vision_config = Img2HTMLVisionConfig(**vision_config)
-        elif isinstance(vision_config, Img2HTMLVisionConfig):
+            self.vision_config = VMistralVisionConfig(**vision_config)
+        elif isinstance(vision_config, VMistralVisionConfig):
             self.vision_config = vision_config

         super().__init__(
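As the `perceiver_config`/`vision_config` branches above show, `VMistralConfig` accepts its sub-configurations either as ready-made objects, as plain dicts, or not at all (in which case defaults are built internally). A small sketch of the three call styles, assuming the renamed module is importable locally as `configuration_vmistral` and that the vision keys mirror this repo's config.json:

```python
from configuration_vmistral import VMistralConfig, VMistralVisionConfig

# No sub-configs: defaults are instantiated inside __init__.
cfg_default = VMistralConfig()

# A dict is promoted to a VMistralVisionConfig (values taken from config.json).
cfg_from_dict = VMistralConfig(vision_config={"image_size": 960, "patch_size": 14})

# An existing instance is used as-is.
cfg_from_obj = VMistralConfig(vision_config=VMistralVisionConfig(image_size=960))
```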
image_processing_idefics.py
ADDED
@@ -0,0 +1,168 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Idefics."""

from typing import Callable, Dict, List, Optional, Union

from PIL import Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available


IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073]
IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711]


def convert_to_rgb(image):
    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
    # for transparent images. The call to `alpha_composite` handles this case
    if image.mode == "RGB":
        return image

    image_rgba = image.convert("RGBA")
    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
    alpha_composite = Image.alpha_composite(background, image_rgba)
    alpha_composite = alpha_composite.convert("RGB")
    return alpha_composite


class IdeficsImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Idefics image processor.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            Resize to image size
        image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
            overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
            Can be overridden by the `image_std` parameter in the `preprocess` method.
        image_num_channels (`int`, *optional*, defaults to 3):
            Number of image channels.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        image_size: int = 224,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        image_num_channels: Optional[int] = 3,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.image_num_channels = image_num_channels
        self.image_mean = image_mean
        self.image_std = image_std

    def preprocess(
        self,
        images: ImageInput,
        image_num_channels: Optional[int] = 3,
        image_size: Optional[Dict[str, int]] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        transform: Callable = None,
        **kwargs,
    ) -> TensorType.PYTORCH:
        """
        Preprocess a batch of images.

        Args:
            images (`ImageInput`):
                A list of images to preprocess.
            image_size (`int`, *optional*, defaults to `self.image_size`):
                Resize to image size
            image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
                Number of image channels.
            image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
                Mean to use if normalizing the image. This is a float or list of floats the length of the number of
                channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can
                be overridden by the `image_mean` parameter in the `preprocess` method.
            image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
                Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
                number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
                method. Can be overridden by the `image_std` parameter in the `preprocess` method.
            transform (`Callable`, *optional*, defaults to `None`):
                A custom transform function that accepts a single image can be passed for training. For example,
                `torchvision.Compose` can be used to compose multiple transforms. If `None` - an inference mode is
                assumed - and then a preset of inference-specific transforms will be applied to the images

        Returns:
            a PyTorch tensor of the processed images

        """
        image_size = image_size if image_size is not None else self.image_size
        image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = (image_size, image_size)

        if isinstance(images, list) and len(images) == 0:
            return []

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # For training a user needs to pass their own set of transforms as a Callable.
        # For reference this is what was used in the original IDEFICS training:
        # transform = transforms.Compose([
        #     convert_to_rgb,
        #     transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
        #     transforms.ToTensor(),
        #     transforms.Normalize(mean=image_mean, std=image_std),
        # ])
        if transform is not None:
            if not is_torch_available():
                raise ImportError("To pass in `transform` torch must be installed")
            import torch

            images = [transform(x) for x in images]
            return torch.stack(images)

        # for inference we do the exact transforms that were used to train IDEFICS
        images = [convert_to_rgb(x) for x in images]
        # further transforms expect numpy arrays
        images = [to_numpy_array(x) for x in images]
        images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
        images = [self.rescale(image=image, scale=1 / 255) for image in images]
        images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
        images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
        # TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available
        images = BatchFeature(data={"pixel_values": images}, tensor_type=TensorType.PYTORCH)["pixel_values"]

        return images
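A brief sketch of using this image processor on its own; with no `transform` argument it applies the inference-time pipeline shown above (RGB conversion, bicubic resize, rescale, normalize, channels-first) and returns a stacked PyTorch tensor. The sketch assumes the same class that ships in recent `transformers` releases and a hypothetical local screenshot:

```python
from PIL import Image
from transformers import IdeficsImageProcessor

processor = IdeficsImageProcessor(image_size=960)  # 960 matches this checkpoint's vision config
image = Image.open("screenshot.png")               # hypothetical input file

pixel_values = processor.preprocess([image])
print(pixel_values.shape)  # torch.Size([1, 3, 960, 960])
```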
modeling_img2html.py → modeling_vmistral.py
RENAMED
@@ -17,7 +17,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" PyTorch Img2HTML model."""
+""" PyTorch VMistral model."""
 from dataclasses import dataclass
 import inspect
 import math
@@ -43,7 +43,7 @@ from transformers import PreTrainedModel
 from transformers.utils import logging
 from transformers.modeling_outputs import ModelOutput

-from .configuration_img2html import Img2HTMLConfig
+from .configuration_vmistral import VMistralConfig
 from .vision import SiglipVisionModel


@@ -55,16 +55,16 @@ if is_flash_attn_2_available():

 logger = logging.get_logger(__name__)

-_CONFIG_FOR_DOC = "Img2HTMLConfig"
+_CONFIG_FOR_DOC = "VMistralConfig"

-Img2HTML_PRETRAINED_MODEL_ARCHIVE_LIST = [
-    "HuggingFaceM4/
+VMistral_PRETRAINED_MODEL_ARCHIVE_LIST = [
+    "HuggingFaceM4/VLM_WebSight_finetuned"
 ]

 @dataclass
-class Img2HTMLBaseModelOutputWithPast(ModelOutput):
+class VMistralBaseModelOutputWithPast(ModelOutput):
     """
-    Base class for Img2HTML model's outputs that may also contain a past key/values (to speed up sequential decoding).
+    Base class for VMistral model's outputs that may also contain a past key/values (to speed up sequential decoding).

     Args:
         last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
@@ -107,9 +107,9 @@ class Img2HTMLBaseModelOutputWithPast(ModelOutput):


 @dataclass
-class Img2HTMLCausalLMOutputWithPast(ModelOutput):
+class VMistralCausalLMOutputWithPast(ModelOutput):
     """
-    Base class for Img2HTML causal language model (or autoregressive) outputs.
+    Base class for VMistral causal language model (or autoregressive) outputs.

     Args:
         loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
@@ -162,7 +162,6 @@ def expand_inputs_for_generation(
     input_ids = input_ids.index_select(0, expanded_return_idx)
     model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
     model_kwargs["image_hidden_states"] = model_kwargs.get("image_hidden_states", None)
-    # model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None)

     if "token_type_ids" in model_kwargs:
         token_type_ids = model_kwargs["token_type_ids"]
@@ -171,11 +170,6 @@
     if attention_mask is not None:
         model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)

-    # if model_kwargs["image_attention_mask"] is not None:
-    #     model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select(
-    #         0, expanded_return_idx
-    #     )
-
     if model_kwargs["pixel_values"] is not None:
         model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)

@@ -203,10 +197,6 @@ def update_model_kwargs_for_generation(outputs, model_kwargs):
         model_kwargs["attention_mask"] = torch.cat(
             [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
         )
-    # if "image_attention_mask" in model_kwargs:
-    #     image_attention_mask = model_kwargs["image_attention_mask"]
-    #     last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
-    #     model_kwargs["image_attention_mask"] = last_mask

     # Get the precomputed image_hidden_states
     model_kwargs["image_hidden_states"] = outputs.image_hidden_states
@@ -234,7 +224,6 @@ def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):

     pixel_values = kwargs.get("pixel_values", None)
     image_hidden_states = kwargs.get("image_hidden_states", None)
-    # image_attention_mask = kwargs.get("image_attention_mask", None)

     return {
         "input_ids": input_ids,
@@ -245,7 +234,6 @@ def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):
         "token_type_ids": token_type_ids,
         "pixel_values": pixel_values,
         "image_hidden_states": image_hidden_states,
-        # "image_attention_mask": image_attention_mask,
     }


@@ -696,7 +684,7 @@ class MistralAttention(nn.Module):
     and "Generating Long Sequences with Sparse Transformers".
     """

-    def __init__(self, config: Img2HTMLConfig, qk_layer_norms: bool = False):
+    def __init__(self, config: VMistralConfig, qk_layer_norms: bool = False):
         super().__init__()
         self.config = config
         self.hidden_size = config.hidden_size
@@ -1091,7 +1079,7 @@ class MistralFlashAttention2(MistralAttention):


 class MistralDecoderLayer(nn.Module):
-    def __init__(self, config: Img2HTMLConfig):
+    def __init__(self, config: VMistralConfig):
         super().__init__()
         self.hidden_size = config.hidden_size
         self.self_attn = (
@@ -1174,7 +1162,7 @@ MISTRAL_START_DOCSTRING = r"""
     and behavior.

     Parameters:
-        config ([`Img2HTMLConfig`]):
+        config ([`VMistralConfig`]):
             Model configuration class with all the parameters of the model. Initializing with a config file does not
             load the weights associated with the model, only the configuration. Check out the
             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
@@ -1186,7 +1174,7 @@ MISTRAL_START_DOCSTRING = r"""
     MISTRAL_START_DOCSTRING,
 )
 class VMistralPreTrainedModel(PreTrainedModel):
-    config_class = Img2HTMLConfig
+    config_class = VMistralConfig
     base_model_prefix = "model"
     supports_gradient_checkpointing = True
     _no_split_modules = ["MistralDecoderLayer"]
@@ -1288,10 +1276,10 @@ class VMistralModel(VMistralPreTrainedModel):
     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]

     Args:
-        config: Img2HTMLConfig
+        config: VMistralConfig
     """

-    def __init__(self, config: Img2HTMLConfig, vision_model=None):
+    def __init__(self, config: VMistralConfig, vision_model=None):
         super().__init__(config)
         self.config = config
         self.padding_idx = config.pad_token_id
@@ -1435,7 +1423,7 @@ class VMistralModel(VMistralPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, Img2HTMLBaseModelOutputWithPast]:
+    ) -> Union[Tuple, VMistralBaseModelOutputWithPast]:
         device = input_ids.device if input_ids is not None else inputs_embeds.device

         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
@@ -1599,7 +1587,7 @@ class VMistralModel(VMistralPreTrainedModel):
                 for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
                 if v is not None
             )
-        return Img2HTMLBaseModelOutputWithPast(
+        return VMistralBaseModelOutputWithPast(
             last_hidden_state=hidden_states,
             past_key_values=next_cache,
             hidden_states=all_hidden_states,
@@ -1608,7 +1596,7 @@ class VMistralModel(VMistralPreTrainedModel):
         )


-class Img2HTMLForVisionText2Text(VMistralPreTrainedModel):
+class VMistralForVisionText2Text(VMistralPreTrainedModel):
     _tied_weights_keys = ["lm_head.weight"]

     def __init__(self, config, vision_model=None):
@@ -1665,7 +1653,7 @@ class Img2HTMLForVisionText2Text(VMistralPreTrainedModel):
         output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings

     @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
-    @replace_return_docstrings(output_type=Img2HTMLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+    @replace_return_docstrings(output_type=VMistralCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
         input_ids: torch.LongTensor = None,
@@ -1680,7 +1668,7 @@ class Img2HTMLForVisionText2Text(VMistralPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, Img2HTMLCausalLMOutputWithPast]:
+    ) -> Union[Tuple, VMistralCausalLMOutputWithPast]:
         r"""
         Args:
             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -1736,7 +1724,7 @@ class Img2HTMLForVisionText2Text(VMistralPreTrainedModel):
             output = (logits,) + outputs[1:]
             return (loss,) + output if loss is not None else output

-        return Img2HTMLCausalLMOutputWithPast(
+        return VMistralCausalLMOutputWithPast(
             loss=loss,
             logits=logits,
             past_key_values=outputs.past_key_values,
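The two renamed dataclasses behave like standard `transformers` `ModelOutput` containers, so callers can read the extra `image_hidden_states` field next to the usual `loss`/`logits`/`past_key_values`. A small sketch, assuming `MODEL` and `inputs` were built as in the README snippet and `return_dict` is left at its default:

```python
import torch

with torch.no_grad():
    # Returns a VMistralCausalLMOutputWithPast when return_dict resolves to True.
    outputs = MODEL(**inputs)

print(outputs.logits.shape)                      # (batch, sequence_length, vocab_size)
print(outputs.past_key_values is not None)       # cached key/values for fast decoding
print(outputs.image_hidden_states is not None)   # precomputed image features reused by generate()
```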
processing_idefics.py
ADDED
@@ -0,0 +1,414 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for IDEFICS.
"""

from typing import Callable, List, Optional, Union
from urllib.parse import urlparse

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
from ...utils import TensorType, is_torch_available


if is_torch_available():
    import torch


IMAGE_TOKEN = "<image>"


# copied from m4.training.packing
def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
    # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]

    # If any of images index are more than num_classes, set them to -1.
    # Words after the max number of images allowed have been seen don't attend on anything
    if num_classes != -1:
        incremental_mask[incremental_mask >= num_classes] = -1

    negatives = incremental_mask == -1
    incremental_mask[negatives] = 0
    attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
    attn_mask[negatives, :] = 0
    return attn_mask


# copied from m4.training.packing
def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
    image_attention_mask = torch.full_like(input_ids, fill_value=-1)
    next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
    image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
    eod_token_id = tokenizer.eos_token_id
    for batch_idx in range(input_ids.size(0)):
        count = -1
        seen_eod = False
        for idx, token_id in enumerate(input_ids[batch_idx]):
            if token_id == image_token_id:
                count += 1
                image_attention_mask[batch_idx][idx] = count
                seen_eod = False
            else:
                image_attention_mask[batch_idx][idx] = count

            if seen_eod:
                image_attention_mask[batch_idx][idx] = -1

            if token_id == eod_token_id:
                seen_eod = True

    for batch_idx in range(input_ids.size(0)):
        count = -1
        seen_eod = False
        for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
            token_id = input_ids[batch_idx][idx]
            if token_id == image_token_id:
                count += 1
                next_image_attention_mask[batch_idx][idx] = count
                seen_eod = False
            else:
                next_image_attention_mask[batch_idx][idx] = count

            if token_id == eod_token_id:
                seen_eod = True

            if seen_eod:
                next_image_attention_mask[batch_idx][idx] = -1

        non_negative_indices = next_image_attention_mask[batch_idx] != -1
        next_image_attention_mask[batch_idx][non_negative_indices] -= count
        next_image_attention_mask[batch_idx][non_negative_indices] *= -1

    return image_attention_mask, next_image_attention_mask


def is_url(string):
    """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
    invalidated the url"""
    if " " in string:
        return False
    result = urlparse(string)
    return all([result.scheme, result.netloc])


class IdeficsProcessor(ProcessorMixin):
    r"""
    Constructs a IDEFICS processor which wraps a LLama tokenizer and IDEFICS image processor into a single processor.

    [`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
    the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.

    Args:
        image_processor (`IdeficsImageProcessor`):
            An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
        tokenizer (`LlamaTokenizerFast`):
            An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
        image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "IdeficsImageProcessor"
    tokenizer_class = "LlamaTokenizerFast"

    def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)

        self.default_image_dims = (
            self.image_processor.image_num_channels,
            self.image_processor.image_size,
            self.image_processor.image_size,
        )

        self.tokenizer_was_trained_with_end_of_utterance_token = (
            True
            if "<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
            else False
        )

    def __call__(
        self,
        prompts: Union[List[TextInput], List[List[TextInput]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        transform: Callable = None,
        add_eos_token=False,
        add_end_of_utterance_token=None,
        debug=False,
        return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    ) -> BatchEncoding:
        """This method takes batched or non-batched prompts made of text and images and converts them into prompts that
        the model was trained on and prepares the image pixel values for the model to process.

        Args:
            prompts (`Union[List[TextInput], [List[List[TextInput]]]]`):
                either a single prompt or a batched list of prompts - see the detailed description immediately after
                the end of the arguments doc section.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`, *optional*):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            transform (`Callable`, *optional*):
                A custom transform function that accepts a single image can be passed for training. For example,
                `torchvision.Compose` can be used to compose multiple functions. If `None` a preset inference-specific
                set of transforms will be applied to the images
            add_eos_token (`bool`, *optional*, defaults to `False`):
                Adds `eos_token` at the end of the final prompt if True`
            add_end_of_utterance_token (`bool`, *optional*)
                Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
                image). If `None` the tokenizer will be checked instead and if this token is found in
                `additional_special_tokens` then the value will be `True`.
            debug (`bool`, *optional*, defaults to `False`):
                `True` value will help debug prompt generation by dumping useful information
            return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
                The type of tensors to return. Can be one of:
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.

        Returns:
            a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
            directly passed to `model.generate`

        Detailed explanation:

        Each entry in `prompts` is either a text to be passed as is or an image that will be processed.

        An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.

        When the processor encounters an image it'll inject `<fake_token_around_image><image><fake_token_around_image>`
        entry into the prompt.

        Example:

        ```python
        checkpoint = "HuggingFaceM4/idefics-9b"
        processor = AutoProcessor.from_pretrained(checkpoint)
        url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
        img = processor.image_processor.fetch_images([url])[0]

        prompts = [
            "User:",
            img,
            "Describe this image.\nAssistant: An image of two kittens in grass.\n",
            "User:",
            "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
            "Describe this image.\nAssistant:",
        ]

        inputs = processor(prompts, return_tensors="pt")
        generated_ids = model.generate(**inputs, max_length=100)
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        ```

        In this example the `prompts` will be converted into:

        ```
        <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
        Assistant: An image of two kittens in grass.
        User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
        Assistant:'
        ```

        and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
        `pixel_values` dict entry of the return value.

        This example also examplifies that images can be passed as objects or as text urls. It can be seen that the
        first image is passed as object and the second one as a url.

        To do training do:

        ```python
        image_transform = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
                ),
                transforms.ToTensor(),
                transforms.Normalize(mean=self.image_mean, std=self.image_std),
            ]
        )
        inputs = processor(prompts, transform=image_transform, return_tensors="pt")
        ```

        In order to help debug prompt generation enable `debug=True` which will show you what's happening.

        """

        # if the value isn't overriden by the user, check if the tokenizer was trained with this token and then use it
        if add_end_of_utterance_token is None:
            add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token

        # turn non-batched prompts into batched
        if not any(isinstance(i, list) for i in prompts):
            prompts = [prompts]

        fake_token = "<fake_token_around_image>"
        image_token = "<image>"
        end_of_utterance_token = "<end_of_utterance>"

        def image_tokens(last_was_image):
            if last_was_image:
                return image_token + fake_token
            else:
                return fake_token + image_token + fake_token

        all_prompts = []
        all_images = []
        for sample in prompts:
            # the model was trained on samples starting with <s>
            full_text = f"{self.tokenizer.bos_token}"

            # an image can either be an image object in the item or the url, everything else is a verbatim prompt text
            image_objects = []
            last_was_image = False
            last_was_text = False
            for i, item in enumerate(sample):
                if i > 0:
                    last_was_text = True if not last_was_image else False

                if isinstance(item, str):
                    item = item.strip(" ")
                    if is_url(item):
                        image = self.image_processor.fetch_images(item)
                        full_text += image_tokens(last_was_image)
                        image_objects.append(image)
                        last_was_image = True
                    else:
                        # we add end_of_utterance_token between each subsequent text prompts (but not at the last one!)
                        if add_end_of_utterance_token and last_was_text:
                            full_text += end_of_utterance_token
                        full_text += item
                        last_was_image = False
                else:
                    # must be an image obj
                    full_text += image_tokens(last_was_image)
                    image_objects.append(item)
                    last_was_image = True

            if add_eos_token:
                full_text += self.tokenizer.eos_token

            if debug is True:
                print(f"{full_text=}")

            image_objects = self.image_processor(image_objects, transform=transform)

            all_prompts.append(full_text)
            all_images.append(image_objects)

        text_encoding = self.tokenizer(
            text=all_prompts,
            add_special_tokens=False,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
        )
        all_texts = text_encoding["input_ids"]

        max_seq_len = max(len(x) for x in all_texts)

        # max_num_images has to be at least 1 even when there are no images
        max_num_images = max(len(x) for x in all_images)
        max_num_images = max(1, max_num_images)

        at_least_one_image = sum(len(x) for x in all_images) > 0
        output_input_ids = []
        output_images = []
        output_attention_masks = []
        for text, images in zip(all_texts, all_images):
            padded_input_ids = [self.tokenizer.pad_token_id] * max_seq_len
            unpadded_seq_len = len(text)
            start = max_seq_len - unpadded_seq_len
            padded_input_ids[start:] = text[:max_seq_len]

            attention_mask = torch.zeros((max_seq_len,), dtype=torch.long)
            attention_mask[start:] = 1

            image_count = padded_input_ids.count(self.image_token_id)
            local_max_num_images = min(image_count, max_num_images)

            current_images = images[:local_max_num_images]

            if len(current_images) > 0:
                padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
                padded_image_tensor[: current_images.size(0)] = current_images
            else:
                padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)

            output_images.append(padded_image_tensor)
            output_input_ids.append(torch.tensor(padded_input_ids))

            output_attention_masks.append(attention_mask)

        output_input_ids = torch.stack(output_input_ids)
        output_images = torch.stack(output_images)
        output_attention_masks = torch.stack(output_attention_masks)

        if at_least_one_image:
            image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer)
            image_attention_mask = incremental_to_binary_attention_mask(
                image_attention_mask, num_classes=max_num_images
            )
        else:
            # in full language mode we set the image mask to all-0s
            image_attention_mask = torch.zeros(
                output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
            )

        return BatchFeature(
            data={
                "input_ids": output_input_ids,
                "attention_mask": output_attention_masks,
                "pixel_values": output_images,
                "image_attention_mask": image_attention_mask,
            }
        )

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
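To make the `incremental_to_binary_attention_mask` helper defined earlier in processing_idefics.py concrete, here is a small worked example of the conversion its comment describes, reproducing the same steps with an assumed two-image budget (`num_classes=2`):

```python
import torch
from torch.nn import functional as F

# -1 = no image seen yet, 0 = attend to image 0, 1 = attend to image 1
incremental_mask = torch.tensor([[-1, 0, 0, 1, 1]])

num_classes = 2
negatives = incremental_mask == -1
clamped = incremental_mask.clone()
clamped[negatives] = 0
attn_mask = F.one_hot(clamped, num_classes=num_classes)
attn_mask[negatives, :] = 0

print(attn_mask)
# tensor([[[0, 0],
#          [1, 0],
#          [1, 0],
#          [0, 1],
#          [0, 1]]])
```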