Changyao committed on
Commit
20f5bc9
·
verified ·
1 Parent(s): 7d37e36

Upload folder using huggingface_hub

Browse files
added_tokens.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</box>": 92544,
3
+ "</img>": 92547,
4
+ "</quad>": 92549,
5
+ "</ref>": 92551,
6
+ "<IMG_CONTEXT>": 92546,
7
+ "<IMG_FRAME_BREAK>": 92555,
8
+ "<IMG_LINE_BREAK>": 92554,
9
+ "<box>": 92545,
10
+ "<img>": 92548,
11
+ "<img_uncond>": 92553,
12
+ "<quad>": 92550,
13
+ "<ref>": 92552
14
+ }
config.json ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_commit_hash": null,
3
+ "anyres_image_size": true,
4
+ "architectures": [
5
+ "NaViL"
6
+ ],
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_navil_chat.NaViLChatConfig",
9
+ "AutoModel": "modeling_navil_chat.NaViL",
10
+ "AutoModelForCausalLM": "modeling_navil_chat.NaViL"
11
+ },
12
+ "downsample_ratio": 0.5,
13
+ "force_image_size": 32,
14
+ "llm_config": {
15
+ "_name_or_path": "./pretrained/internlm2-1_8b",
16
+ "add_cross_attention": false,
17
+ "architectures": [
18
+ "InternLM2VEForCausalLM"
19
+ ],
20
+ "attn_implementation": "flash_attention_2",
21
+ "bad_words_ids": null,
22
+ "begin_suppress_tokens": null,
23
+ "bias": false,
24
+ "bos_token_id": 1,
25
+ "chunk_size_feed_forward": 0,
26
+ "cross_attention_hidden_size": null,
27
+ "decoder_start_token_id": null,
28
+ "diversity_penalty": 0.0,
29
+ "do_sample": false,
30
+ "early_stopping": false,
31
+ "encoder_no_repeat_ngram_size": 0,
32
+ "eos_token_id": 2,
33
+ "exponential_decay_length_penalty": null,
34
+ "finetuning_task": null,
35
+ "forced_bos_token_id": null,
36
+ "forced_eos_token_id": null,
37
+ "hidden_act": "silu",
38
+ "hidden_size": 2048,
39
+ "id2label": {
40
+ "0": "LABEL_0",
41
+ "1": "LABEL_1"
42
+ },
43
+ "initializer_range": 0.02,
44
+ "intermediate_size": 8192,
45
+ "is_decoder": false,
46
+ "is_encoder_decoder": false,
47
+ "label2id": {
48
+ "LABEL_0": 0,
49
+ "LABEL_1": 1
50
+ },
51
+ "length_penalty": 1.0,
52
+ "max_length": 20,
53
+ "max_position_embeddings": 32768,
54
+ "min_length": 0,
55
+ "model_type": "internlm2",
56
+ "mrope_section": [
57
+ 16,
58
+ 24,
59
+ 24
60
+ ],
61
+ "no_repeat_ngram_size": 0,
62
+ "num_attention_heads": 16,
63
+ "num_beam_groups": 1,
64
+ "num_beams": 1,
65
+ "num_hidden_layers": 24,
66
+ "num_key_value_heads": 8,
67
+ "num_return_sequences": 1,
68
+ "output_attentions": false,
69
+ "output_hidden_states": false,
70
+ "output_scores": false,
71
+ "pad_token_id": 2,
72
+ "prefix": null,
73
+ "pretraining_tp": 1,
74
+ "problem_type": null,
75
+ "pruned_heads": {},
76
+ "remove_invalid_values": false,
77
+ "repetition_penalty": 1.0,
78
+ "return_dict": true,
79
+ "return_dict_in_generate": false,
80
+ "rms_norm_eps": 1e-05,
81
+ "rope_scaling": null,
82
+ "rope_theta": 1000000,
83
+ "sep_token_id": null,
84
+ "suppress_tokens": null,
85
+ "task_specific_params": null,
86
+ "temperature": 1.0,
87
+ "tf_legacy_loss": false,
88
+ "tie_encoder_decoder": false,
89
+ "tie_word_embeddings": false,
90
+ "tokenizer_class": null,
91
+ "top_k": 50,
92
+ "top_p": 1.0,
93
+ "torch_dtype": "bfloat16",
94
+ "torchscript": false,
95
+ "transformers_version": "4.45.0",
96
+ "typical_p": 1.0,
97
+ "use_bfloat16": false,
98
+ "use_cache": false,
99
+ "use_mrope": false,
100
+ "vocab_size": 92556
101
+ },
102
+ "max_dynamic_patch": 24576,
103
+ "min_dynamic_patch": 256,
104
+ "model_type": "navil_chat",
105
+ "pad2square": false,
106
+ "ps_version": "v2",
107
+ "scale_downsample_ratio": 0.7071,
108
+ "select_layer": -1,
109
+ "template": "internlm2-chat",
110
+ "torch_dtype": "bfloat16",
111
+ "transformers_version": null,
112
+ "use_backbone_lora": 0,
113
+ "use_llm_lora": 0,
114
+ "vision_config": {
115
+ "_name_or_path": "",
116
+ "add_cross_attention": false,
117
+ "architectures": [
118
+ "NaViLVisionModelAnyRes"
119
+ ],
120
+ "attention_dropout": 0.0,
121
+ "bad_words_ids": null,
122
+ "begin_suppress_tokens": null,
123
+ "bos_token_id": null,
124
+ "chunk_size_feed_forward": 0,
125
+ "cross_attention_hidden_size": null,
126
+ "decoder_start_token_id": null,
127
+ "diversity_penalty": 0.0,
128
+ "do_sample": false,
129
+ "downsample_ratio": 0.5,
130
+ "drop_path_rate": 0.0,
131
+ "dropout": 0.0,
132
+ "early_stopping": false,
133
+ "encoder_no_repeat_ngram_size": 0,
134
+ "eos_token_id": null,
135
+ "exponential_decay_length_penalty": null,
136
+ "finetuning_task": null,
137
+ "forced_bos_token_id": null,
138
+ "forced_eos_token_id": null,
139
+ "fullatt_block_indexes": null,
140
+ "hidden_act": "gelu",
141
+ "hidden_size": 1280,
142
+ "id2label": {
143
+ "0": "LABEL_0",
144
+ "1": "LABEL_1"
145
+ },
146
+ "image_size": 32,
147
+ "initializer_factor": 1.0,
148
+ "initializer_range": 0.02,
149
+ "intermediate_size": 5120,
150
+ "is_decoder": false,
151
+ "is_encoder_decoder": false,
152
+ "label2id": {
153
+ "LABEL_0": 0,
154
+ "LABEL_1": 1
155
+ },
156
+ "layer_norm_eps": 1e-06,
157
+ "length_penalty": 1.0,
158
+ "max_length": 20,
159
+ "min_length": 0,
160
+ "model_type": "navil_vit",
161
+ "no_repeat_ngram_size": 0,
162
+ "norm_type": "layer_norm",
163
+ "num_attention_heads": 20,
164
+ "num_beam_groups": 1,
165
+ "num_beams": 1,
166
+ "num_channels": 3,
167
+ "num_hidden_layers": 32,
168
+ "num_return_sequences": 1,
169
+ "output_attentions": false,
170
+ "output_hidden_states": false,
171
+ "output_scores": false,
172
+ "pad_token_id": null,
173
+ "patch_size": 16,
174
+ "prefix": null,
175
+ "problem_type": null,
176
+ "pruned_heads": {},
177
+ "qk_normalization": false,
178
+ "qkv_bias": true,
179
+ "remove_invalid_values": false,
180
+ "repetition_penalty": 1.0,
181
+ "return_dict": true,
182
+ "return_dict_in_generate": false,
183
+ "sep_token_id": null,
184
+ "suppress_tokens": null,
185
+ "task_specific_params": null,
186
+ "temperature": 1.0,
187
+ "tf_legacy_loss": false,
188
+ "tie_encoder_decoder": false,
189
+ "tie_word_embeddings": true,
190
+ "tokenizer_class": null,
191
+ "top_k": 50,
192
+ "top_p": 1.0,
193
+ "torch_dtype": "bfloat16",
194
+ "torchscript": false,
195
+ "transformers_version": "4.45.0",
196
+ "typical_p": 1.0,
197
+ "use_bfloat16": true,
198
+ "use_flash_attn": true,
199
+ "vision_fullatt_block_indexes": null,
200
+ "window_size": 8
201
+ }
202
+ }
configuration_internlm2.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ InternLM2 model configuration"""
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import logging
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
24
+
25
+
26
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
27
+ class InternLM2Config(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
30
+ an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 32000):
39
+ Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`InternLM2Model`]
41
+ hidden_size (`int`, *optional*, defaults to 4096):
42
+ Dimension of the hidden representations.
43
+ intermediate_size (`int`, *optional*, defaults to 11008):
44
+ Dimension of the MLP representations.
45
+ num_hidden_layers (`int`, *optional*, defaults to 32):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 32):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ num_key_value_heads (`int`, *optional*):
50
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
51
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
52
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
53
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
54
+ by meanpooling all the original heads within that group. For more details checkout [this
55
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
56
+ `num_attention_heads`.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
58
+ The non-linear activation function (function or string) in the decoder.
59
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ rms_norm_eps (`float`, *optional*, defaults to 1e-12):
65
+ The epsilon used by the rms normalization layers.
66
+ use_cache (`bool`, *optional*, defaults to `True`):
67
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
68
+ relevant if `config.is_decoder=True`.
69
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
70
+ Whether to tie weight embeddings
71
+ Example:
72
+
73
+ """
74
+ model_type = 'internlm2'
75
+ _auto_class = 'AutoConfig'
76
+
77
+ def __init__( # pylint: disable=W0102
78
+ self,
79
+ vocab_size=103168,
80
+ hidden_size=4096,
81
+ intermediate_size=11008,
82
+ num_hidden_layers=32,
83
+ num_attention_heads=32,
84
+ num_key_value_heads=None,
85
+ hidden_act='silu',
86
+ max_position_embeddings=2048,
87
+ initializer_range=0.02,
88
+ rms_norm_eps=1e-6,
89
+ use_cache=True,
90
+ pad_token_id=0,
91
+ bos_token_id=1,
92
+ eos_token_id=2,
93
+ tie_word_embeddings=False,
94
+ bias=True,
95
+ rope_theta=10000,
96
+ rope_scaling=None,
97
+ attn_implementation='eager',
98
+ skip_final_norm_weight=False,
99
+ **kwargs,
100
+ ):
101
+ self.vocab_size = vocab_size
102
+ self.max_position_embeddings = max_position_embeddings
103
+ self.hidden_size = hidden_size
104
+ self.intermediate_size = intermediate_size
105
+ self.num_hidden_layers = num_hidden_layers
106
+ self.num_attention_heads = num_attention_heads
107
+ self.bias = bias
108
+
109
+ if num_key_value_heads is None:
110
+ num_key_value_heads = num_attention_heads
111
+ self.num_key_value_heads = num_key_value_heads
112
+
113
+ self.hidden_act = hidden_act
114
+ self.initializer_range = initializer_range
115
+ self.rms_norm_eps = rms_norm_eps
116
+ self.use_cache = use_cache
117
+ self.rope_theta = rope_theta
118
+ self.rope_scaling = rope_scaling
119
+ self._rope_scaling_validation()
120
+
121
+ self.attn_implementation = attn_implementation
122
+ if self.attn_implementation is None:
123
+ self.attn_implementation = 'eager'
124
+
125
+ self.skip_final_norm_weight = skip_final_norm_weight
126
+ super().__init__(
127
+ pad_token_id=pad_token_id,
128
+ bos_token_id=bos_token_id,
129
+ eos_token_id=eos_token_id,
130
+ tie_word_embeddings=tie_word_embeddings,
131
+ **kwargs,
132
+ )
133
+
134
+ def _rope_scaling_validation(self):
135
+ """
136
+ Validate the `rope_scaling` configuration.
137
+ """
138
+ if self.rope_scaling is None:
139
+ return
140
+
141
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
142
+ raise ValueError(
143
+ '`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, '
144
+ f'got {self.rope_scaling}'
145
+ )
146
+ rope_scaling_type = self.rope_scaling.get('type', None)
147
+ rope_scaling_factor = self.rope_scaling.get('factor', None)
148
+ if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
149
+ raise ValueError(
150
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
151
+ )
152
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
153
+ raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
154
+
155
+
156
+
157
class InternLM2VEConfig(InternLM2Config):
    """Configuration for the InternLM2 'VE' variant.

    Identical to [`InternLM2Config`] except for its ``model_type``
    (`'internlm2ve'`); every constructor argument is forwarded unchanged to
    the parent class. (The 'VE' suffix presumably stands for visual expert —
    confirm against the modeling code.)
    """

    model_type = 'internlm2ve'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        # Collect the explicit arguments and hand everything to the parent
        # constructor in a single call.
        forwarded = dict(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            bias=bias,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            attn_implementation=attn_implementation,
        )
        super().__init__(**forwarded, **kwargs)
configuration_navil_chat.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import copy
8
+
9
+ from transformers import AutoConfig, LlamaConfig
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ from .configuration_navil_vit import NaViLVisionConfig
14
+
15
+ from .configuration_internlm2 import InternLM2Config
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+
20
class NaViLChatConfig(PretrainedConfig):
    """Composite configuration for NaViL chat models.

    Bundles a vision-encoder config ([`NaViLVisionConfig`]) and an LLM config
    ([`InternLM2Config`]) together with the multimodal wiring options
    (dynamic-patch limits, downsample ratios, prompt template, LoRA flags).

    Args:
        vision_config (`dict`, *optional*): kwargs for `NaViLVisionConfig`; defaults used when `None`.
        llm_config (`dict`, *optional*): kwargs for the LLM config; must describe an
            `InternLM2VEForCausalLM` architecture. Defaults used when `None`.
        use_backbone_lora (`int`, *optional*, defaults to 0): LoRA rank for the vision backbone (0 disables).
        use_llm_lora (`int`, *optional*, defaults to 0): LoRA rank for the LLM (0 disables).
        pad2square (`bool`, *optional*, defaults to `False`): whether images are padded to a square.
        select_layer (`int`, *optional*, defaults to -1): which vision-encoder layer's features to use.
        force_image_size (`int`, *optional*): fixed image size override, if any.
        downsample_ratio (`float`, *optional*, defaults to 0.5): visual-token downsample ratio,
            also propagated onto `vision_config`.
        template (`str`, *optional*): name of the conversation prompt template.
        anyres_image_size (`bool`, *optional*, defaults to `True`): enable arbitrary-resolution input.
        scale_downsample_ratio (`float`, *optional*, defaults to 0.7071): extra scale downsample factor.
        ps_version (`str`, *optional*, defaults to `'v1'`): pixel-shuffle version.
        min_dynamic_patch (`int`, *optional*, defaults to 256): minimum number of dynamic patches.
        max_dynamic_patch (`int`, *optional*, defaults to 24576): maximum number of dynamic patches.

    Raises:
        ValueError: if `llm_config` requests an architecture other than `InternLM2VEForCausalLM`.
    """
    model_type = 'navil_chat'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        pad2square=False,
        select_layer=-1,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        anyres_image_size=True,
        scale_downsample_ratio=0.7071,
        ps_version='v1',
        min_dynamic_patch=256,
        max_dynamic_patch=24576,
        **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the NaViLVisionConfig with default values.')

        if llm_config is None:
            llm_config = {'architectures': ['InternLM2VEForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`InternLM2VEForCausalLM`).')

        self.vision_config = NaViLVisionConfig(**vision_config)
        self.vision_config.downsample_ratio = downsample_ratio
        # Be robust to caller-supplied dicts that omit `architectures`
        # (previously a KeyError): fall back to the same default used above.
        architectures = llm_config.get('architectures') or ['InternLM2VEForCausalLM']
        if architectures[0] == 'InternLM2VEForCausalLM':
            self.llm_config = InternLM2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(architectures[0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.pad2square = pad2square
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template

        self.anyres_image_size = anyres_image_size
        self.scale_downsample_ratio = scale_downsample_ratio
        self.ps_version = ps_version  # pixel shuffle version
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'ps_version: {self.ps_version}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        output = copy.deepcopy(self.__dict__)
        # The sub-configs are PretrainedConfig objects; expand them recursively
        # so the result is a plain, JSON-serializable dict.
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['pad2square'] = self.pad2square
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template

        output['anyres_image_size'] = self.anyres_image_size
        output['scale_downsample_ratio'] = self.scale_downsample_ratio
        output['ps_version'] = self.ps_version
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
configuration_navil_vit.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import os
8
+ from typing import Union
9
+
10
+ from transformers.configuration_utils import PretrainedConfig
11
+ from transformers.utils import logging
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+
16
class NaViLVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModelAnyRes`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. Supported strings
            include `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`.
        norm_type (`str`, *optional*, defaults to `"rms_norm"`):
            Which normalization layer to use (e.g. `"rms_norm"` or `"layer_norm"`).
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
        downsample_ratio (`float`, *optional*, defaults to 1.0):
            Downsample ratio applied to the visual tokens.
        fullatt_block_indexes (*optional*):
            Indexes of blocks using full attention, if any (semantics defined by the modeling code).
        window_size (`int`, *optional*, defaults to 8):
            Attention window size (semantics defined by the modeling code).
    """

    model_type = 'navil_vit'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        downsample_ratio=1.0,
        fullatt_block_indexes=None,
        window_size=8,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Record every architectural hyper-parameter on the instance.
        hyper_params = dict(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            dropout=dropout,
            drop_path_rate=drop_path_rate,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_channels=num_channels,
            patch_size=patch_size,
            image_size=image_size,
            initializer_range=initializer_range,
            initializer_factor=initializer_factor,
            attention_dropout=attention_dropout,
            layer_norm_eps=layer_norm_eps,
            hidden_act=hidden_act,
            norm_type=norm_type,
            qkv_bias=qkv_bias,
            qk_normalization=qk_normalization,
            use_flash_attn=use_flash_attn,
            downsample_ratio=downsample_ratio,
            fullatt_block_indexes=fullatt_block_indexes,
            window_size=window_size,
        )
        for attr_name, attr_value in hyper_params.items():
            setattr(self, attr_name, attr_value)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        """Load the vision sub-config, unwrapping a composite `vision_config` entry when present."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # Composite checkpoints nest the vision settings under 'vision_config'.
        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
constants.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Shared special-token strings and image-normalization statistics for NaViL."""

# --- Image markup tokens -------------------------------------------------
IMG_CONTEXT_TOKEN = '<IMG_CONTEXT>'
IMG_START_TOKEN = '<img>'
IMG_END_TOKEN = '</img>'
IMG_LINE_BREAK_TOKEN = '<IMG_LINE_BREAK>'
IMG_FRAME_BREAK_TOKEN = '<IMG_FRAME_BREAK>'

# --- Grounding / referring-expression markup tokens ----------------------
QUAD_START_TOKEN = '<quad>'
QUAD_END_TOKEN = '</quad>'
REF_START_TOKEN = '<ref>'
REF_END_TOKEN = '</ref>'
BOX_START_TOKEN = '<box>'
BOX_END_TOKEN = '</box>'

# Unconditional-image placeholder token (exact semantics defined by the
# modeling code — presumably used for unconditional/guidance-free passes).
IMG_UNCOND_TOKEN = '<img_uncond>'

# --- Per-channel (R, G, B) normalization statistics ----------------------
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
CLIP_MEAN = (0.4814546, 0.4578275, 0.40821073)
CLIP_STD = (0.2686295, 0.2613025, 0.2757711)
SIGLIP_MEAN = (0.5, 0.5, 0.5)
SIGLIP_STD = (0.5, 0.5, 0.5)
VAE_MEAN = (0.5, 0.5, 0.5)
VAE_STD = (0.5, 0.5, 0.5)

# All special tokens added to the tokenizer. Do not reorder: this ordering
# matches the consecutive token IDs (92544-92555) in added_tokens.json.
SPECIAL_TOKEN_LIST = [
    BOX_END_TOKEN, BOX_START_TOKEN,
    IMG_CONTEXT_TOKEN, IMG_END_TOKEN,
    IMG_START_TOKEN, QUAD_END_TOKEN,
    QUAD_START_TOKEN, REF_END_TOKEN,
    REF_START_TOKEN, IMG_UNCOND_TOKEN,
    IMG_LINE_BREAK_TOKEN, IMG_FRAME_BREAK_TOKEN,
]
conversation.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversation prompt templates.
3
+
4
+ We kindly request that you import fastchat instead of copying this file if you wish to use it.
5
+ If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
6
+ """
7
+
8
+ import dataclasses
9
+ from enum import IntEnum, auto
10
+ from typing import Any, Dict, List, Tuple, Union
11
+
12
+
13
class SeparatorStyle(IntEnum):
    """Separator styles.

    Enumerates the prompt-assembly conventions that ``Conversation.get_prompt``
    dispatches on. Integer values are assigned by ``auto()`` in declaration
    order, so members must not be reordered — serialized data may rely on the
    numeric values.
    """

    ADD_COLON_SINGLE = auto()        # "role: message" with one separator
    ADD_COLON_TWO = auto()           # "role: message" alternating sep / sep2
    ADD_COLON_SPACE_SINGLE = auto()  # like ADD_COLON_SINGLE; empty turn ends "role: "
    NO_COLON_SINGLE = auto()         # "rolemessage" with one separator, no colon
    NO_COLON_TWO = auto()            # no colon, alternating sep / sep2
    ADD_NEW_LINE_SINGLE = auto()     # "role\nmessage" with one separator
    LLAMA2 = auto()                  # Llama-2 [INST] style
    CHATGLM = auto()                 # ChatGLM "[Round N]" style
    CHATML = auto()                  # ChatML "role\nmessage<sep>\n" style
    CHATINTERN = auto()              # InternLM chat style
    DOLLY = auto()                   # Dolly "role:\nmessage" style
    RWKV = auto()                    # RWKV style (newlines collapsed in messages)
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()                # ChatGLM3 "role\n message" style
    INTERNVL_ZH = auto()
    MPT = auto()
34
+
35
+
36
+ @dataclasses.dataclass
37
+ class Conversation:
38
+ """A class that manages prompt templates and keeps all conversation history."""
39
+
40
+ # The name of this template
41
+ name: str
42
+ # The template of the system prompt
43
+ system_template: str = '{system_message}'
44
+ # The system message
45
+ system_message: str = ''
46
+ # The names of two roles
47
+ roles: Tuple[str] = ('USER', 'ASSISTANT')
48
+ # All messages. Each item is (role, message).
49
+ messages: List[List[str]] = ()
50
+ # The number of few shot examples
51
+ offset: int = 0
52
+ # The separator style and configurations
53
+ sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
54
+ sep: str = '\n'
55
+ sep2: str = None
56
+ # Stop criteria (the default one is EOS token)
57
+ stop_str: Union[str, List[str]] = None
58
+ # Stops generation if meeting any token in this list
59
+ stop_token_ids: List[int] = None
60
+
61
+ def get_prompt(self) -> str:
62
+ """Get the prompt for generation."""
63
+ system_prompt = self.system_template.format(system_message=self.system_message)
64
+ if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
65
+ ret = system_prompt + self.sep
66
+ for role, message in self.messages:
67
+ if message:
68
+ ret += role + ': ' + message + self.sep
69
+ else:
70
+ ret += role + ':'
71
+ return ret
72
+ elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
73
+ seps = [self.sep, self.sep2]
74
+ ret = system_prompt + seps[0]
75
+ for i, (role, message) in enumerate(self.messages):
76
+ if message:
77
+ ret += role + ': ' + message + seps[i % 2]
78
+ else:
79
+ ret += role + ':'
80
+ return ret
81
+ elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
82
+ ret = system_prompt + self.sep
83
+ for role, message in self.messages:
84
+ if message:
85
+ ret += role + ': ' + message + self.sep
86
+ else:
87
+ ret += role + ': ' # must be end with a space
88
+ return ret
89
+ elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
90
+ ret = '' if system_prompt == '' else system_prompt + self.sep
91
+ for role, message in self.messages:
92
+ if message:
93
+ ret += role + '\n' + message + self.sep
94
+ else:
95
+ ret += role + '\n'
96
+ return ret
97
+ elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
98
+ ret = system_prompt
99
+ for role, message in self.messages:
100
+ if message:
101
+ ret += role + message + self.sep
102
+ else:
103
+ ret += role
104
+ return ret
105
+ elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
106
+ seps = [self.sep, self.sep2]
107
+ ret = system_prompt
108
+ for i, (role, message) in enumerate(self.messages):
109
+ if message:
110
+ ret += role + message + seps[i % 2]
111
+ else:
112
+ ret += role
113
+ return ret
114
+ elif self.sep_style == SeparatorStyle.RWKV:
115
+ ret = system_prompt
116
+ for i, (role, message) in enumerate(self.messages):
117
+ if message:
118
+ ret += (
119
+ role
120
+ + ': '
121
+ + message.replace('\r\n', '\n').replace('\n\n', '\n')
122
+ )
123
+ ret += '\n\n'
124
+ else:
125
+ ret += role + ':'
126
+ return ret
127
+ elif self.sep_style == SeparatorStyle.LLAMA2:
128
+ seps = [self.sep, self.sep2]
129
+ if self.system_message:
130
+ ret = system_prompt
131
+ else:
132
+ ret = '[INST] '
133
+ for i, (role, message) in enumerate(self.messages):
134
+ tag = self.roles[i % 2]
135
+ if message:
136
+ if i == 0:
137
+ ret += message + ' '
138
+ else:
139
+ ret += tag + ' ' + message + seps[i % 2]
140
+ else:
141
+ ret += tag
142
+ return ret
143
+ elif self.sep_style == SeparatorStyle.CHATGLM:
144
+ # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
145
+ # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
146
+ round_add_n = 1 if self.name == 'chatglm2' else 0
147
+ if system_prompt:
148
+ ret = system_prompt + self.sep
149
+ else:
150
+ ret = ''
151
+
152
+ for i, (role, message) in enumerate(self.messages):
153
+ if i % 2 == 0:
154
+ ret += f'[Round {i//2 + round_add_n}]{self.sep}'
155
+
156
+ if message:
157
+ ret += f'{role}:{message}{self.sep}'
158
+ else:
159
+ ret += f'{role}:'
160
+ return ret
161
+ elif self.sep_style == SeparatorStyle.CHATML:
162
+ ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
163
+ for role, message in self.messages:
164
+ if message:
165
+ ret += role + '\n' + message + self.sep + '\n'
166
+ else:
167
+ ret += role + '\n'
168
+ return ret
169
+ elif self.sep_style == SeparatorStyle.CHATGLM3:
170
+ ret = ''
171
+ if self.system_message:
172
+ ret += system_prompt
173
+ for role, message in self.messages:
174
+ if message:
175
+ ret += role + '\n' + ' ' + message
176
+ else:
177
+ ret += role
178
+ return ret
179
+ elif self.sep_style == SeparatorStyle.CHATINTERN:
180
+ # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
181
+ seps = [self.sep, self.sep2]
182
+ ret = system_prompt
183
+ for i, (role, message) in enumerate(self.messages):
184
+ # if i % 2 == 0:
185
+ # ret += "<s>"
186
+ if message:
187
+ ret += role + ':' + message + seps[i % 2] + '\n'
188
+ else:
189
+ ret += role + ':'
190
+ return ret
191
+ elif self.sep_style == SeparatorStyle.DOLLY:
192
+ seps = [self.sep, self.sep2]
193
+ ret = system_prompt
194
+ for i, (role, message) in enumerate(self.messages):
195
+ if message:
196
+ ret += role + ':\n' + message + seps[i % 2]
197
+ if i % 2 == 1:
198
+ ret += '\n\n'
199
+ else:
200
+ ret += role + ':\n'
201
+ return ret
202
+ elif self.sep_style == SeparatorStyle.PHOENIX:
203
+ ret = system_prompt
204
+ for role, message in self.messages:
205
+ if message:
206
+ ret += role + ': ' + '<s>' + message + '</s>'
207
+ else:
208
+ ret += role + ': ' + '<s>'
209
+ return ret
210
+ elif self.sep_style == SeparatorStyle.ROBIN:
211
+ ret = system_prompt + self.sep
212
+ for role, message in self.messages:
213
+ if message:
214
+ ret += role + ':\n' + message + self.sep
215
+ else:
216
+ ret += role + ':\n'
217
+ return ret
218
+ elif self.sep_style == SeparatorStyle.FALCON_CHAT:
219
+ ret = ''
220
+ if self.system_message:
221
+ ret += system_prompt + self.sep
222
+ for role, message in self.messages:
223
+ if message:
224
+ ret += role + ': ' + message + self.sep
225
+ else:
226
+ ret += role + ':'
227
+
228
+ return ret
229
+ elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
230
+ seps = [self.sep2, self.sep]
231
+ ret = self.system_message + seps[0]
232
+ for i, (role, message) in enumerate(self.messages):
233
+ if message:
234
+ ret += role + ': ' + message + seps[i % 2]
235
+ else:
236
+ ret += role + ':'
237
+ return ret
238
+ elif self.sep_style == SeparatorStyle.MPT:
239
+ ret = system_prompt + self.sep
240
+ for role, message in self.messages:
241
+ if message:
242
+ if type(message) is tuple:
243
+ message, _, _ = message
244
+ ret += role + message + self.sep
245
+ else:
246
+ ret += role
247
+ return ret
248
+ else:
249
+ raise ValueError(f'Invalid style: {self.sep_style}')
250
+
251
+ def set_system_message(self, system_message: str):
252
+ """Set the system message."""
253
+ self.system_message = system_message
254
+
255
+ def append_message(self, role: str, message: str):
256
+ """Append a new message."""
257
+ self.messages.append([role, message])
258
+
259
+ def update_last_message(self, message: str):
260
+ """Update the last output.
261
+
262
+ The last message is typically set to be None when constructing the prompt,
263
+ so we need to update it in-place after getting the response from a model.
264
+ """
265
+ self.messages[-1][1] = message
266
+
267
+ def to_gradio_chatbot(self):
268
+ """Convert the conversation to gradio chatbot format."""
269
+ ret = []
270
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
271
+ if i % 2 == 0:
272
+ ret.append([msg, None])
273
+ else:
274
+ ret[-1][-1] = msg
275
+ return ret
276
+
277
+ def to_openai_api_messages(self):
278
+ """Convert the conversation to OpenAI chat completion format."""
279
+ ret = [{'role': 'system', 'content': self.system_message}]
280
+
281
+ for i, (_, msg) in enumerate(self.messages[self.offset :]):
282
+ if i % 2 == 0:
283
+ ret.append({'role': 'user', 'content': msg})
284
+ else:
285
+ if msg is not None:
286
+ ret.append({'role': 'assistant', 'content': msg})
287
+ return ret
288
+
289
+ def copy(self):
290
+ return Conversation(
291
+ name=self.name,
292
+ system_template=self.system_template,
293
+ system_message=self.system_message,
294
+ roles=self.roles,
295
+ messages=[[x, y] for x, y in self.messages],
296
+ offset=self.offset,
297
+ sep_style=self.sep_style,
298
+ sep=self.sep,
299
+ sep2=self.sep2,
300
+ stop_str=self.stop_str,
301
+ stop_token_ids=self.stop_token_ids,
302
+ )
303
+
304
+ def dict(self):
305
+ return {
306
+ 'template_name': self.name,
307
+ 'system_message': self.system_message,
308
+ 'roles': self.roles,
309
+ 'messages': self.messages,
310
+ 'offset': self.offset,
311
+ }
312
+
313
+
314
+ # A global registry for all conversation templates
315
+ conv_templates: Dict[str, Conversation] = {}
316
+
317
+
318
+ def register_conv_template(template: Conversation, override: bool = False):
319
+ """Register a new conversation template."""
320
+ if not override:
321
+ assert (
322
+ template.name not in conv_templates
323
+ ), f'{template.name} has been registered.'
324
+
325
+ conv_templates[template.name] = template
326
+
327
+
328
+ def get_conv_template(name: str) -> Conversation:
329
+ """Get a conversation template."""
330
+ return conv_templates[name].copy()
331
+
332
+
333
+ # InternVL-Chat-V1-1 template
334
+ register_conv_template(
335
+ Conversation(
336
+ name='internvl_zh',
337
+ system_template='',
338
+ roles=('<human>', '<bot>'),
339
+ sep_style=SeparatorStyle.INTERNVL_ZH,
340
+ sep='</s>',
341
+ sep2=' ',
342
+ )
343
+ )
344
+
345
+
346
+ # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
347
+ # is that during training, the preprocessing function for the Hermes-2 template doesn't add
348
+ # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
349
+ # Therefore, they are completely equivalent during inference.
350
+ register_conv_template(
351
+ Conversation(
352
+ name='Hermes-2',
353
+ system_template='<|im_start|>system\n{system_message}',
354
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
355
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
356
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
357
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
358
+ sep_style=SeparatorStyle.MPT,
359
+ sep='<|im_end|>',
360
+ stop_token_ids=[
361
+ 2,
362
+ 6,
363
+ 7,
364
+ 8,
365
+ ],
366
+ stop_str='<|endoftext|>',
367
+ )
368
+ )
369
+
370
+ register_conv_template(
371
+ Conversation(
372
+ name='Hermes-2-imgen',
373
+ system_template='<|im_start|>system\n{system_message}',
374
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
375
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
376
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
377
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
378
+ sep_style=SeparatorStyle.MPT,
379
+ sep='<|im_end|>',
380
+ stop_token_ids=[
381
+ 2,
382
+ 6,
383
+ 7,
384
+ 8,
385
+ ],
386
+ stop_str='<|endoftext|>',
387
+ )
388
+ )
389
+
390
+ register_conv_template(
391
+ Conversation(
392
+ name='internlm2-chat',
393
+ system_template='<|im_start|>system\n{system_message}',
394
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
395
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
396
+ system_message='你���由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
397
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
398
+ sep_style=SeparatorStyle.MPT,
399
+ sep='<|im_end|>',
400
+ stop_token_ids=[
401
+ 2,
402
+ 92543,
403
+ 92542
404
+ ]
405
+ )
406
+ )
407
+
408
+ register_conv_template(
409
+ Conversation(
410
+ name='internlm2-chat-imgen',
411
+ system_template='<|im_start|>system\n{system_message}',
412
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
413
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
414
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
415
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
416
+ sep_style=SeparatorStyle.MPT,
417
+ sep='<|im_end|>',
418
+ stop_token_ids=[
419
+ 2,
420
+ 92543,
421
+ 92542
422
+ ]
423
+ )
424
+ )
425
+
426
+ register_conv_template(
427
+ Conversation(
428
+ name='phi3-chat',
429
+ system_template='<|system|>\n{system_message}',
430
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
431
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
432
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
433
+ roles=('<|user|>\n', '<|assistant|>\n'),
434
+ sep_style=SeparatorStyle.MPT,
435
+ sep='<|end|>',
436
+ stop_token_ids=[
437
+ 2,
438
+ 32000,
439
+ 32007
440
+ ]
441
+ )
442
+ )
443
+
444
+ register_conv_template(
445
+ Conversation(
446
+ name='qwen3-chat',
447
+ system_template='<|im_start|>system\n{system_message}',
448
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
449
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室及多家合作单位联合开发的多模态大语言模型。',
450
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
451
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n<think>\n\n</think>\n\n'),
452
+ sep_style=SeparatorStyle.MPT,
453
+ sep='<|im_end|>',
454
+ stop_token_ids=[
455
+ 2,
456
+ 92543,
457
+ 92542
458
+ ]
459
+ )
460
+ )
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.45.0"
4
+ }
image_processing_qwen2_vl.py ADDED
@@ -0,0 +1,510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """Image processor class for Qwen2-VL."""
21
+
22
+ import math
23
+ from typing import Dict, List, Optional, Union
24
+
25
+ import numpy as np
26
+
27
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
28
+ from transformers.image_transforms import (
29
+ convert_to_rgb,
30
+ resize,
31
+ to_channel_dimension_format,
32
+ pad
33
+ )
34
+ from transformers.image_utils import (
35
+ OPENAI_CLIP_MEAN,
36
+ OPENAI_CLIP_STD,
37
+ ChannelDimension,
38
+ ImageInput,
39
+ PILImageResampling,
40
+ VideoInput,
41
+ get_image_size,
42
+ infer_channel_dimension_format,
43
+ is_scaled_image,
44
+ is_valid_image,
45
+ make_list_of_images,
46
+ to_numpy_array,
47
+ valid_images,
48
+ validate_preprocess_arguments,
49
+ )
50
+ from transformers.utils import TensorType, is_vision_available, logging
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+
56
+ if is_vision_available():
57
+ from PIL import Image
58
+
59
+
60
+ def make_batched_images(images) -> List[List[ImageInput]]:
61
+ """
62
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
63
+
64
+ Args:
65
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
66
+ The input image.
67
+
68
+ Returns:
69
+ list: A list of images.
70
+ """
71
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
72
+ return [img for img_list in images for img in img_list]
73
+
74
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
75
+ return images
76
+
77
+ elif is_valid_image(images):
78
+ return [images]
79
+
80
+ raise ValueError(f"Could not make batched images from {images}")
81
+
82
+
83
+ # Copied from transformers.models.llava_next_video.image_processing_llava_next_video.make_batched_videos
84
+ def make_batched_videos(videos) -> List[VideoInput]:
85
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
86
+ return videos
87
+
88
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
89
+ if isinstance(videos[0], Image.Image):
90
+ return [videos]
91
+ elif len(videos[0].shape) == 4:
92
+ return [list(video) for video in videos]
93
+
94
+ elif is_valid_image(videos) and len(videos.shape) == 4:
95
+ return [list(videos)]
96
+
97
+ raise ValueError(f"Could not make batched video from {videos}")
98
+
99
+
100
+ def smart_resize(
101
+ height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
102
+ ):
103
+ """Rescales the image so that the following conditions are met:
104
+
105
+ 1. Both dimensions (height and width) are divisible by 'factor'.
106
+
107
+ 2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
108
+
109
+ 3. The aspect ratio of the image is maintained as closely as possible.
110
+
111
+ """
112
+ if height < factor or width < factor:
113
+ raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}")
114
+ elif max(height, width) / min(height, width) > 200:
115
+ raise ValueError(
116
+ f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
117
+ )
118
+ h_bar = round(height / factor) * factor
119
+ w_bar = round(width / factor) * factor
120
+ if h_bar * w_bar > max_pixels:
121
+ beta = math.sqrt((height * width) / max_pixels)
122
+ h_bar = math.floor(height / beta / factor) * factor
123
+ w_bar = math.floor(width / beta / factor) * factor
124
+ elif h_bar * w_bar < min_pixels:
125
+ beta = math.sqrt(min_pixels / (height * width))
126
+ h_bar = math.ceil(height * beta / factor) * factor
127
+ w_bar = math.ceil(width * beta / factor) * factor
128
+ return h_bar, w_bar
129
+
130
+
131
+ class Qwen2VLImageProcessor(BaseImageProcessor):
132
+ r"""
133
+ Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images.
134
+
135
+ Args:
136
+ do_resize (`bool`, *optional*, defaults to `True`):
137
+ Whether to resize the image's (height, width) dimensions.
138
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
139
+ Resampling filter to use when resizing the image.
140
+ do_rescale (`bool`, *optional*, defaults to `True`):
141
+ Whether to rescale the image by the specified scale `rescale_factor`.
142
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
143
+ Scale factor to use if rescaling the image.
144
+ do_normalize (`bool`, *optional*, defaults to `True`):
145
+ Whether to normalize the image.
146
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
147
+ Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
148
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
149
+ Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
150
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
151
+ Whether to convert the image to RGB.
152
+ min_pixels (`int`, *optional*, defaults to `56 * 56`):
153
+ The min pixels of the image to resize the image.
154
+ max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
155
+ The max pixels of the image to resize the image.
156
+ patch_size (`int`, *optional*, defaults to 14):
157
+ The spacial patch size of the vision encoder.
158
+ temporal_patch_size (`int`, *optional*, defaults to 2):
159
+ The temporal patch size of the vision encoder.
160
+ merge_size (`int`, *optional*, defaults to 2):
161
+ The merge size of the vision encoder to llm encoder.
162
+ """
163
+
164
+ model_input_names = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"]
165
+
166
+ def __init__(
167
+ self,
168
+ do_resize: bool = True,
169
+ do_pad: bool = False,
170
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
171
+ do_rescale: bool = True,
172
+ rescale_factor: Union[int, float] = 1 / 255,
173
+ do_normalize: bool = True,
174
+ image_mean: Optional[Union[float, List[float]]] = None,
175
+ image_std: Optional[Union[float, List[float]]] = None,
176
+ do_convert_rgb: bool = True,
177
+ min_pixels: int = 56 * 56,
178
+ max_pixels: int = 28 * 28 * 1280,
179
+ patch_size: int = 14,
180
+ temporal_patch_size: int = 2,
181
+ merge_size: int = 2,
182
+ **kwargs,
183
+ ) -> None:
184
+ super().__init__(**kwargs)
185
+ self.do_resize = do_resize
186
+ self.do_pad = do_pad
187
+ self.resample = resample
188
+ self.do_rescale = do_rescale
189
+ self.rescale_factor = rescale_factor
190
+ self.do_normalize = do_normalize
191
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
192
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
193
+ self.min_pixels = min_pixels
194
+ self.max_pixels = max_pixels
195
+ self.patch_size = patch_size
196
+ self.temporal_patch_size = temporal_patch_size
197
+ self.merge_size = merge_size
198
+ self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
199
+ self.do_convert_rgb = do_convert_rgb
200
+
201
+ def _preprocess(
202
+ self,
203
+ images: Union[ImageInput, VideoInput],
204
+ do_resize: bool = None,
205
+ do_pad: bool = None,
206
+ resample: PILImageResampling = None,
207
+ do_rescale: bool = None,
208
+ rescale_factor: float = None,
209
+ do_normalize: bool = None,
210
+ image_mean: Optional[Union[float, List[float]]] = None,
211
+ image_std: Optional[Union[float, List[float]]] = None,
212
+ do_convert_rgb: bool = None,
213
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
214
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
215
+ min_pixels: int = None,
216
+ max_pixels: int = None,
217
+ ):
218
+ """
219
+ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
220
+
221
+ Args:
222
+ images (`ImageInput`):
223
+ Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
224
+ vision_info (`List[Dict]`, *optional*):
225
+ Optional list of dictionaries containing additional information about vision inputs.
226
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
227
+ Whether to resize the image.
228
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
229
+ Whether to pad the image.
230
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
231
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
232
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
233
+ Whether to rescale the image.
234
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
235
+ Scale factor to use if rescaling the image.
236
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
237
+ Whether to normalize the image.
238
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
239
+ Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
240
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
241
+ Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
242
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
243
+ Whether to convert the image to RGB.
244
+ data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
245
+ The channel dimension format for the output image. Can be one of:
246
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
247
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
248
+ - Unset: Use the channel dimension format of the input image.
249
+ input_data_format (`ChannelDimension` or `str`, *optional*):
250
+ The channel dimension format for the input image. Can be one of:
251
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
252
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
253
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
254
+ """
255
+
256
+ min_pixels = min_pixels if min_pixels is not None else self.min_pixels
257
+ max_pixels = max_pixels if max_pixels is not None else self.max_pixels
258
+
259
+ images = make_list_of_images(images)
260
+
261
+ if do_convert_rgb:
262
+ images = [convert_to_rgb(image) for image in images]
263
+
264
+ # All transformations expect numpy arrays.
265
+ images = [to_numpy_array(image) for image in images]
266
+
267
+ if is_scaled_image(images[0]) and do_rescale:
268
+ logger.warning_once(
269
+ "It looks like you are trying to rescale already rescaled images. If the input"
270
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
271
+ )
272
+ if input_data_format is None:
273
+ # We assume that all images have the same channel dimension format.
274
+ input_data_format = infer_channel_dimension_format(images[0])
275
+
276
+ assert not (do_resize and do_pad), "Only one of `do_resize` and `do_pad` can be set to `True`."
277
+
278
+ height, width = get_image_size(images[0], channel_dim=input_data_format)
279
+ resized_height, resized_width = height, width
280
+ processed_images = []
281
+ for image in images:
282
+ if do_resize:
283
+ resized_height, resized_width = smart_resize(
284
+ height,
285
+ width,
286
+ factor=self.patch_size * self.merge_size,
287
+ min_pixels=min_pixels,
288
+ max_pixels=max_pixels,
289
+ )
290
+ image = resize(
291
+ image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
292
+ )
293
+ elif do_pad:
294
+ # 1. resize the image s.t. the total number of pixels is within the range [min_pixels, max_pixels] while maintaining the aspect ratio
295
+ resized_height, resized_width = smart_resize(
296
+ height,
297
+ width,
298
+ factor=1,
299
+ min_pixels=min_pixels,
300
+ max_pixels=max_pixels,
301
+ )
302
+ image = resize(
303
+ image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
304
+ )
305
+ # 2. pad the image to the nearest multiple of patch_size * merge_size
306
+ pad_height = math.ceil(resized_height / (self.patch_size * self.merge_size)) * self.patch_size * self.merge_size
307
+ pad_width = math.ceil(resized_width / (self.patch_size * self.merge_size)) * self.patch_size * self.merge_size
308
+ image = pad(
309
+ image,
310
+ padding=((0, pad_height - resized_height), (0, pad_width - resized_width)),
311
+ constant_values=0,
312
+ input_data_format=input_data_format,
313
+ data_format=input_data_format,
314
+ )
315
+ resized_height, resized_width = pad_height, pad_width
316
+
317
+ if do_rescale:
318
+ image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
319
+
320
+ if do_normalize:
321
+ image = self.normalize(
322
+ image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
323
+ )
324
+
325
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
326
+ processed_images.append(image)
327
+
328
+ patches = np.array(processed_images)
329
+ if data_format == ChannelDimension.LAST:
330
+ patches = patches.transpose(0, 3, 1, 2)
331
+ if patches.shape[0] == 1:
332
+ patches = np.tile(patches, (self.temporal_patch_size, 1, 1, 1))
333
+ channel = patches.shape[1]
334
+ grid_t = patches.shape[0] // self.temporal_patch_size
335
+ grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
336
+ patches = patches.reshape(
337
+ grid_t,
338
+ self.temporal_patch_size,
339
+ channel,
340
+ grid_h // self.merge_size,
341
+ self.merge_size,
342
+ self.patch_size,
343
+ grid_w // self.merge_size,
344
+ self.merge_size,
345
+ self.patch_size,
346
+ )
347
+ patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
348
+ flatten_patches = patches.reshape(
349
+ grid_t * grid_h * grid_w, channel * self.temporal_patch_size * self.patch_size * self.patch_size
350
+ )
351
+
352
+ return flatten_patches, (grid_t, grid_h, grid_w)
353
+
354
+ def preprocess(
355
+ self,
356
+ images: ImageInput,
357
+ videos: VideoInput = None,
358
+ do_resize: bool = None,
359
+ do_pad: bool = None,
360
+ size: Dict[str, int] = None,
361
+ resample: PILImageResampling = None,
362
+ do_rescale: bool = None,
363
+ rescale_factor: float = None,
364
+ do_normalize: bool = None,
365
+ image_mean: Optional[Union[float, List[float]]] = None,
366
+ image_std: Optional[Union[float, List[float]]] = None,
367
+ do_convert_rgb: bool = None,
368
+ return_tensors: Optional[Union[str, TensorType]] = None,
369
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
370
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
371
+ min_pixels: int = None,
372
+ max_pixels: int = None,
373
+ ):
374
+ """
375
+ Args:
376
+ images (`ImageInput`):
377
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
378
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
379
+ videos (`VideoInput`):
380
+ Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If
381
+ passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
382
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
383
+ Whether to resize the image.
384
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
385
+ Whether to pad the image.
386
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
387
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
388
+ the longest edge resized to keep the input aspect ratio.
389
+ resample (`int`, *optional*, defaults to `self.resample`):
390
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
391
+ has an effect if `do_resize` is set to `True`.
392
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
393
+ Whether to rescale the image.
394
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
395
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
396
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
397
+ Whether to normalize the image.
398
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
399
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
400
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
401
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
402
+ `True`.
403
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
404
+ Whether to convert the image to RGB.
405
+ return_tensors (`str` or `TensorType`, *optional*):
406
+ The type of tensors to return. Can be one of:
407
+ - Unset: Return a list of `np.ndarray`.
408
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
409
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
410
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
411
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
412
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
413
+ The channel dimension format for the output image. Can be one of:
414
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
415
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
416
+ - Unset: Use the channel dimension format of the input image.
417
+ input_data_format (`ChannelDimension` or `str`, *optional*):
418
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
419
+ from the input image. Can be one of:
420
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
421
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
422
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
423
+
424
+ """
425
+ do_resize = do_resize if do_resize is not None else self.do_resize
426
+ do_pad = do_pad if do_pad is not None else self.do_pad
427
+ size = size if size is not None else self.size
428
+ resample = resample if resample is not None else self.resample
429
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
430
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
431
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
432
+ image_mean = image_mean if image_mean is not None else self.image_mean
433
+ image_std = image_std if image_std is not None else self.image_std
434
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
435
+ min_pixels = min_pixels if min_pixels is not None else self.min_pixels
436
+ max_pixels = max_pixels if max_pixels is not None else self.max_pixels
437
+
438
+ if images is not None:
439
+ images = make_batched_images(images)
440
+ if videos is not None:
441
+ videos = make_batched_videos(videos)
442
+
443
+ if images is not None and not valid_images(images):
444
+ raise ValueError(
445
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
446
+ "torch.Tensor, tf.Tensor or jax.ndarray."
447
+ )
448
+
449
+ validate_preprocess_arguments(
450
+ rescale_factor=rescale_factor,
451
+ do_normalize=do_normalize,
452
+ image_mean=image_mean,
453
+ image_std=image_std,
454
+ do_resize=do_resize,
455
+ # do_pad=do_pad,
456
+ size=size,
457
+ resample=resample,
458
+ )
459
+
460
+ if images is not None:
461
+ pixel_values, vision_grid_thws = [], []
462
+ for image in images:
463
+ patches, image_grid_thw = self._preprocess(
464
+ image,
465
+ do_resize=do_resize,
466
+ resample=resample,
467
+ do_rescale=do_rescale,
468
+ do_pad=do_pad,
469
+ rescale_factor=rescale_factor,
470
+ do_normalize=do_normalize,
471
+ image_mean=image_mean,
472
+ image_std=image_std,
473
+ data_format=data_format,
474
+ do_convert_rgb=do_convert_rgb,
475
+ input_data_format=input_data_format,
476
+ min_pixels=min_pixels,
477
+ max_pixels=max_pixels,
478
+ )
479
+ pixel_values.extend(patches)
480
+ vision_grid_thws.append(image_grid_thw)
481
+ pixel_values = np.array(pixel_values)
482
+ vision_grid_thws = np.array(vision_grid_thws)
483
+ data = {"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws}
484
+
485
+ if videos is not None:
486
+ pixel_values, vision_grid_thws = [], []
487
+ for images in videos:
488
+ patches, video_grid_thw = self._preprocess(
489
+ images,
490
+ do_resize=do_resize,
491
+ do_pad=do_pad,
492
+ resample=resample,
493
+ do_rescale=do_rescale,
494
+ rescale_factor=rescale_factor,
495
+ do_normalize=do_normalize,
496
+ image_mean=image_mean,
497
+ image_std=image_std,
498
+ data_format=data_format,
499
+ do_convert_rgb=do_convert_rgb,
500
+ input_data_format=input_data_format,
501
+ min_pixels=min_pixels,
502
+ max_pixels=max_pixels,
503
+ )
504
+ pixel_values.extend(patches)
505
+ vision_grid_thws.append(video_grid_thw)
506
+ pixel_values = np.array(pixel_values)
507
+ vision_grid_thws = np.array(vision_grid_thws)
508
+ data = {"pixel_values_videos": pixel_values, "video_grid_thw": vision_grid_thws}
509
+
510
+ return BatchFeature(data=data, tensor_type=return_tensors)
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07cbf3b5f7f8d5dc7bcb45b8e41b2e59cb621ffa72ddfba8c581c7391429855b
3
+ size 4996221656
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cabd50a86eabf5b2520ca3817f866ac848dd7bce02bc49af9206ce1b0f3758d
3
+ size 3093013512
model.safetensors.index.json ADDED
@@ -0,0 +1,755 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 8089143808
4
+ },
5
+ "weight_map": {
6
+ "language_model.model.layers.0.attention.wo.weight": "model-00001-of-00002.safetensors",
7
+ "language_model.model.layers.0.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
8
+ "language_model.model.layers.0.attention.wqkv.weight": "model-00001-of-00002.safetensors",
9
+ "language_model.model.layers.0.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
10
+ "language_model.model.layers.0.attention_norm.weight": "model-00001-of-00002.safetensors",
11
+ "language_model.model.layers.0.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
12
+ "language_model.model.layers.0.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
13
+ "language_model.model.layers.0.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
14
+ "language_model.model.layers.0.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
15
+ "language_model.model.layers.0.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
16
+ "language_model.model.layers.0.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
17
+ "language_model.model.layers.0.ffn_norm.weight": "model-00001-of-00002.safetensors",
18
+ "language_model.model.layers.1.attention.wo.weight": "model-00001-of-00002.safetensors",
19
+ "language_model.model.layers.1.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
20
+ "language_model.model.layers.1.attention.wqkv.weight": "model-00001-of-00002.safetensors",
21
+ "language_model.model.layers.1.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
22
+ "language_model.model.layers.1.attention_norm.weight": "model-00001-of-00002.safetensors",
23
+ "language_model.model.layers.1.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
24
+ "language_model.model.layers.1.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
25
+ "language_model.model.layers.1.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
26
+ "language_model.model.layers.1.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
27
+ "language_model.model.layers.1.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
28
+ "language_model.model.layers.1.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
29
+ "language_model.model.layers.1.ffn_norm.weight": "model-00001-of-00002.safetensors",
30
+ "language_model.model.layers.10.attention.wo.weight": "model-00001-of-00002.safetensors",
31
+ "language_model.model.layers.10.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
32
+ "language_model.model.layers.10.attention.wqkv.weight": "model-00001-of-00002.safetensors",
33
+ "language_model.model.layers.10.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
34
+ "language_model.model.layers.10.attention_norm.weight": "model-00001-of-00002.safetensors",
35
+ "language_model.model.layers.10.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
36
+ "language_model.model.layers.10.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
37
+ "language_model.model.layers.10.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
38
+ "language_model.model.layers.10.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
39
+ "language_model.model.layers.10.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
40
+ "language_model.model.layers.10.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
41
+ "language_model.model.layers.10.ffn_norm.weight": "model-00001-of-00002.safetensors",
42
+ "language_model.model.layers.11.attention.wo.weight": "model-00001-of-00002.safetensors",
43
+ "language_model.model.layers.11.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
44
+ "language_model.model.layers.11.attention.wqkv.weight": "model-00001-of-00002.safetensors",
45
+ "language_model.model.layers.11.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
46
+ "language_model.model.layers.11.attention_norm.weight": "model-00001-of-00002.safetensors",
47
+ "language_model.model.layers.11.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
48
+ "language_model.model.layers.11.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
49
+ "language_model.model.layers.11.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
50
+ "language_model.model.layers.11.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
51
+ "language_model.model.layers.11.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
52
+ "language_model.model.layers.11.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
53
+ "language_model.model.layers.11.ffn_norm.weight": "model-00001-of-00002.safetensors",
54
+ "language_model.model.layers.12.attention.wo.weight": "model-00001-of-00002.safetensors",
55
+ "language_model.model.layers.12.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
56
+ "language_model.model.layers.12.attention.wqkv.weight": "model-00001-of-00002.safetensors",
57
+ "language_model.model.layers.12.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
58
+ "language_model.model.layers.12.attention_norm.weight": "model-00001-of-00002.safetensors",
59
+ "language_model.model.layers.12.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
60
+ "language_model.model.layers.12.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
61
+ "language_model.model.layers.12.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
62
+ "language_model.model.layers.12.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
63
+ "language_model.model.layers.12.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
64
+ "language_model.model.layers.12.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
65
+ "language_model.model.layers.12.ffn_norm.weight": "model-00001-of-00002.safetensors",
66
+ "language_model.model.layers.13.attention.wo.weight": "model-00001-of-00002.safetensors",
67
+ "language_model.model.layers.13.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
68
+ "language_model.model.layers.13.attention.wqkv.weight": "model-00001-of-00002.safetensors",
69
+ "language_model.model.layers.13.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
70
+ "language_model.model.layers.13.attention_norm.weight": "model-00002-of-00002.safetensors",
71
+ "language_model.model.layers.13.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
72
+ "language_model.model.layers.13.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
73
+ "language_model.model.layers.13.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
74
+ "language_model.model.layers.13.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
75
+ "language_model.model.layers.13.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
76
+ "language_model.model.layers.13.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
77
+ "language_model.model.layers.13.ffn_norm.weight": "model-00002-of-00002.safetensors",
78
+ "language_model.model.layers.14.attention.wo.weight": "model-00002-of-00002.safetensors",
79
+ "language_model.model.layers.14.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
80
+ "language_model.model.layers.14.attention.wqkv.weight": "model-00002-of-00002.safetensors",
81
+ "language_model.model.layers.14.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
82
+ "language_model.model.layers.14.attention_norm.weight": "model-00002-of-00002.safetensors",
83
+ "language_model.model.layers.14.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
84
+ "language_model.model.layers.14.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
85
+ "language_model.model.layers.14.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
86
+ "language_model.model.layers.14.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
87
+ "language_model.model.layers.14.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
88
+ "language_model.model.layers.14.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
89
+ "language_model.model.layers.14.ffn_norm.weight": "model-00002-of-00002.safetensors",
90
+ "language_model.model.layers.15.attention.wo.weight": "model-00002-of-00002.safetensors",
91
+ "language_model.model.layers.15.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
92
+ "language_model.model.layers.15.attention.wqkv.weight": "model-00002-of-00002.safetensors",
93
+ "language_model.model.layers.15.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
94
+ "language_model.model.layers.15.attention_norm.weight": "model-00002-of-00002.safetensors",
95
+ "language_model.model.layers.15.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
96
+ "language_model.model.layers.15.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
97
+ "language_model.model.layers.15.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
98
+ "language_model.model.layers.15.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
99
+ "language_model.model.layers.15.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
100
+ "language_model.model.layers.15.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
101
+ "language_model.model.layers.15.ffn_norm.weight": "model-00002-of-00002.safetensors",
102
+ "language_model.model.layers.16.attention.wo.weight": "model-00002-of-00002.safetensors",
103
+ "language_model.model.layers.16.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
104
+ "language_model.model.layers.16.attention.wqkv.weight": "model-00002-of-00002.safetensors",
105
+ "language_model.model.layers.16.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
106
+ "language_model.model.layers.16.attention_norm.weight": "model-00002-of-00002.safetensors",
107
+ "language_model.model.layers.16.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
108
+ "language_model.model.layers.16.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
109
+ "language_model.model.layers.16.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
110
+ "language_model.model.layers.16.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
111
+ "language_model.model.layers.16.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
112
+ "language_model.model.layers.16.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
113
+ "language_model.model.layers.16.ffn_norm.weight": "model-00002-of-00002.safetensors",
114
+ "language_model.model.layers.17.attention.wo.weight": "model-00002-of-00002.safetensors",
115
+ "language_model.model.layers.17.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
116
+ "language_model.model.layers.17.attention.wqkv.weight": "model-00002-of-00002.safetensors",
117
+ "language_model.model.layers.17.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
118
+ "language_model.model.layers.17.attention_norm.weight": "model-00002-of-00002.safetensors",
119
+ "language_model.model.layers.17.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
120
+ "language_model.model.layers.17.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
121
+ "language_model.model.layers.17.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
122
+ "language_model.model.layers.17.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
123
+ "language_model.model.layers.17.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
124
+ "language_model.model.layers.17.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
125
+ "language_model.model.layers.17.ffn_norm.weight": "model-00002-of-00002.safetensors",
126
+ "language_model.model.layers.18.attention.wo.weight": "model-00002-of-00002.safetensors",
127
+ "language_model.model.layers.18.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
128
+ "language_model.model.layers.18.attention.wqkv.weight": "model-00002-of-00002.safetensors",
129
+ "language_model.model.layers.18.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
130
+ "language_model.model.layers.18.attention_norm.weight": "model-00002-of-00002.safetensors",
131
+ "language_model.model.layers.18.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
132
+ "language_model.model.layers.18.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
133
+ "language_model.model.layers.18.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
134
+ "language_model.model.layers.18.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
135
+ "language_model.model.layers.18.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
136
+ "language_model.model.layers.18.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
137
+ "language_model.model.layers.18.ffn_norm.weight": "model-00002-of-00002.safetensors",
138
+ "language_model.model.layers.19.attention.wo.weight": "model-00002-of-00002.safetensors",
139
+ "language_model.model.layers.19.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
140
+ "language_model.model.layers.19.attention.wqkv.weight": "model-00002-of-00002.safetensors",
141
+ "language_model.model.layers.19.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
142
+ "language_model.model.layers.19.attention_norm.weight": "model-00002-of-00002.safetensors",
143
+ "language_model.model.layers.19.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
144
+ "language_model.model.layers.19.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
145
+ "language_model.model.layers.19.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
146
+ "language_model.model.layers.19.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
147
+ "language_model.model.layers.19.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
148
+ "language_model.model.layers.19.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
149
+ "language_model.model.layers.19.ffn_norm.weight": "model-00002-of-00002.safetensors",
150
+ "language_model.model.layers.2.attention.wo.weight": "model-00001-of-00002.safetensors",
151
+ "language_model.model.layers.2.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
152
+ "language_model.model.layers.2.attention.wqkv.weight": "model-00001-of-00002.safetensors",
153
+ "language_model.model.layers.2.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
154
+ "language_model.model.layers.2.attention_norm.weight": "model-00001-of-00002.safetensors",
155
+ "language_model.model.layers.2.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
156
+ "language_model.model.layers.2.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
157
+ "language_model.model.layers.2.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
158
+ "language_model.model.layers.2.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
159
+ "language_model.model.layers.2.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
160
+ "language_model.model.layers.2.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
161
+ "language_model.model.layers.2.ffn_norm.weight": "model-00001-of-00002.safetensors",
162
+ "language_model.model.layers.20.attention.wo.weight": "model-00002-of-00002.safetensors",
163
+ "language_model.model.layers.20.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
164
+ "language_model.model.layers.20.attention.wqkv.weight": "model-00002-of-00002.safetensors",
165
+ "language_model.model.layers.20.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
166
+ "language_model.model.layers.20.attention_norm.weight": "model-00002-of-00002.safetensors",
167
+ "language_model.model.layers.20.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
168
+ "language_model.model.layers.20.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
169
+ "language_model.model.layers.20.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
170
+ "language_model.model.layers.20.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
171
+ "language_model.model.layers.20.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
172
+ "language_model.model.layers.20.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
173
+ "language_model.model.layers.20.ffn_norm.weight": "model-00002-of-00002.safetensors",
174
+ "language_model.model.layers.21.attention.wo.weight": "model-00002-of-00002.safetensors",
175
+ "language_model.model.layers.21.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
176
+ "language_model.model.layers.21.attention.wqkv.weight": "model-00002-of-00002.safetensors",
177
+ "language_model.model.layers.21.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
178
+ "language_model.model.layers.21.attention_norm.weight": "model-00002-of-00002.safetensors",
179
+ "language_model.model.layers.21.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
180
+ "language_model.model.layers.21.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
181
+ "language_model.model.layers.21.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
182
+ "language_model.model.layers.21.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
183
+ "language_model.model.layers.21.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
184
+ "language_model.model.layers.21.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
185
+ "language_model.model.layers.21.ffn_norm.weight": "model-00002-of-00002.safetensors",
186
+ "language_model.model.layers.22.attention.wo.weight": "model-00002-of-00002.safetensors",
187
+ "language_model.model.layers.22.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
188
+ "language_model.model.layers.22.attention.wqkv.weight": "model-00002-of-00002.safetensors",
189
+ "language_model.model.layers.22.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
190
+ "language_model.model.layers.22.attention_norm.weight": "model-00002-of-00002.safetensors",
191
+ "language_model.model.layers.22.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
192
+ "language_model.model.layers.22.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
193
+ "language_model.model.layers.22.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
194
+ "language_model.model.layers.22.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
195
+ "language_model.model.layers.22.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
196
+ "language_model.model.layers.22.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
197
+ "language_model.model.layers.22.ffn_norm.weight": "model-00002-of-00002.safetensors",
198
+ "language_model.model.layers.23.attention.wo.weight": "model-00002-of-00002.safetensors",
199
+ "language_model.model.layers.23.attention.wo_ve.weight": "model-00002-of-00002.safetensors",
200
+ "language_model.model.layers.23.attention.wqkv.weight": "model-00002-of-00002.safetensors",
201
+ "language_model.model.layers.23.attention.wqkv_ve.weight": "model-00002-of-00002.safetensors",
202
+ "language_model.model.layers.23.attention_norm.weight": "model-00002-of-00002.safetensors",
203
+ "language_model.model.layers.23.feed_forward.w1.weight": "model-00002-of-00002.safetensors",
204
+ "language_model.model.layers.23.feed_forward.w2.weight": "model-00002-of-00002.safetensors",
205
+ "language_model.model.layers.23.feed_forward.w3.weight": "model-00002-of-00002.safetensors",
206
+ "language_model.model.layers.23.feed_forward_ve.w1.weight": "model-00002-of-00002.safetensors",
207
+ "language_model.model.layers.23.feed_forward_ve.w2.weight": "model-00002-of-00002.safetensors",
208
+ "language_model.model.layers.23.feed_forward_ve.w3.weight": "model-00002-of-00002.safetensors",
209
+ "language_model.model.layers.23.ffn_norm.weight": "model-00002-of-00002.safetensors",
210
+ "language_model.model.layers.3.attention.wo.weight": "model-00001-of-00002.safetensors",
211
+ "language_model.model.layers.3.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
212
+ "language_model.model.layers.3.attention.wqkv.weight": "model-00001-of-00002.safetensors",
213
+ "language_model.model.layers.3.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
214
+ "language_model.model.layers.3.attention_norm.weight": "model-00001-of-00002.safetensors",
215
+ "language_model.model.layers.3.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
216
+ "language_model.model.layers.3.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
217
+ "language_model.model.layers.3.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
218
+ "language_model.model.layers.3.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
219
+ "language_model.model.layers.3.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
220
+ "language_model.model.layers.3.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
221
+ "language_model.model.layers.3.ffn_norm.weight": "model-00001-of-00002.safetensors",
222
+ "language_model.model.layers.4.attention.wo.weight": "model-00001-of-00002.safetensors",
223
+ "language_model.model.layers.4.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
224
+ "language_model.model.layers.4.attention.wqkv.weight": "model-00001-of-00002.safetensors",
225
+ "language_model.model.layers.4.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
226
+ "language_model.model.layers.4.attention_norm.weight": "model-00001-of-00002.safetensors",
227
+ "language_model.model.layers.4.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
228
+ "language_model.model.layers.4.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
229
+ "language_model.model.layers.4.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
230
+ "language_model.model.layers.4.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
231
+ "language_model.model.layers.4.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
232
+ "language_model.model.layers.4.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
233
+ "language_model.model.layers.4.ffn_norm.weight": "model-00001-of-00002.safetensors",
234
+ "language_model.model.layers.5.attention.wo.weight": "model-00001-of-00002.safetensors",
235
+ "language_model.model.layers.5.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
236
+ "language_model.model.layers.5.attention.wqkv.weight": "model-00001-of-00002.safetensors",
237
+ "language_model.model.layers.5.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
238
+ "language_model.model.layers.5.attention_norm.weight": "model-00001-of-00002.safetensors",
239
+ "language_model.model.layers.5.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
240
+ "language_model.model.layers.5.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
241
+ "language_model.model.layers.5.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
242
+ "language_model.model.layers.5.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
243
+ "language_model.model.layers.5.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
244
+ "language_model.model.layers.5.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
245
+ "language_model.model.layers.5.ffn_norm.weight": "model-00001-of-00002.safetensors",
246
+ "language_model.model.layers.6.attention.wo.weight": "model-00001-of-00002.safetensors",
247
+ "language_model.model.layers.6.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
248
+ "language_model.model.layers.6.attention.wqkv.weight": "model-00001-of-00002.safetensors",
249
+ "language_model.model.layers.6.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
250
+ "language_model.model.layers.6.attention_norm.weight": "model-00001-of-00002.safetensors",
251
+ "language_model.model.layers.6.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
252
+ "language_model.model.layers.6.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
253
+ "language_model.model.layers.6.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
254
+ "language_model.model.layers.6.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
255
+ "language_model.model.layers.6.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
256
+ "language_model.model.layers.6.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
257
+ "language_model.model.layers.6.ffn_norm.weight": "model-00001-of-00002.safetensors",
258
+ "language_model.model.layers.7.attention.wo.weight": "model-00001-of-00002.safetensors",
259
+ "language_model.model.layers.7.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
260
+ "language_model.model.layers.7.attention.wqkv.weight": "model-00001-of-00002.safetensors",
261
+ "language_model.model.layers.7.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
262
+ "language_model.model.layers.7.attention_norm.weight": "model-00001-of-00002.safetensors",
263
+ "language_model.model.layers.7.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
264
+ "language_model.model.layers.7.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
265
+ "language_model.model.layers.7.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
266
+ "language_model.model.layers.7.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
267
+ "language_model.model.layers.7.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
268
+ "language_model.model.layers.7.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
269
+ "language_model.model.layers.7.ffn_norm.weight": "model-00001-of-00002.safetensors",
270
+ "language_model.model.layers.8.attention.wo.weight": "model-00001-of-00002.safetensors",
271
+ "language_model.model.layers.8.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
272
+ "language_model.model.layers.8.attention.wqkv.weight": "model-00001-of-00002.safetensors",
273
+ "language_model.model.layers.8.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
274
+ "language_model.model.layers.8.attention_norm.weight": "model-00001-of-00002.safetensors",
275
+ "language_model.model.layers.8.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
276
+ "language_model.model.layers.8.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
277
+ "language_model.model.layers.8.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
278
+ "language_model.model.layers.8.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
279
+ "language_model.model.layers.8.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
280
+ "language_model.model.layers.8.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
281
+ "language_model.model.layers.8.ffn_norm.weight": "model-00001-of-00002.safetensors",
282
+ "language_model.model.layers.9.attention.wo.weight": "model-00001-of-00002.safetensors",
283
+ "language_model.model.layers.9.attention.wo_ve.weight": "model-00001-of-00002.safetensors",
284
+ "language_model.model.layers.9.attention.wqkv.weight": "model-00001-of-00002.safetensors",
285
+ "language_model.model.layers.9.attention.wqkv_ve.weight": "model-00001-of-00002.safetensors",
286
+ "language_model.model.layers.9.attention_norm.weight": "model-00001-of-00002.safetensors",
287
+ "language_model.model.layers.9.feed_forward.w1.weight": "model-00001-of-00002.safetensors",
288
+ "language_model.model.layers.9.feed_forward.w2.weight": "model-00001-of-00002.safetensors",
289
+ "language_model.model.layers.9.feed_forward.w3.weight": "model-00001-of-00002.safetensors",
290
+ "language_model.model.layers.9.feed_forward_ve.w1.weight": "model-00001-of-00002.safetensors",
291
+ "language_model.model.layers.9.feed_forward_ve.w2.weight": "model-00001-of-00002.safetensors",
292
+ "language_model.model.layers.9.feed_forward_ve.w3.weight": "model-00001-of-00002.safetensors",
293
+ "language_model.model.layers.9.ffn_norm.weight": "model-00001-of-00002.safetensors",
294
+ "language_model.model.norm.weight": "model-00002-of-00002.safetensors",
295
+ "language_model.model.tok_embeddings.weight": "model-00001-of-00002.safetensors",
296
+ "language_model.output.weight": "model-00002-of-00002.safetensors",
297
+ "mlp1.0.bias": "model-00002-of-00002.safetensors",
298
+ "mlp1.0.weight": "model-00002-of-00002.safetensors",
299
+ "mlp1.1.bias": "model-00002-of-00002.safetensors",
300
+ "mlp1.1.weight": "model-00002-of-00002.safetensors",
301
+ "mlp1.3.bias": "model-00002-of-00002.safetensors",
302
+ "mlp1.3.weight": "model-00002-of-00002.safetensors",
303
+ "special_token_embedding.weight": "model-00002-of-00002.safetensors",
304
+ "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00002.safetensors",
305
+ "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00002.safetensors",
306
+ "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00002.safetensors",
307
+ "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00002.safetensors",
308
+ "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00002.safetensors",
309
+ "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00002.safetensors",
310
+ "vision_model.encoder.layers.0.ls1": "model-00001-of-00002.safetensors",
311
+ "vision_model.encoder.layers.0.ls2": "model-00001-of-00002.safetensors",
312
+ "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
313
+ "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
314
+ "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
315
+ "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
316
+ "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00002.safetensors",
317
+ "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00002.safetensors",
318
+ "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00002.safetensors",
319
+ "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00002.safetensors",
320
+ "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00002.safetensors",
321
+ "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00002.safetensors",
322
+ "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00002.safetensors",
323
+ "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00002.safetensors",
324
+ "vision_model.encoder.layers.1.ls1": "model-00001-of-00002.safetensors",
325
+ "vision_model.encoder.layers.1.ls2": "model-00001-of-00002.safetensors",
326
+ "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
327
+ "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
328
+ "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
329
+ "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
330
+ "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00002.safetensors",
331
+ "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00002.safetensors",
332
+ "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00002.safetensors",
333
+ "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00002.safetensors",
334
+ "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00002.safetensors",
335
+ "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00002.safetensors",
336
+ "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00002.safetensors",
337
+ "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00002.safetensors",
338
+ "vision_model.encoder.layers.10.ls1": "model-00001-of-00002.safetensors",
339
+ "vision_model.encoder.layers.10.ls2": "model-00001-of-00002.safetensors",
340
+ "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
341
+ "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
342
+ "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
343
+ "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
344
+ "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00002.safetensors",
345
+ "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00002.safetensors",
346
+ "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00002.safetensors",
347
+ "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00002.safetensors",
348
+ "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00002.safetensors",
349
+ "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00002.safetensors",
350
+ "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00002.safetensors",
351
+ "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00002.safetensors",
352
+ "vision_model.encoder.layers.11.ls1": "model-00001-of-00002.safetensors",
353
+ "vision_model.encoder.layers.11.ls2": "model-00001-of-00002.safetensors",
354
+ "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
355
+ "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
356
+ "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
357
+ "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
358
+ "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00002.safetensors",
359
+ "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00002.safetensors",
360
+ "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00002.safetensors",
361
+ "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00002.safetensors",
362
+ "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00002.safetensors",
363
+ "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00002.safetensors",
364
+ "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00002.safetensors",
365
+ "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00002.safetensors",
366
+ "vision_model.encoder.layers.12.ls1": "model-00001-of-00002.safetensors",
367
+ "vision_model.encoder.layers.12.ls2": "model-00001-of-00002.safetensors",
368
+ "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
369
+ "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
370
+ "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
371
+ "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
372
+ "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00002.safetensors",
373
+ "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00002.safetensors",
374
+ "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00002.safetensors",
375
+ "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00002.safetensors",
376
+ "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00002.safetensors",
377
+ "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00002.safetensors",
378
+ "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00002.safetensors",
379
+ "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00002.safetensors",
380
+ "vision_model.encoder.layers.13.ls1": "model-00001-of-00002.safetensors",
381
+ "vision_model.encoder.layers.13.ls2": "model-00001-of-00002.safetensors",
382
+ "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
383
+ "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
384
+ "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
385
+ "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
386
+ "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00002.safetensors",
387
+ "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00002.safetensors",
388
+ "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00002.safetensors",
389
+ "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00002.safetensors",
390
+ "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00002.safetensors",
391
+ "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00002.safetensors",
392
+ "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00002.safetensors",
393
+ "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00002.safetensors",
394
+ "vision_model.encoder.layers.14.ls1": "model-00001-of-00002.safetensors",
395
+ "vision_model.encoder.layers.14.ls2": "model-00001-of-00002.safetensors",
396
+ "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
397
+ "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
398
+ "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
399
+ "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
400
+ "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00002.safetensors",
401
+ "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00002.safetensors",
402
+ "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00002.safetensors",
403
+ "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00002.safetensors",
404
+ "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00002.safetensors",
405
+ "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00002.safetensors",
406
+ "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00002.safetensors",
407
+ "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00002.safetensors",
408
+ "vision_model.encoder.layers.15.ls1": "model-00001-of-00002.safetensors",
409
+ "vision_model.encoder.layers.15.ls2": "model-00001-of-00002.safetensors",
410
+ "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
411
+ "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
412
+ "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
413
+ "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
414
+ "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00002.safetensors",
415
+ "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00002.safetensors",
416
+ "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00002.safetensors",
417
+ "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00002.safetensors",
418
+ "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00002.safetensors",
419
+ "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00002.safetensors",
420
+ "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00002.safetensors",
421
+ "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00002.safetensors",
422
+ "vision_model.encoder.layers.16.ls1": "model-00001-of-00002.safetensors",
423
+ "vision_model.encoder.layers.16.ls2": "model-00001-of-00002.safetensors",
424
+ "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
425
+ "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
426
+ "vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
427
+ "vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
428
+ "vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00002.safetensors",
429
+ "vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00002.safetensors",
430
+ "vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00002.safetensors",
431
+ "vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00002.safetensors",
432
+ "vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00002.safetensors",
433
+ "vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00002.safetensors",
434
+ "vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00002.safetensors",
435
+ "vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00002.safetensors",
436
+ "vision_model.encoder.layers.17.ls1": "model-00001-of-00002.safetensors",
437
+ "vision_model.encoder.layers.17.ls2": "model-00001-of-00002.safetensors",
438
+ "vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
439
+ "vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
440
+ "vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
441
+ "vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
442
+ "vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00002.safetensors",
443
+ "vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00002.safetensors",
444
+ "vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00002.safetensors",
445
+ "vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00002.safetensors",
446
+ "vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00002.safetensors",
447
+ "vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00002.safetensors",
448
+ "vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00002.safetensors",
449
+ "vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00002.safetensors",
450
+ "vision_model.encoder.layers.18.ls1": "model-00001-of-00002.safetensors",
451
+ "vision_model.encoder.layers.18.ls2": "model-00001-of-00002.safetensors",
452
+ "vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
453
+ "vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
454
+ "vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
455
+ "vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
456
+ "vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00002.safetensors",
457
+ "vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00002.safetensors",
458
+ "vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00002.safetensors",
459
+ "vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00002.safetensors",
460
+ "vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00002.safetensors",
461
+ "vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00002.safetensors",
462
+ "vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00002.safetensors",
463
+ "vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00002.safetensors",
464
+ "vision_model.encoder.layers.19.ls1": "model-00001-of-00002.safetensors",
465
+ "vision_model.encoder.layers.19.ls2": "model-00001-of-00002.safetensors",
466
+ "vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
467
+ "vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
468
+ "vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
469
+ "vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
470
+ "vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00002.safetensors",
471
+ "vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00002.safetensors",
472
+ "vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00002.safetensors",
473
+ "vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00002.safetensors",
474
+ "vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00002.safetensors",
475
+ "vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00002.safetensors",
476
+ "vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00002.safetensors",
477
+ "vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00002.safetensors",
478
+ "vision_model.encoder.layers.2.ls1": "model-00001-of-00002.safetensors",
479
+ "vision_model.encoder.layers.2.ls2": "model-00001-of-00002.safetensors",
480
+ "vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
481
+ "vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
482
+ "vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
483
+ "vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
484
+ "vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00002.safetensors",
485
+ "vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00002.safetensors",
486
+ "vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00002.safetensors",
487
+ "vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00002.safetensors",
488
+ "vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00002.safetensors",
489
+ "vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00002.safetensors",
490
+ "vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00002.safetensors",
491
+ "vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00002.safetensors",
492
+ "vision_model.encoder.layers.20.ls1": "model-00001-of-00002.safetensors",
493
+ "vision_model.encoder.layers.20.ls2": "model-00001-of-00002.safetensors",
494
+ "vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
495
+ "vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
496
+ "vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
497
+ "vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
498
+ "vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00002.safetensors",
499
+ "vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00002.safetensors",
500
+ "vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00002.safetensors",
501
+ "vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00002.safetensors",
502
+ "vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00002.safetensors",
503
+ "vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00002.safetensors",
504
+ "vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00002.safetensors",
505
+ "vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00002.safetensors",
506
+ "vision_model.encoder.layers.21.ls1": "model-00001-of-00002.safetensors",
507
+ "vision_model.encoder.layers.21.ls2": "model-00001-of-00002.safetensors",
508
+ "vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
509
+ "vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
510
+ "vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
511
+ "vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
512
+ "vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00002.safetensors",
513
+ "vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00002.safetensors",
514
+ "vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00002.safetensors",
515
+ "vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00002.safetensors",
516
+ "vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00002.safetensors",
517
+ "vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00002.safetensors",
518
+ "vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00002.safetensors",
519
+ "vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00002.safetensors",
520
+ "vision_model.encoder.layers.22.ls1": "model-00001-of-00002.safetensors",
521
+ "vision_model.encoder.layers.22.ls2": "model-00001-of-00002.safetensors",
522
+ "vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
523
+ "vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
524
+ "vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
525
+ "vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
526
+ "vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00002.safetensors",
527
+ "vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00002.safetensors",
528
+ "vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00002.safetensors",
529
+ "vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00002.safetensors",
530
+ "vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00002.safetensors",
531
+ "vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00002.safetensors",
532
+ "vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00002.safetensors",
533
+ "vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00002.safetensors",
534
+ "vision_model.encoder.layers.23.ls1": "model-00001-of-00002.safetensors",
535
+ "vision_model.encoder.layers.23.ls2": "model-00001-of-00002.safetensors",
536
+ "vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
537
+ "vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
538
+ "vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
539
+ "vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
540
+ "vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00002.safetensors",
541
+ "vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00002.safetensors",
542
+ "vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00002.safetensors",
543
+ "vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00002.safetensors",
544
+ "vision_model.encoder.layers.24.attn.proj.bias": "model-00001-of-00002.safetensors",
545
+ "vision_model.encoder.layers.24.attn.proj.weight": "model-00001-of-00002.safetensors",
546
+ "vision_model.encoder.layers.24.attn.qkv.bias": "model-00001-of-00002.safetensors",
547
+ "vision_model.encoder.layers.24.attn.qkv.weight": "model-00001-of-00002.safetensors",
548
+ "vision_model.encoder.layers.24.ls1": "model-00001-of-00002.safetensors",
549
+ "vision_model.encoder.layers.24.ls2": "model-00001-of-00002.safetensors",
550
+ "vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
551
+ "vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
552
+ "vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
553
+ "vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
554
+ "vision_model.encoder.layers.24.norm1.bias": "model-00001-of-00002.safetensors",
555
+ "vision_model.encoder.layers.24.norm1.weight": "model-00001-of-00002.safetensors",
556
+ "vision_model.encoder.layers.24.norm2.bias": "model-00001-of-00002.safetensors",
557
+ "vision_model.encoder.layers.24.norm2.weight": "model-00001-of-00002.safetensors",
558
+ "vision_model.encoder.layers.25.attn.proj.bias": "model-00001-of-00002.safetensors",
559
+ "vision_model.encoder.layers.25.attn.proj.weight": "model-00001-of-00002.safetensors",
560
+ "vision_model.encoder.layers.25.attn.qkv.bias": "model-00001-of-00002.safetensors",
561
+ "vision_model.encoder.layers.25.attn.qkv.weight": "model-00001-of-00002.safetensors",
562
+ "vision_model.encoder.layers.25.ls1": "model-00001-of-00002.safetensors",
563
+ "vision_model.encoder.layers.25.ls2": "model-00001-of-00002.safetensors",
564
+ "vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
565
+ "vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
566
+ "vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
567
+ "vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
568
+ "vision_model.encoder.layers.25.norm1.bias": "model-00001-of-00002.safetensors",
569
+ "vision_model.encoder.layers.25.norm1.weight": "model-00001-of-00002.safetensors",
570
+ "vision_model.encoder.layers.25.norm2.bias": "model-00001-of-00002.safetensors",
571
+ "vision_model.encoder.layers.25.norm2.weight": "model-00001-of-00002.safetensors",
572
+ "vision_model.encoder.layers.26.attn.proj.bias": "model-00001-of-00002.safetensors",
573
+ "vision_model.encoder.layers.26.attn.proj.weight": "model-00001-of-00002.safetensors",
574
+ "vision_model.encoder.layers.26.attn.qkv.bias": "model-00001-of-00002.safetensors",
575
+ "vision_model.encoder.layers.26.attn.qkv.weight": "model-00001-of-00002.safetensors",
576
+ "vision_model.encoder.layers.26.ls1": "model-00001-of-00002.safetensors",
577
+ "vision_model.encoder.layers.26.ls2": "model-00001-of-00002.safetensors",
578
+ "vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
579
+ "vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
580
+ "vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
581
+ "vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
582
+ "vision_model.encoder.layers.26.norm1.bias": "model-00001-of-00002.safetensors",
583
+ "vision_model.encoder.layers.26.norm1.weight": "model-00001-of-00002.safetensors",
584
+ "vision_model.encoder.layers.26.norm2.bias": "model-00001-of-00002.safetensors",
585
+ "vision_model.encoder.layers.26.norm2.weight": "model-00001-of-00002.safetensors",
586
+ "vision_model.encoder.layers.27.attn.proj.bias": "model-00001-of-00002.safetensors",
587
+ "vision_model.encoder.layers.27.attn.proj.weight": "model-00001-of-00002.safetensors",
588
+ "vision_model.encoder.layers.27.attn.qkv.bias": "model-00001-of-00002.safetensors",
589
+ "vision_model.encoder.layers.27.attn.qkv.weight": "model-00001-of-00002.safetensors",
590
+ "vision_model.encoder.layers.27.ls1": "model-00001-of-00002.safetensors",
591
+ "vision_model.encoder.layers.27.ls2": "model-00001-of-00002.safetensors",
592
+ "vision_model.encoder.layers.27.mlp.fc1.bias": "model-00001-of-00002.safetensors",
593
+ "vision_model.encoder.layers.27.mlp.fc1.weight": "model-00001-of-00002.safetensors",
594
+ "vision_model.encoder.layers.27.mlp.fc2.bias": "model-00001-of-00002.safetensors",
595
+ "vision_model.encoder.layers.27.mlp.fc2.weight": "model-00001-of-00002.safetensors",
596
+ "vision_model.encoder.layers.27.norm1.bias": "model-00001-of-00002.safetensors",
597
+ "vision_model.encoder.layers.27.norm1.weight": "model-00001-of-00002.safetensors",
598
+ "vision_model.encoder.layers.27.norm2.bias": "model-00001-of-00002.safetensors",
599
+ "vision_model.encoder.layers.27.norm2.weight": "model-00001-of-00002.safetensors",
600
+ "vision_model.encoder.layers.28.attn.proj.bias": "model-00001-of-00002.safetensors",
601
+ "vision_model.encoder.layers.28.attn.proj.weight": "model-00001-of-00002.safetensors",
602
+ "vision_model.encoder.layers.28.attn.qkv.bias": "model-00001-of-00002.safetensors",
603
+ "vision_model.encoder.layers.28.attn.qkv.weight": "model-00001-of-00002.safetensors",
604
+ "vision_model.encoder.layers.28.ls1": "model-00001-of-00002.safetensors",
605
+ "vision_model.encoder.layers.28.ls2": "model-00001-of-00002.safetensors",
606
+ "vision_model.encoder.layers.28.mlp.fc1.bias": "model-00001-of-00002.safetensors",
607
+ "vision_model.encoder.layers.28.mlp.fc1.weight": "model-00001-of-00002.safetensors",
608
+ "vision_model.encoder.layers.28.mlp.fc2.bias": "model-00001-of-00002.safetensors",
609
+ "vision_model.encoder.layers.28.mlp.fc2.weight": "model-00001-of-00002.safetensors",
610
+ "vision_model.encoder.layers.28.norm1.bias": "model-00001-of-00002.safetensors",
611
+ "vision_model.encoder.layers.28.norm1.weight": "model-00001-of-00002.safetensors",
612
+ "vision_model.encoder.layers.28.norm2.bias": "model-00001-of-00002.safetensors",
613
+ "vision_model.encoder.layers.28.norm2.weight": "model-00001-of-00002.safetensors",
614
+ "vision_model.encoder.layers.29.attn.proj.bias": "model-00001-of-00002.safetensors",
615
+ "vision_model.encoder.layers.29.attn.proj.weight": "model-00001-of-00002.safetensors",
616
+ "vision_model.encoder.layers.29.attn.qkv.bias": "model-00001-of-00002.safetensors",
617
+ "vision_model.encoder.layers.29.attn.qkv.weight": "model-00001-of-00002.safetensors",
618
+ "vision_model.encoder.layers.29.ls1": "model-00001-of-00002.safetensors",
619
+ "vision_model.encoder.layers.29.ls2": "model-00001-of-00002.safetensors",
620
+ "vision_model.encoder.layers.29.mlp.fc1.bias": "model-00001-of-00002.safetensors",
621
+ "vision_model.encoder.layers.29.mlp.fc1.weight": "model-00001-of-00002.safetensors",
622
+ "vision_model.encoder.layers.29.mlp.fc2.bias": "model-00001-of-00002.safetensors",
623
+ "vision_model.encoder.layers.29.mlp.fc2.weight": "model-00001-of-00002.safetensors",
624
+ "vision_model.encoder.layers.29.norm1.bias": "model-00001-of-00002.safetensors",
625
+ "vision_model.encoder.layers.29.norm1.weight": "model-00001-of-00002.safetensors",
626
+ "vision_model.encoder.layers.29.norm2.bias": "model-00001-of-00002.safetensors",
627
+ "vision_model.encoder.layers.29.norm2.weight": "model-00001-of-00002.safetensors",
628
+ "vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00002.safetensors",
629
+ "vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00002.safetensors",
630
+ "vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00002.safetensors",
631
+ "vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00002.safetensors",
632
+ "vision_model.encoder.layers.3.ls1": "model-00001-of-00002.safetensors",
633
+ "vision_model.encoder.layers.3.ls2": "model-00001-of-00002.safetensors",
634
+ "vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
635
+ "vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
636
+ "vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
637
+ "vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
638
+ "vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00002.safetensors",
639
+ "vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00002.safetensors",
640
+ "vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00002.safetensors",
641
+ "vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00002.safetensors",
642
+ "vision_model.encoder.layers.30.attn.proj.bias": "model-00001-of-00002.safetensors",
643
+ "vision_model.encoder.layers.30.attn.proj.weight": "model-00001-of-00002.safetensors",
644
+ "vision_model.encoder.layers.30.attn.qkv.bias": "model-00001-of-00002.safetensors",
645
+ "vision_model.encoder.layers.30.attn.qkv.weight": "model-00001-of-00002.safetensors",
646
+ "vision_model.encoder.layers.30.ls1": "model-00001-of-00002.safetensors",
647
+ "vision_model.encoder.layers.30.ls2": "model-00001-of-00002.safetensors",
648
+ "vision_model.encoder.layers.30.mlp.fc1.bias": "model-00001-of-00002.safetensors",
649
+ "vision_model.encoder.layers.30.mlp.fc1.weight": "model-00001-of-00002.safetensors",
650
+ "vision_model.encoder.layers.30.mlp.fc2.bias": "model-00001-of-00002.safetensors",
651
+ "vision_model.encoder.layers.30.mlp.fc2.weight": "model-00001-of-00002.safetensors",
652
+ "vision_model.encoder.layers.30.norm1.bias": "model-00001-of-00002.safetensors",
653
+ "vision_model.encoder.layers.30.norm1.weight": "model-00001-of-00002.safetensors",
654
+ "vision_model.encoder.layers.30.norm2.bias": "model-00001-of-00002.safetensors",
655
+ "vision_model.encoder.layers.30.norm2.weight": "model-00001-of-00002.safetensors",
656
+ "vision_model.encoder.layers.31.attn.proj.bias": "model-00001-of-00002.safetensors",
657
+ "vision_model.encoder.layers.31.attn.proj.weight": "model-00001-of-00002.safetensors",
658
+ "vision_model.encoder.layers.31.attn.qkv.bias": "model-00001-of-00002.safetensors",
659
+ "vision_model.encoder.layers.31.attn.qkv.weight": "model-00001-of-00002.safetensors",
660
+ "vision_model.encoder.layers.31.ls1": "model-00001-of-00002.safetensors",
661
+ "vision_model.encoder.layers.31.ls2": "model-00001-of-00002.safetensors",
662
+ "vision_model.encoder.layers.31.mlp.fc1.bias": "model-00001-of-00002.safetensors",
663
+ "vision_model.encoder.layers.31.mlp.fc1.weight": "model-00001-of-00002.safetensors",
664
+ "vision_model.encoder.layers.31.mlp.fc2.bias": "model-00001-of-00002.safetensors",
665
+ "vision_model.encoder.layers.31.mlp.fc2.weight": "model-00001-of-00002.safetensors",
666
+ "vision_model.encoder.layers.31.norm1.bias": "model-00001-of-00002.safetensors",
667
+ "vision_model.encoder.layers.31.norm1.weight": "model-00001-of-00002.safetensors",
668
+ "vision_model.encoder.layers.31.norm2.bias": "model-00001-of-00002.safetensors",
669
+ "vision_model.encoder.layers.31.norm2.weight": "model-00001-of-00002.safetensors",
670
+ "vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00002.safetensors",
671
+ "vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00002.safetensors",
672
+ "vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00002.safetensors",
673
+ "vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00002.safetensors",
674
+ "vision_model.encoder.layers.4.ls1": "model-00001-of-00002.safetensors",
675
+ "vision_model.encoder.layers.4.ls2": "model-00001-of-00002.safetensors",
676
+ "vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
677
+ "vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
678
+ "vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
679
+ "vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
680
+ "vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00002.safetensors",
681
+ "vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00002.safetensors",
682
+ "vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00002.safetensors",
683
+ "vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00002.safetensors",
684
+ "vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00002.safetensors",
685
+ "vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00002.safetensors",
686
+ "vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00002.safetensors",
687
+ "vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00002.safetensors",
688
+ "vision_model.encoder.layers.5.ls1": "model-00001-of-00002.safetensors",
689
+ "vision_model.encoder.layers.5.ls2": "model-00001-of-00002.safetensors",
690
+ "vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
691
+ "vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
692
+ "vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
693
+ "vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
694
+ "vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00002.safetensors",
695
+ "vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00002.safetensors",
696
+ "vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00002.safetensors",
697
+ "vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00002.safetensors",
698
+ "vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00002.safetensors",
699
+ "vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00002.safetensors",
700
+ "vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00002.safetensors",
701
+ "vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00002.safetensors",
702
+ "vision_model.encoder.layers.6.ls1": "model-00001-of-00002.safetensors",
703
+ "vision_model.encoder.layers.6.ls2": "model-00001-of-00002.safetensors",
704
+ "vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
705
+ "vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
706
+ "vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
707
+ "vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
708
+ "vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00002.safetensors",
709
+ "vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00002.safetensors",
710
+ "vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00002.safetensors",
711
+ "vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00002.safetensors",
712
+ "vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00002.safetensors",
713
+ "vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00002.safetensors",
714
+ "vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00002.safetensors",
715
+ "vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00002.safetensors",
716
+ "vision_model.encoder.layers.7.ls1": "model-00001-of-00002.safetensors",
717
+ "vision_model.encoder.layers.7.ls2": "model-00001-of-00002.safetensors",
718
+ "vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
719
+ "vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
720
+ "vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
721
+ "vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
722
+ "vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00002.safetensors",
723
+ "vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00002.safetensors",
724
+ "vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00002.safetensors",
725
+ "vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00002.safetensors",
726
+ "vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00002.safetensors",
727
+ "vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00002.safetensors",
728
+ "vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00002.safetensors",
729
+ "vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00002.safetensors",
730
+ "vision_model.encoder.layers.8.ls1": "model-00001-of-00002.safetensors",
731
+ "vision_model.encoder.layers.8.ls2": "model-00001-of-00002.safetensors",
732
+ "vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
733
+ "vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
734
+ "vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
735
+ "vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
736
+ "vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00002.safetensors",
737
+ "vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00002.safetensors",
738
+ "vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00002.safetensors",
739
+ "vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00002.safetensors",
740
+ "vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00002.safetensors",
741
+ "vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00002.safetensors",
742
+ "vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00002.safetensors",
743
+ "vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00002.safetensors",
744
+ "vision_model.encoder.layers.9.ls1": "model-00001-of-00002.safetensors",
745
+ "vision_model.encoder.layers.9.ls2": "model-00001-of-00002.safetensors",
746
+ "vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
747
+ "vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
748
+ "vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
749
+ "vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
750
+ "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00002.safetensors",
751
+ "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00002.safetensors",
752
+ "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00002.safetensors",
753
+ "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00002.safetensors"
754
+ }
755
+ }
modeling_internlm2_ve.py ADDED
@@ -0,0 +1,1541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ try:
39
+ from transformers.generation.streamers import BaseStreamer
40
+ except: # noqa # pylint: disable=bare-except
41
+ BaseStreamer = None
42
+
43
+ from .configuration_internlm2 import InternLM2VEConfig
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = 'InternLM2VEConfig'
48
+
49
+ flash_attn_func, flash_attn_varlen_func = None, None
50
+ pad_input, index_first_axis, unpad_input = None, None, None
51
+ try:
52
+ from flash_attn import flash_attn_func as _flash_attn_func
53
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
55
+ from flash_attn.bert_padding import pad_input as _pad_input
56
+ from flash_attn.bert_padding import unpad_input as _unpad_input
57
+
58
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
59
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
60
+ has_flash_attn = True
61
+ except:
62
+ has_flash_attn = False
63
+
64
+
65
+ def _import_flash_attn():
66
+ global flash_attn_func, flash_attn_varlen_func
67
+ global pad_input, index_first_axis, unpad_input
68
+ try:
69
+ from flash_attn import flash_attn_func as _flash_attn_func
70
+ from flash_attn import \
71
+ flash_attn_varlen_func as _flash_attn_varlen_func
72
+ from flash_attn.bert_padding import \
73
+ index_first_axis as _index_first_axis
74
+ from flash_attn.bert_padding import pad_input as _pad_input
75
+ from flash_attn.bert_padding import unpad_input as _unpad_input
76
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
77
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
78
+ except ImportError:
79
+ raise ImportError('flash_attn is not installed.')
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
83
+ def _get_unpad_data(attention_mask):
84
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
85
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
86
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
87
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
88
+ return (
89
+ indices,
90
+ cu_seqlens,
91
+ max_seqlen_in_batch,
92
+ )
93
+
94
+
95
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
96
+ def _make_causal_mask(
97
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
98
+ ):
99
+ """
100
+ Make causal mask used for bi-directional self-attention.
101
+ """
102
+ bsz, tgt_len = input_ids_shape
103
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
104
+ mask_cond = torch.arange(mask.size(-1), device=device)
105
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
106
+ mask = mask.to(dtype)
107
+
108
+ if past_key_values_length > 0:
109
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
110
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
111
+
112
+
113
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
114
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
115
+ """
116
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
117
+ """
118
+ bsz, src_len = mask.size()
119
+ tgt_len = tgt_len if tgt_len is not None else src_len
120
+
121
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
122
+
123
+ inverted_mask = 1.0 - expanded_mask
124
+
125
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
126
+
127
+
128
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
129
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
class InternLM2RMSNorm(nn.Module):
    """Root-mean-square LayerNorm (no mean subtraction); equivalent to T5LayerNorm.

    The statistics and the weight multiply are computed in float32 for stability,
    then the result is cast back to the input dtype.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        orig_dtype = hidden_states.dtype
        x = hidden_states.to(torch.float32)
        inv_rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.variance_epsilon)
        normalized = x * inv_rms
        return (self.weight.to(torch.float32) * normalized).to(orig_dtype)
145
+
146
+
147
+ # try:
148
+ # from functools import partial
149
+
150
+ # from apex.normalization import FusedRMSNorm
151
+ # InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa
152
+ # print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm')
153
+ # except ImportError:
154
+ # # using the normal LlamaRMSNorm
155
+ # pass
156
+ # except Exception:
157
+ # print('discovered apex but it failed to load, falling back to InternLM2RMSNorm')
158
+ # pass
159
+
160
+
161
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
162
# Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
class InternLM2RotaryEmbedding(nn.Module):
    """Precomputes and caches the cos/sin tables for rotary position embedding (RoPE).

    The cache is grown lazily in `forward` when a longer sequence is requested.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        # Inverse frequencies: base^(-2i/dim) for i in [0, dim/2).
        exponents = torch.arange(0, self.dim, 2).float().to(device) / self.dim
        self.register_buffer('inv_freq', 1.0 / (self.base ** exponents), persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.outer(positions, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]; only its dtype/device are used.
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )
196
+
197
+
198
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
199
# Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """RoPE with linear position scaling: positions are divided by `scaling_factor`.
    Credits to the Reddit user /u/kaiokendev."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        # Must be set before super().__init__, which builds the cache via _set_cos_sin_cache.
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
        positions = positions / self.scaling_factor

        freqs = torch.outer(positions, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
216
+
217
+
218
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
219
# Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """RoPE with Dynamic NTK scaling: the rotary base is enlarged on the fly when the
    requested sequence length exceeds `max_position_embeddings`.
    Credits to the Reddit users /u/bloc97 and /u/emozilla.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        # Must be set before super().__init__, which builds the cache via _set_cos_sin_cache.
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            # NTK-aware rescaling of the base; recompute the inverse frequencies.
            ratio = (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            base = self.base * (ratio ** (self.dim / (self.dim - 2)))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer('inv_freq', inv_freq, persistent=False)

        positions = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.outer(positions, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
245
+
246
+
247
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
248
# Copied from transformers.model.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input: (x1, x2) -> (-x2, x1)."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    return torch.cat((-second_half, first_half), dim=-1)
253
+
254
+
255
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
256
# Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    `cos`/`sin` are indexed by `position_ids` and unsqueezed at `unsqueeze_dim`
    (the head axis by default) so they broadcast over q/k.
    """
    cos_sel = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin_sel = sin[position_ids].unsqueeze(unsqueeze_dim)
    rotated_q = (q * cos_sel) + (rotate_half(q) * sin_sel)
    rotated_k = (k * cos_sel) + (rotate_half(k) * sin_sel)
    return rotated_q, rotated_k
263
+
264
def apply_multimodal_rotary_pos_emb(q, k, cos, sin, position_ids, mrope_section, unsqueeze_dim=1):
    """
    Applies Rotary Position Embedding to the query and key tensors, interleaving
    the temporal/height/width position components per `mrope_section`.

    Args:
        q (:obj:`torch.Tensor`): The query tensor, shape [batch_size, num_heads, seq_len, head_dim].
        k (:obj:`torch.Tensor`): The key tensor, shape [batch_size, num_heads, seq_len, head_dim].
        cos (:obj:`torch.Tensor`): The cosine values, shape [max_position_embeddings, head_dim].
        sin (:obj:`torch.Tensor`): The sine values, shape [max_position_embeddings, head_dim].
        position_ids (:obj:`torch.LongTensor`): The multi-modal position ids, shape [3, batch_size, seq_len].
        mrope_section (:obj:`List[int]`): The respective dimension section of the multi-modal position ids for temporal, height and width.
        unsqueeze_dim (:obj:`int`, `optional`, defaults to 1): The dimension to unsqueeze the cosine and sine values.
    """

    # Duplicate the sections because cos/sin cover head_dim as cat((freqs, freqs)).
    sections = mrope_section * 2
    cos_sel, sin_sel = cos[position_ids], sin[position_ids]  # [3, B, L, D]
    # For chunk i take the (i % 3)-th modality (t, h, w cycling), then reassemble along head_dim.
    cos_parts = [chunk[i % 3] for i, chunk in enumerate(cos_sel.split(sections, dim=-1))]
    sin_parts = [chunk[i % 3] for i, chunk in enumerate(sin_sel.split(sections, dim=-1))]
    cos_sel = torch.cat(cos_parts, dim=-1).unsqueeze(unsqueeze_dim)  # [B, 1, L, D]
    sin_sel = torch.cat(sin_parts, dim=-1).unsqueeze(unsqueeze_dim)  # [B, 1, L, D]

    rotated_q = (q * cos_sel) + (rotate_half(q) * sin_sel)
    rotated_k = (k * cos_sel) + (rotate_half(k) * sin_sel)
    return rotated_q, rotated_k
290
+
291
+
292
class InternLM2MLP(nn.Module):
    """Gated (SwiGLU-style) feed-forward block: w2(act(w1(x)) * w3(x))."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # w1 is the gate projection, w3 the up projection, w2 the down projection.
        self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        gate = self.act_fn(self.w1(x))
        return self.w2(gate * self.w3(x))
307
+
308
+
309
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
310
# Copied from transformers.model.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    expanded = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)
320
+
321
+
322
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
323
class InternLM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Visual-expert (VE) variant: it carries a second set of qkv/output projections
    (`wqkv_ve`, `wo_ve`) that are applied to the positions flagged by
    `visual_token_mask` instead of the text projections.
    """

    def __init__(self, config: InternLM2VEConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        # GQA: how many query heads share one key/value head.
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        # Fused q/k/v projection: q heads plus 2*kv heads, all at head_dim width.
        self.wqkv = nn.Linear(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
        )

        self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)

        # Visual-expert counterparts with identical shapes.
        self.wqkv_ve = nn.Linear(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
        )
        self.wo_ve = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)

        self._init_rope()

    def _init_rope(self):
        # Selects the rotary-embedding flavor from config.rope_scaling
        # (None -> plain RoPE, else 'dynamic' NTK or 'linear' scaling).
        if self.config.rope_scaling is None:
            self.rotary_emb = InternLM2RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            scaling_factor = self.config.rope_scaling['factor']
            if scaling_type == 'dynamic':
                self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            elif scaling_type == 'linear':
                self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            else:
                raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
        return self.rotary_emb

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq, hidden] -> [bsz, num_heads, seq, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        visual_token_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Eager (matmul+softmax) attention forward.

        Returns (attn_output, attn_weights or None, past_key_value or None).
        `visual_token_mask` is a [B, L] mask selecting positions routed through
        the visual-expert projections.
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)
        if visual_token_mask is not None:
            # NOTE visual token mask can be None when evaluating with kv_cache
            visual_token_mask = visual_token_mask.bool()  # B, L
            if visual_token_mask.any():
                # Visual positions get the expert projection instead of wqkv.
                qkv_states[visual_token_mask] = self.wqkv_ve(hidden_states[visual_token_mask])

        # Split the fused projection: per kv head there are num_key_value_groups
        # query slices followed by one key slice and one value slice.
        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        # -> [bsz, heads, seq, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # Expand kv heads to match the query head count (GQA).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            # attention_mask is additive (0 / dtype-min), see _make_causal_mask/_expand_mask.
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.wo(attn_output)
        if visual_token_mask is not None:
            # NOTE(review): unlike the qkv path (where wqkv_ve REPLACES wqkv on the raw
            # hidden states), wo_ve here is applied ON TOP of the wo output for visual
            # positions. Confirm this stacking is intended and matches training.
            attn_output[visual_token_mask] = self.wo_ve(attn_output[visual_token_mask])

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
485
+
486
+
487
# Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
class InternLM2FlashAttention2(InternLM2Attention):
    """
    InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        padding_type: str = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Flash-attention forward pass.

        Returns `(attn_output, attn_weights, past_key_value)`; `attn_weights`
        is always None because flash attention cannot return weights.
        """
        # InternLM2FlashAttention2 attention does not support output_attentions
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)
        if visual_token_mask is not None:
            # NOTE visual token mask can be None when evaluating with kv_cache
            visual_token_mask = visual_token_mask.bool()
            if visual_token_mask.any():
                # Visual tokens use the visual-expert QKV projection computed
                # from the same hidden states (replaces, not augments, wqkv).
                qkv_states[visual_token_mask] = self.wqkv_ve(hidden_states[visual_token_mask])

        # Fused QKV layout: per kv-head there are `num_key_value_groups`
        # query slices followed by one key slice and one value slice.
        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        # (b, q, h, d) -> (b, h, q, d) for rotary embedding / cache concat.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            # Total KV length includes the cached prefix.
            kv_seq_len += past_key_value[0].shape[-2]

        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # flash-attn kernels expect (batch, seq, heads, head_dim).
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, padding_type=padding_type
        )
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.wo(attn_output)
        if visual_token_mask is not None:
            # NOTE(review): for visual tokens wo_ve is applied ON TOP of wo
            # (double projection), unlike wqkv_ve which replaces wqkv. The
            # eager attention path does the same — confirm this is intended.
            attn_output[visual_token_mask] = self.wo_ve(attn_output[visual_token_mask])

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, padding_type="pad"
    ):
        # Dispatch on padding strategy:
        #   "pad"  — attention_mask is a (batch, seq) 0/1 padding mask;
        #   "pack" — attention_mask carries cumulative sequence lengths for
        #            sequences packed into a single batch row.
        if padding_type == "pad":
            return self._flash_attention_forward_pad(
                query_states, key_states, value_states, attention_mask, query_length, dropout=dropout, softmax_scale=softmax_scale
            )
        elif padding_type == "pack":
            return self._flash_attention_forward_pack(
                query_states, key_states, value_states, attention_mask, query_length, dropout=dropout, softmax_scale=softmax_scale
            )
        else:
            raise ValueError(f"padding_type should be either `pad` or `pack`, got {padding_type}")

    def _flash_attention_forward_pad(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        # Contains at least one padding token in the sequence
        # causal=False when decoding a single token: it can attend to the
        # whole cached prefix anyway.
        causal = self.is_causal and query_length != 1
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            # Strip padding positions and compute cu_seqlens for varlen kernel.
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            # Re-insert padding so the output is (batch, query_length, ...).
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    def _flash_attention_forward_pack(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.
        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                rename from cu_seqlens to keep compatability - (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
                of the sequences in the batch.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        # Packed mode only supports a single batch row holding all sequences.
        assert query_states.size(0) == key_states.size(0) == value_states.size(0) == 1
        query_states = query_states.squeeze(0)
        key_states = key_states.squeeze(0)
        value_states = value_states.squeeze(0)
        cu_seqlens = attention_mask.squeeze(0)

        with torch.no_grad():
            # Longest individual sequence, derived from consecutive cu_seqlens.
            max_seqlen = max([
                cu_seqlens[idx+1] - cu_seqlens[idx]
                for idx in range(cu_seqlens.size(0) - 1)
            ]).item()

        # Contains at least one padding token in the sequence
        causal = self.is_causal and query_length != 1
        attn_output = flash_attn_varlen_func(
            q=query_states,
            k=key_states,
            v=value_states,
            cu_seqlens_q=cu_seqlens,
            cu_seqlens_k=cu_seqlens,
            max_seqlen_q=max_seqlen,
            max_seqlen_k=max_seqlen,
            dropout_p=dropout,
            softmax_scale=softmax_scale,
            causal=causal,
        )

        # NOTE(review): these unsqueezes only touch local variables and have
        # no effect on the returned attn_output — they appear to be dead code.
        query_states = query_states.unsqueeze(0)
        key_states = key_states.unsqueeze(0)
        value_states = value_states.unsqueeze(0)
        return attn_output

    def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        # Remove padding positions and build cumulative-length metadata for
        # flash_attn_varlen_func. Mirrors transformers' _upad_input helper.
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )

        if query_length == kv_seq_len:
            # Prefill: queries share the keys' unpadding layout.
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token decode: one query per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q.to(torch.int64),
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
735
+
736
+
737
# Maps `config.attn_implementation` to the attention class instantiated by
# InternLM2DecoderLayer.__init__.
INTERNLM2_ATTENTION_CLASSES = {
    'eager': InternLM2Attention,
    'flash_attention_2': InternLM2FlashAttention2,
}
741
+
742
+ import os
743
+
744
+
745
# Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
class InternLM2DecoderLayer(nn.Module):
    """Single InternLM2 decoder layer with a parallel visual-expert (VE) MLP.

    Standard pre-norm transformer layer (attention + MLP, each with a residual
    connection), extended so that tokens flagged by `visual_token_mask` are
    routed through a separate `feed_forward_ve` MLP while text tokens use
    `feed_forward`.
    """

    def __init__(self, config: InternLM2VEConfig):
        super().__init__()
        self.hidden_size = config.hidden_size

        # VISUALIZE_ATTN=1 forces the eager attention implementation so that
        # attention weights can be returned for inspection (flash attention
        # never materializes them).
        VISUALIZE_ATTN = int(os.environ.get("VISUALIZE_ATTN", "0"))
        if VISUALIZE_ATTN:
            self.attention = InternLM2Attention(config=config)
        else:
            self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)

        self.feed_forward = InternLM2MLP(config)
        # Visual-expert MLP: same shape as feed_forward, applied only to
        # positions where visual_token_mask is True.
        self.feed_forward_ve = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        padding_type: Optional[str] = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        layer_idx: Optional[int] = -1,
        return_feature_scale: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            padding_type (`str`, *optional*): "pad" or "pack"; forwarded to the attention module.
            visual_token_mask (`torch.Tensor`, *optional*): (batch, seq_len) mask selecting visual tokens.
            layer_idx (`int`, *optional*): index of this layer (used for logging/debugging only).
            return_feature_scale (`bool`, *optional*): if True, append per-modality feature-scale
                statistics (std of mean-abs activations) as the last tuple element.
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure use `attention_mask` instead.`'
            )

        residual = hidden_states

        hidden_states = self.attention_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            padding_type=padding_type,
            visual_token_mask=visual_token_mask,
            **kwargs,
        )

        hidden_states = residual + hidden_states

        # Fully Connected — route visual tokens through the VE expert MLP.
        residual = hidden_states
        hidden_states = self.ffn_norm(hidden_states)

        if visual_token_mask is None:
            hidden_states = self.feed_forward(hidden_states)
        else:
            visual_token_mask = visual_token_mask.bool()  # B, L
            if visual_token_mask.any():
                hidden_states[visual_token_mask] = self.feed_forward_ve(hidden_states[visual_token_mask])
            if (~visual_token_mask).any():
                hidden_states[~visual_token_mask] = self.feed_forward(hidden_states[~visual_token_mask])
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        if return_feature_scale:
            assert visual_token_mask is not None, "visual_token_mask must be provided when return_feature_scale is True"
            outputs += ((hidden_states[visual_token_mask].abs().mean(-1).std(), hidden_states[~visual_token_mask].abs().mean(-1).std()),)

        return outputs
881
+
882
+
883
# Class-level docstring fragment injected by @add_start_docstrings on the
# model classes below.
InternLM2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`InternLM2VEConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
898
+
899
+
900
# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2PreTrainedModel(PreTrainedModel):
    # Shared base class wiring: config class, weight-init, and the flags the
    # transformers machinery reads (gradient checkpointing, flash-attn 2,
    # no-split modules for device placement).
    config_class = InternLM2VEConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['InternLM2DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from N(0, initializer_range);
        zero biases and the padding embedding row."""
        init_std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=init_std)
            if module.bias is not None:
                module.bias.data.zero_()
            return
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=init_std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
923
+
924
+
925
# Forward-signature docstring fragment injected by
# @add_start_docstrings_to_model_forward on InternLM2Model.forward.
InternLM2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
988
+
989
+
990
# Modified from transformers.model.llama.modeling_llama.LlamaModel
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2Model(InternLM2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]

    Args:
        config: InternLM2VEConfig
    """

    _auto_class = 'AutoModel'

    def __init__(self, config: InternLM2VEConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.config = config

        # VISUALIZE_ATTN=1 forces eager attention so weights can be inspected.
        VISUALIZE_ATTN = int(os.environ.get("VISUALIZE_ATTN", "0"))
        if VISUALIZE_ATTN:
            self.config.attn_implementation = 'eager'

        # Fall back to eager attention when flash-attn is not installed.
        if not has_flash_attn:
            self.config.attn_implementation = 'eager'
            print('Warning: Flash attention is not available, using eager attention instead.')

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

        self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Token embedding table (nn.Embedding).
        return self.tok_embeddings

    def set_input_embeddings(self, value):
        self.tok_embeddings = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            # Additive combination: both masks use large-negative values for
            # disallowed positions, so summing keeps both constraints.
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        padding_type: Optional[str] = "pad",
        visual_token_mask: Optional[torch.Tensor] = None,
        generation_modality: Optional[int] = 0,
        return_feature_scale: Optional[bool] = False,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Resolve None flags from config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.attn_implementation == 'flash_attention_2':
            _import_flash_attn()

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            # Cached prefix length from the first layer's key tensor.
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            # Positions continue after the cached prefix.
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)


        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)

        if self.config.attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            # (dropped entirely when no position is padded, letting the
            # flash path skip unpadding).
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(
                    (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
                )
            attention_mask = self._prepare_decoder_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

        if generation_modality == 0 and past_key_values_length > 0:
            # Indicating we are generating text and is not pre-filling. This is an ugly hack to make the model work
            visual_token_mask = None


        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    # The closure maps the positional checkpoint inputs back
                    # to keyword args; the last four inputs are
                    # (padding_type, visual_token_mask, layer_idx,
                    # return_feature_scale).
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs[:-4], output_attentions, None,
                            padding_type=inputs[-4],
                            visual_token_mask=inputs[-3],
                            layer_idx=inputs[-2],
                            return_feature_scale=inputs[-1]
                        )

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                    padding_type,
                    visual_token_mask,
                    idx,
                    return_feature_scale,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    padding_type=padding_type,
                    visual_token_mask=visual_token_mask,
                    layer_idx=idx,
                    return_feature_scale=return_feature_scale,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                # Cache position in the layer output tuple depends on whether
                # attention weights were also returned.
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            # NOTE(review): the tuple path does not include feature_scale even
            # when return_feature_scale=True — confirm callers use return_dict.
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        ret = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        if return_feature_scale:
            # Feature scale from the LAST decoder layer only.
            # NOTE(review): item assignment of a new key on a ModelOutput
            # subclass — verify transformers' ModelOutput permits this.
            ret["feature_scale"] = layer_outputs[-1]
        return ret
1215
+
1216
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
1217
+ class InternLM2VEForCausalLM(InternLM2PreTrainedModel):
1218
+ _auto_class = 'AutoModelForCausalLM'
1219
+
1220
+ _tied_weights_keys = ['output.weight']
1221
+
1222
+ def __init__(self, config):
1223
+ super().__init__(config)
1224
+ self.model = InternLM2Model(config)
1225
+ self.vocab_size = config.vocab_size
1226
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1227
+
1228
+ # Initialize weights and apply final processing
1229
+ self.post_init()
1230
+
1231
+ def get_input_embeddings(self):
1232
+ return self.model.tok_embeddings
1233
+
1234
+ def set_input_embeddings(self, value):
1235
+ self.model.tok_embeddings = value
1236
+
1237
+ def get_output_embeddings(self):
1238
+ return self.output
1239
+
1240
+ def set_output_embeddings(self, new_embeddings):
1241
+ self.output = new_embeddings
1242
+
1243
+ def set_decoder(self, decoder):
1244
+ self.model = decoder
1245
+
1246
+ def get_decoder(self):
1247
+ return self.model
1248
+
1249
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1250
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1251
+ def forward(
1252
+ self,
1253
+ input_ids: torch.LongTensor = None,
1254
+ attention_mask: Optional[torch.Tensor] = None,
1255
+ position_ids: Optional[torch.LongTensor] = None,
1256
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1257
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1258
+ labels: Optional[torch.LongTensor] = None,
1259
+ use_cache: Optional[bool] = None,
1260
+ output_attentions: Optional[bool] = None,
1261
+ output_hidden_states: Optional[bool] = None,
1262
+ return_dict: Optional[bool] = None,
1263
+ padding_type: Optional[str] = "pad",
1264
+ visual_token_mask: Optional[torch.Tensor] = None,
1265
+ generation_modality: Optional[int] = 0,
1266
+ skip_lm_head: Optional[bool] = False,
1267
+ return_feature_scale: Optional[bool] = False,
1268
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1269
+ r"""
1270
+ Args:
1271
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1272
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1273
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1274
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1275
+
1276
+ Returns:
1277
+
1278
+ Example:
1279
+
1280
+ ```python
1281
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1282
+
1283
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1284
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1285
+
1286
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1287
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1288
+
1289
+ >>> # Generate
1290
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1291
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1292
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1293
+ ```"""
1294
+
1295
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1296
+ output_hidden_states = (
1297
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1298
+ )
1299
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1300
+
1301
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1302
+ outputs = self.model(
1303
+ input_ids=input_ids,
1304
+ attention_mask=attention_mask,
1305
+ position_ids=position_ids,
1306
+ past_key_values=past_key_values,
1307
+ inputs_embeds=inputs_embeds,
1308
+ use_cache=use_cache,
1309
+ output_attentions=output_attentions,
1310
+ output_hidden_states=output_hidden_states,
1311
+ return_dict=return_dict,
1312
+ padding_type=padding_type,
1313
+ visual_token_mask=visual_token_mask,
1314
+ generation_modality=generation_modality,
1315
+ return_feature_scale=return_feature_scale,
1316
+ )
1317
+
1318
+ hidden_states = outputs[0]
1319
+
1320
+ logits = None
1321
+ if not skip_lm_head:
1322
+ logits = self.output(hidden_states)
1323
+ logits = logits.float()
1324
+
1325
+ loss = None
1326
+ if labels is not None:
1327
+ # Shift so that tokens < n predict n
1328
+ shift_logits = logits[..., :-1, :].contiguous()
1329
+ shift_labels = labels[..., 1:].contiguous()
1330
+ # Flatten the tokens
1331
+ loss_fct = CrossEntropyLoss()
1332
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1333
+ shift_labels = shift_labels.view(-1)
1334
+ # Enable model parallelism
1335
+ shift_labels = shift_labels.to(shift_logits.device)
1336
+ loss = loss_fct(shift_logits, shift_labels)
1337
+
1338
+ if not return_dict:
1339
+ output = (logits,) + outputs[1:]
1340
+ return (loss,) + output if loss is not None else output
1341
+
1342
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1343
+ output = CausalLMOutputWithPast(
1344
+ loss=loss,
1345
+ logits=logits,
1346
+ past_key_values=outputs.past_key_values,
1347
+ hidden_states=outputs.hidden_states,
1348
+ attentions=outputs.attentions,
1349
+ )
1350
+ if return_feature_scale:
1351
+ output["feature_scale"] = outputs["feature_scale"]
1352
+ if not skip_lm_head:
1353
+ output['logits'] = output['logits'].to(device)
1354
+ return output
1355
+
1356
+ def prepare_inputs_for_generation(
1357
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1358
+ ):
1359
+ if past_key_values is not None:
1360
+ past_length = past_key_values[0][0].shape[2]
1361
+
1362
+ # Some generation methods already pass only the last input ID
1363
+ if input_ids.shape[1] > past_length:
1364
+ remove_prefix_length = past_length
1365
+ else:
1366
+ # Default to old behavior: keep only final ID
1367
+ remove_prefix_length = input_ids.shape[1] - 1
1368
+
1369
+ input_ids = input_ids[:, remove_prefix_length:]
1370
+
1371
+ position_ids = kwargs.get('position_ids', None)
1372
+ if attention_mask is not None and position_ids is None:
1373
+ # create position_ids on the fly for batch generation
1374
+ position_ids = attention_mask.long().cumsum(-1) - 1
1375
+ position_ids.masked_fill_(attention_mask == 0, 1)
1376
+ if past_key_values:
1377
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1378
+ elif position_ids is not None and past_key_values is not None:
1379
+ # for generation step after the 1st
1380
+ position_ids = torch.ones_like(position_ids[..., -1:]) * (position_ids.max() + 1 + past_length - position_ids.shape[-1])
1381
+
1382
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1383
+ if inputs_embeds is not None and past_key_values is None:
1384
+ model_inputs = {'inputs_embeds': inputs_embeds}
1385
+ else:
1386
+ model_inputs = {'input_ids': input_ids}
1387
+
1388
+ model_inputs.update(
1389
+ {
1390
+ 'position_ids': position_ids,
1391
+ 'past_key_values': past_key_values,
1392
+ 'use_cache': kwargs.get('use_cache'),
1393
+ 'attention_mask': attention_mask,
1394
+ 'visual_token_mask': kwargs.get('visual_token_mask')
1395
+ }
1396
+ )
1397
+ return model_inputs
1398
+
1399
+ @staticmethod
1400
+ def _reorder_cache(past_key_values, beam_idx):
1401
+ reordered_past = ()
1402
+ for layer_past in past_key_values:
1403
+ reordered_past += (
1404
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1405
+ )
1406
+ return reordered_past
1407
+
1408
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
1409
+ if tokenizer.add_bos_token:
1410
+ prompt = ''
1411
+ else:
1412
+ prompt = tokenizer.bos_token
1413
+ if meta_instruction:
1414
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1415
+ for record in history:
1416
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1417
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1418
+ return tokenizer([prompt], return_tensors='pt')
1419
+
1420
+ @torch.no_grad()
1421
+ def chat(
1422
+ self,
1423
+ tokenizer,
1424
+ query: str,
1425
+ history: List[Tuple[str, str]] = [],
1426
+ streamer: Optional[BaseStreamer] = None,
1427
+ max_new_tokens: int = 1024,
1428
+ do_sample: bool = True,
1429
+ temperature: float = 0.8,
1430
+ top_p: float = 0.8,
1431
+ meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
1432
+ '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
1433
+ '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
1434
+ **kwargs,
1435
+ ):
1436
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1437
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1438
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1439
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
1440
+ outputs = self.generate(
1441
+ **inputs,
1442
+ streamer=streamer,
1443
+ max_new_tokens=max_new_tokens,
1444
+ do_sample=do_sample,
1445
+ temperature=temperature,
1446
+ top_p=top_p,
1447
+ eos_token_id=eos_token_id,
1448
+ **kwargs,
1449
+ )
1450
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
1451
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1452
+ response = response.split('<|im_end|>')[0]
1453
+ history = history + [(query, response)]
1454
+ return response, history
1455
+
1456
+ @torch.no_grad()
1457
+ def stream_chat(
1458
+ self,
1459
+ tokenizer,
1460
+ query: str,
1461
+ history: List[Tuple[str, str]] = [],
1462
+ max_new_tokens: int = 1024,
1463
+ do_sample: bool = True,
1464
+ temperature: float = 0.8,
1465
+ top_p: float = 0.8,
1466
+ **kwargs,
1467
+ ):
1468
+ """
1469
+ Return a generator in format: (response, history)
1470
+ Eg.
1471
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1472
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1473
+ """
1474
+ if BaseStreamer is None:
1475
+ raise ModuleNotFoundError(
1476
+ 'The version of `transformers` is too low. Please make sure '
1477
+ 'that you have installed `transformers>=4.28.0`.'
1478
+ )
1479
+
1480
+ response_queue = queue.Queue(maxsize=20)
1481
+
1482
+ class ChatStreamer(BaseStreamer):
1483
+ def __init__(self, tokenizer) -> None:
1484
+ super().__init__()
1485
+ self.tokenizer = tokenizer
1486
+ self.queue = response_queue
1487
+ self.query = query
1488
+ self.history = history
1489
+ self.response = ''
1490
+ self.cache = []
1491
+ self.received_inputs = False
1492
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1493
+
1494
+ def put(self, value):
1495
+ if len(value.shape) > 1 and value.shape[0] > 1:
1496
+ raise ValueError('ChatStreamer only supports batch size 1')
1497
+ elif len(value.shape) > 1:
1498
+ value = value[0]
1499
+
1500
+ if not self.received_inputs:
1501
+ # The first received value is input_ids, ignore here
1502
+ self.received_inputs = True
1503
+ return
1504
+
1505
+ self.cache.extend(value.tolist())
1506
+ token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1507
+ if token.strip() != '<|im_end|>':
1508
+ self.response = self.response + token
1509
+ history = self.history + [(self.query, self.response)]
1510
+ self.queue.put((self.response, history))
1511
+ self.cache = []
1512
+ else:
1513
+ self.end()
1514
+
1515
+ def end(self):
1516
+ self.queue.put(None)
1517
+
1518
+ def stream_producer():
1519
+ return self.chat(
1520
+ tokenizer=tokenizer,
1521
+ query=query,
1522
+ streamer=ChatStreamer(tokenizer=tokenizer),
1523
+ history=history,
1524
+ max_new_tokens=max_new_tokens,
1525
+ do_sample=do_sample,
1526
+ temperature=temperature,
1527
+ top_p=top_p,
1528
+ **kwargs,
1529
+ )
1530
+
1531
+ def consumer():
1532
+ producer = threading.Thread(target=stream_producer)
1533
+ producer.start()
1534
+ while True:
1535
+ res = response_queue.get()
1536
+ if res is None:
1537
+ return
1538
+ yield res
1539
+
1540
+ return consumer()
1541
+
modeling_navil_chat.py ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import os
8
+ import warnings
9
+ from typing import Any, List, Optional, Tuple, Union
10
+ import copy
11
+
12
+ from dataclasses import dataclass
13
+
14
+ import torch
15
+ import torch.distributed as dist
16
+ from torch import nn
17
+ from torch.nn import CrossEntropyLoss
18
+
19
+ import transformers
20
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
21
+ LlamaTokenizer, Qwen2ForCausalLM)
22
+ from transformers.modeling_utils import PreTrainedModel
23
+ from transformers.utils import ModelOutput, logging
24
+ from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm
25
+
26
+ from .configuration_navil_chat import NaViLChatConfig
27
+ from .modeling_navil_vit_anyres import NaViLVisionModelAnyRes
28
+
29
+ from .conversation import get_conv_template
30
+ from .modeling_internlm2_ve import InternLM2VEForCausalLM
31
+ # from navil.model.qwen3.modeling_qwen3_ve import Qwen3VEForCausalLM
32
+ from .modeling_internlm2_ve import InternLM2RMSNorm
33
+ from .image_processing_qwen2_vl import Qwen2VLImageProcessor
34
+ from .constants import (
35
+ SPECIAL_TOKEN_LIST,
36
+ IMG_CONTEXT_TOKEN, IMG_END_TOKEN, IMG_START_TOKEN, IMG_UNCOND_TOKEN,
37
+ VAE_MEAN, VAE_STD,
38
+ )
39
+ from .modular_intern_vit import (
40
+ InternVisionFlashAttention2,
41
+ InternVisionSdpaAttention,
42
+ InternMLP,
43
+ NORM2FN,
44
+ InternVisionRotaryEmbedding,
45
+ )
46
+
47
+ logger = logging.get_logger(__name__)
48
+ logger.setLevel(logging.INFO)
49
+
50
+
51
+ def version_cmp(v1, v2, op='eq'):
52
+ import operator
53
+
54
+ from packaging import version
55
+ op_func = getattr(operator, op)
56
+ return op_func(version.parse(v1), version.parse(v2))
57
+
58
+
59
+
60
+ @dataclass
61
+ class CausalLMOutputWithPast(ModelOutput):
62
+ """
63
+ Base class for causal language model (or autoregressive) outputs.
64
+
65
+ Args:
66
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
67
+ Language modeling loss (for next-token prediction).
68
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
69
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
70
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
71
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
72
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
73
+
74
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
75
+ `past_key_values` input) to speed up sequential decoding.
76
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
77
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
78
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
79
+
80
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
81
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
82
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
83
+ sequence_length)`.
84
+
85
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
86
+ heads.
87
+ """
88
+
89
+ loss: Optional[torch.FloatTensor] = None
90
+ logits: torch.FloatTensor = None
91
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
92
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
93
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
94
+
95
+ log_dict: Optional[dict] = None
96
+
97
+
98
+ class NaViL(PreTrainedModel):
99
+ config_class = NaViLChatConfig
100
+ main_input_name = 'pixel_values'
101
+ _no_split_modules = ['NaViLVisionModelAnyRes', 'InternLM2DecoderLayer', 'Qwen3DecoderLayer']
102
+ _supports_flash_attn_2 = True
103
+
104
+ def __init__(self, config: NaViLChatConfig, vision_model=None, language_model=None):
105
+ super().__init__(config)
106
+ self.config = config
107
+
108
+ assert version_cmp(transformers.__version__, '4.51.0', 'ge')
109
+ image_size = config.force_image_size or config.vision_config.image_size
110
+ patch_size = config.vision_config.patch_size
111
+ self.patch_size = patch_size
112
+ self.select_layer = config.select_layer
113
+ self.template = config.template
114
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
115
+ self.downsample_ratio = config.downsample_ratio
116
+ self.patch_aspect_ratio = 1.0
117
+ self.ps_version = config.ps_version
118
+ self.llm_arch_name = config.llm_config.architectures[0]
119
+
120
+ logger.info(f'init - image_size: {image_size}, patch_size: {patch_size}, num_image_token: {self.num_image_token}')
121
+ logger.info(f'ps_version: {self.ps_version}')
122
+ if vision_model is not None:
123
+ self.vision_model = vision_model
124
+ else:
125
+ self.vision_model = NaViLVisionModelAnyRes(config.vision_config)
126
+ if language_model is not None:
127
+ self.language_model = language_model
128
+ else:
129
+ llm_config = config.llm_config
130
+ if config.llm_config.architectures[0] == 'InternLM2VEForCausalLM':
131
+ self.language_model = InternLM2VEForCausalLM(llm_config)
132
+ else:
133
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
134
+
135
+ vit_hidden_size = config.vision_config.hidden_size
136
+ llm_hidden_size = config.llm_config.hidden_size
137
+
138
+ self.mlp1 = nn.Sequential(
139
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
140
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
141
+ nn.GELU(),
142
+ nn.Linear(llm_hidden_size, llm_hidden_size)
143
+ )
144
+
145
+ self.img_context_token_id = None
146
+ self.img_start_token_id = None
147
+ self.img_end_token_id = None
148
+ self.img_uncond_token_id = None
149
+ self.img_line_break_token_id = None
150
+ self.img_frame_break_token_id = None
151
+ self.pad_token_id = None
152
+ self.conv_template = get_conv_template(self.template)
153
+ if hasattr(config, 'system_message'):
154
+ self.system_message = config.system_message
155
+ else:
156
+ self.system_message = self.conv_template.system_message
157
+
158
+ min_pixels = config.min_dynamic_patch * (patch_size ** 2)
159
+ max_pixels = config.max_dynamic_patch * (patch_size ** 2)
160
+ down_sample_ratio = config.vision_config.downsample_ratio
161
+ self.image_processor = Qwen2VLImageProcessor(
162
+ do_resize=False,
163
+ do_pad=True,
164
+ do_rescale=True,
165
+ do_normalize=True,
166
+ image_mean=VAE_MEAN,
167
+ image_std=VAE_STD,
168
+ min_pixels=min_pixels,
169
+ max_pixels=max_pixels,
170
+ patch_size=patch_size,
171
+ temporal_patch_size=1,
172
+ merge_size=int(1.0 / down_sample_ratio),
173
+ )
174
+
175
+ ##### ---- Special token embeddings ---- #####
176
+ self.special_token_embedding = nn.Embedding(len(SPECIAL_TOKEN_LIST), config.llm_config.hidden_size)
177
+ self.special_token_list = copy.deepcopy(SPECIAL_TOKEN_LIST)
178
+ self.special_token_id_list = None # Remember to initialize this in the training script after tokenizer is loaded
179
+
180
+ self.group = None # Distributed group. Remember to set this in the training script
181
+
182
+ def init_special_token_ids(self, tokenizer):
183
+ special_token_id_list = []
184
+ for token in SPECIAL_TOKEN_LIST:
185
+ special_token_id_list.append(tokenizer.convert_tokens_to_ids(token))
186
+ self.special_token_id_list = special_token_id_list
187
+
188
+ self.img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
189
+ self.img_start_token_id = tokenizer.convert_tokens_to_ids(IMG_START_TOKEN)
190
+ self.img_end_token_id = tokenizer.convert_tokens_to_ids(IMG_END_TOKEN)
191
+ self.img_uncond_token_id = tokenizer.convert_tokens_to_ids(IMG_UNCOND_TOKEN)
192
+
193
+ def replace_img_special_tokens(self, input_embeds, input_ids):
194
+ assert self.special_token_id_list is not None, "model's special_token_id_list is not initialized"
195
+ for i, token_id in enumerate(self.special_token_id_list):
196
+ token_pos = input_ids == token_id
197
+ input_embeds[token_pos] = input_embeds[token_pos] * 0.0 + self.special_token_embedding.weight[i]
198
+
199
+ return input_embeds
200
+
201
+ def _init_weights(self, module):
202
+ if isinstance(module, nn.Linear):
203
+ module.weight.data.normal_(mean=0.0, std=0.02)
204
+ if module.bias is not None:
205
+ module.bias.data.zero_()
206
+ elif isinstance(module, nn.Embedding):
207
+ module.weight.data.normal_(mean=0.0, std=0.02)
208
+ elif isinstance(module, (nn.LayerNorm, Qwen2RMSNorm, InternLM2RMSNorm)):
209
+ if hasattr(module, 'bias') and module.bias is not None:
210
+ module.bias.data.zero_()
211
+ if module.weight is not None:
212
+ module.weight.data.fill_(1.0)
213
+
214
+ def forward(
215
+ self,
216
+ pixel_values: torch.FloatTensor,
217
+ input_ids: torch.LongTensor = None,
218
+ attention_mask: Optional[torch.Tensor] = None,
219
+ position_ids: Optional[torch.LongTensor] = None,
220
+ image_flags: Optional[torch.LongTensor] = None,
221
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
222
+ labels: Optional[torch.LongTensor] = None,
223
+ use_cache: Optional[bool] = None,
224
+ output_attentions: Optional[bool] = None,
225
+ output_hidden_states: Optional[bool] = None,
226
+ return_dict: Optional[bool] = None,
227
+ generation_modality: Optional[int] = 0,
228
+ statistics: Optional[torch.LongTensor] = None,
229
+ loss_weight: Optional[List] = None,
230
+ loss_reduction_all_gather: Optional[bool] = False,
231
+ padding_type: Optional[str] = None,
232
+ type_ids: Optional[torch.LongTensor] = None,
233
+ image_grid_thw: Optional[torch.LongTensor] = None,
234
+ video_grid_thw: Optional[torch.LongTensor] = None,
235
+ rope_deltas: Optional[torch.LongTensor] = None,
236
+ # cache_position: Optional[torch.LongTensor] = None,
237
+ second_per_grid_ts: Optional[torch.Tensor] = None,
238
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
239
+ ignore_flag = False
240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
241
+
242
+ image_flags = image_flags.squeeze(-1)
243
+
244
+ input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
245
+ input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
246
+
247
+ if video_grid_thw is not None:
248
+ grid_thw = video_grid_thw
249
+ else:
250
+ grid_thw = image_grid_thw
251
+ vit_embeds, vit_embeds_ori = self.extract_feature(pixel_values, grid_thw)
252
+ vit_embeds = vit_embeds[image_flags == 1]
253
+ vit_embeds_ori = vit_embeds_ori[image_flags == 1]
254
+ vit_batch_size = image_flags.sum().item()
255
+
256
+ log_dict_keys = [
257
+ "text_loss", "text_acc1",
258
+ ]
259
+ log_dict = {k: torch.tensor(0.0, device=self.device) for k in log_dict_keys}
260
+ return_feature_scale = True
261
+
262
+ B, N, C = input_embeds.shape
263
+ selected = (input_ids == self.img_context_token_id)
264
+ try:
265
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
266
+ # ignore_flag = False
267
+ except Exception as e:
268
+ vit_embeds = vit_embeds.reshape(-1, C)
269
+ print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
270
+ f'vit_embeds.shape={vit_embeds.shape}', force=True)
271
+ n_token = selected.sum()
272
+ if n_token > vit_embeds.shape[0]:
273
+ selected = selected.view(-1, selected.shape[-1]) # 确保是 [B, N] 形状
274
+ batch_size = selected.shape[0]
275
+ max_visual_tokens = vit_embeds.shape[0] // batch_size # 每个批次可用的视觉特征数量
276
+ for i in range(batch_size):
277
+ # 获取当前批次中的图像标记位置
278
+ curr_selected = selected[i]
279
+ # 只保留前 max_visual_tokens 个标记位置
280
+ curr_indices = torch.where(curr_selected)[0][:max_visual_tokens]
281
+ # 更新选择标记
282
+ selected[i] = False
283
+ selected[i, curr_indices] = True
284
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
285
+ ignore_flag = True
286
+
287
+ # input_embeds = input_embeds.reshape(B, N, C)
288
+ visual_token_mask = (selected + (input_ids == self.img_start_token_id))
289
+
290
+ outputs = self.language_model(
291
+ inputs_embeds=input_embeds,
292
+ attention_mask=attention_mask,
293
+ position_ids=position_ids,
294
+ past_key_values=past_key_values,
295
+ use_cache=use_cache,
296
+ output_attentions=output_attentions,
297
+ output_hidden_states=output_hidden_states,
298
+ return_dict=return_dict,
299
+ visual_token_mask=visual_token_mask,
300
+ generation_modality=generation_modality,
301
+ padding_type=padding_type, # or self.train_padding_type,
302
+ skip_lm_head=False, # imgen
303
+ return_feature_scale=return_feature_scale,
304
+ )
305
+ logits = outputs.logits # B, N, C
306
+
307
+ if labels is not None and loss_weight is not None:
308
+ loss_weight = torch.tensor(loss_weight, dtype=torch.float32, device=labels.device)
309
+ # Shift so that tokens < n predict n
310
+ shift_logits = logits[..., :-1, :].contiguous()
311
+ shift_labels = labels[..., 1:].contiguous()
312
+ shift_weights = loss_weight[..., 1:].contiguous()
313
+ # Flatten the tokens
314
+ loss_fct = CrossEntropyLoss(reduction='none')
315
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
316
+ shift_labels = shift_labels.view(-1)
317
+ shift_weights = shift_weights.view(-1)
318
+ # Enable model parallelism
319
+ shift_labels = shift_labels.to(shift_logits.device)
320
+ shift_weights = shift_weights.to(shift_logits.device)
321
+ loss = loss_fct(shift_logits, shift_labels)
322
+
323
+ shift_weights_sum = shift_weights.sum()
324
+ if loss_reduction_all_gather:
325
+ dist.all_reduce(shift_weights_sum, op=dist.ReduceOp.AVG, group=self.group)
326
+
327
+ pred_ids = shift_logits.argmax(dim=-1)
328
+ pred_acc = 100.0 * ((shift_labels == pred_ids) * (shift_labels != -100)).sum() / (shift_labels != -100).sum()
329
+
330
+ log_dict.update({
331
+ "text_loss": ((loss * shift_weights).sum() / shift_weights_sum).detach(),
332
+ "text_acc1": pred_acc
333
+ })
334
+
335
+ loss = loss * shift_weights
336
+ loss = loss.sum() / shift_weights_sum
337
+
338
+ if ignore_flag:
339
+ loss = loss * 0.0
340
+
341
+ elif labels is not None:
342
+ # To reduce gpu memory, remove the image parts of the logits and labels
343
+ shift_selected = (input_ids == self.img_context_token_id)[..., :-1]
344
+ shift_logits = logits[..., :-1, :][~shift_selected]
345
+ shift_labels = labels[..., 1:][~shift_selected]
346
+
347
+ # Shift so that tokens < n predict n
348
+ # shift_logits = logits[..., :-1, :].contiguous()
349
+ # shift_labels = labels[..., 1:].contiguous()
350
+ # Flatten the tokens
351
+ loss_fct = CrossEntropyLoss()
352
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
353
+ shift_labels = shift_labels.view(-1)
354
+ # Enable model parallelism
355
+ shift_labels = shift_labels.to(shift_logits.device)
356
+ loss = loss_fct(shift_logits, shift_labels)
357
+
358
+ pred_ids = shift_logits.argmax(dim=-1)
359
+ pred_acc = 100.0 * ((shift_labels == pred_ids) * (shift_labels != -100)).sum() / (shift_labels != -100).sum()
360
+
361
+ log_dict.update({
362
+ "text_loss": loss.mean().detach(),
363
+ "text_acc1": pred_acc
364
+ })
365
+
366
+ if ignore_flag:
367
+ loss = loss * 0.0
368
+
369
+ if not return_dict:
370
+ output = (logits,) + outputs[1:]
371
+ return (loss,) + output if loss is not None else output
372
+
373
+ if return_feature_scale:
374
+ log_dict["feature_scale"] = {
375
+ "image": outputs.feature_scale[0],
376
+ "text": outputs.feature_scale[1],
377
+ }
378
+
379
+ return CausalLMOutputWithPast(
380
+ loss=loss,
381
+ logits=logits,
382
+ past_key_values=outputs.past_key_values,
383
+ hidden_states=outputs.hidden_states,
384
+ attentions=outputs.attentions,
385
+ log_dict=log_dict
386
+ )
387
+
388
def extract_feature(self, pixel_values, grid_thw=None):
    """Encode pixels with the vision tower and project them for the LLM.

    Returns a tuple ``(projected, raw)`` where ``raw`` is the pixel-shuffled
    ViT output and ``projected`` is that same tensor after the ``mlp1``
    connector.
    """
    # Keep the grid descriptor on the same device as the pixels.
    if grid_thw is not None:
        grid_thw = grid_thw.to(pixel_values.device)

    vision_out = self.vision_model(
        pixel_values=pixel_values,
        output_hidden_states=False,
        return_dict=True,
        grid_thw=grid_thw,
    )
    # Spatially downsample tokens (pixel shuffle) before the MLP projection.
    shuffled = pixel_shuffle_v2(
        vision_out.last_hidden_state,
        scale_factor=self.downsample_ratio,
        patch_aspect_ratio=self.patch_aspect_ratio,
    )
    return self.mlp1(shuffled), shuffled
405
+
406
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
         num_patches_list=None, num_scales: list = [2],
         IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
         IMG_LINE_BREAK_TOKEN='<IMG_LINE_BREAK>', IMG_FRAME_BREAK_TOKEN='<IMG_FRAME_BREAK>',
         anyres_image_size=True,
         verbose=False,
         ):
    """Run one chat turn: assemble the conversation prompt, expand every
    ``<image>`` placeholder into the matching run of image tokens, call
    ``self.generate`` and decode the answer.

    Args:
        tokenizer: tokenizer that holds the special image tokens.
        pixel_values: image tensor(s) for this turn, or None for text-only.
        question: user message; ``<image>`` placeholders are prepended
            automatically on the first turn when pixels are given.
        generation_config: dict of kwargs forwarded to ``self.generate``.
        history: list of (question, answer) pairs from earlier turns.
        return_history: when True also return the updated history.
        num_patches_list: per-image grid descriptors; in anyres mode each
            entry is a (t, h, w) triple in patch units.
        num_scales: scale count per image; its length is the number of
            images.  NOTE(review): mutable default argument — harmless here
            because it is only read, but fragile if anyone mutates it.
        anyres_image_size: True -> lay image tokens out row by row with
            line/frame break tokens; False -> a flat run of context tokens.
        verbose: print the ViT batch size and the final prompt/response.

    Returns:
        The decoded response string, or ``(response, history)`` when
        ``return_history`` is True.
    """
    # First turn with pixels: make sure the prompt carries one <image>
    # placeholder per image.
    if history is None and pixel_values is not None and '<image>' not in question:
        question = '<image>\n' * len(num_scales) + question

    if num_patches_list is None:
        # Without the grid info the anyres token layout cannot be rebuilt.
        assert not anyres_image_size, "Please provide `num_patches_list` when anyres_image_size is True."
        num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
    assert pixel_values is None or anyres_image_size or len(pixel_values) == sum(num_patches_list)

    # Cache the special-token ids on the model; `generate` relies on them.
    img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
    self.img_context_token_id = img_context_token_id
    img_start_token_id = tokenizer.convert_tokens_to_ids(IMG_START_TOKEN)
    self.img_start_token_id = img_start_token_id
    self.img_line_break_token_id = tokenizer.convert_tokens_to_ids(IMG_LINE_BREAK_TOKEN)
    self.img_frame_break_token_id = tokenizer.convert_tokens_to_ids(IMG_FRAME_BREAK_TOKEN)

    # Rebuild the full conversation in the model's chat template.
    template = get_conv_template(self.template)
    template.system_message = self.system_message
    eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)

    history = [] if history is None else history
    for (old_question, old_answer) in history:
        template.append_message(template.roles[0], old_question)
        template.append_message(template.roles[1], old_answer)
    template.append_message(template.roles[0], question)
    template.append_message(template.roles[1], None)
    query = template.get_prompt()

    if verbose and pixel_values is not None:
        image_bs = pixel_values.shape[0]
        print(f'dynamic ViT batch size: {image_bs}')

    if anyres_image_size:
        # Expand each <image> into <img>...</img> with one context token per
        # merged patch, a line-break token after every row and a frame-break
        # token after every frame.
        merge_size = int(1.0 / self.downsample_ratio)
        for image_idx in range(len(num_scales)):
            num_scales_prev = sum(num_scales[:image_idx])
            num_scale = num_scales[image_idx]
            _num_image_token_list = num_patches_list[num_scales_prev:num_scales_prev + num_scale]
            image_tokens = f"{IMG_START_TOKEN}"
            for i in range(len(_num_image_token_list)):
                _image_tokens = ""
                # (t, h, w) in patch units; h and w shrink by the merge size.
                t, h, w = _num_image_token_list[i][0], _num_image_token_list[i][1] // merge_size, _num_image_token_list[i][2] // merge_size
                for _ in range(t):
                    for _ in range(h):
                        _image_tokens += f"{IMG_CONTEXT_TOKEN * w}{IMG_LINE_BREAK_TOKEN}"
                    _image_tokens += f"{IMG_FRAME_BREAK_TOKEN}"
                image_tokens += _image_tokens
            image_tokens += f"{IMG_END_TOKEN}"
            query = query.replace('<image>', image_tokens, 1)
    else:
        # Fixed-resolution layout: a flat run of context tokens per patch.
        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)

    model_inputs = tokenizer(query, return_tensors='pt')
    input_ids = model_inputs['input_ids'].cuda()
    attention_mask = model_inputs['attention_mask'].cuda()
    generation_config['eos_token_id'] = eos_token_id
    generation_output = self.generate(
        pixel_values=pixel_values,
        input_ids=input_ids,
        attention_mask=attention_mask,
        image_grid_thw=num_patches_list,
        **generation_config
    )
    response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
    # Cut at the conversation separator, then strip stray template fragments.
    response = response.split(template.sep)[0].strip()
    # fix for InternLM2-base (textvqa)
    response = response.replace("<|im_end|", "")
    response = response.replace("<|im_end", "")
    response = response.replace("<|im", "")
    history.append((question, response))
    if return_history:
        return response, history
    else:
        # Collapse image-token runs so the printed prompt stays readable.
        query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
        query_to_print = query_to_print.replace(IMG_LINE_BREAK_TOKEN, '')
        query_to_print = query_to_print.replace(IMG_FRAME_BREAK_TOKEN, '')
        query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
        if verbose:
            print(query_to_print, response)

        return response
496
+
497
@torch.no_grad()
def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        visual_features: Optional[torch.FloatTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
        **generate_kwargs,
) -> torch.LongTensor:
    """Generate token ids after splicing visual embeddings into the text
    embedding stream at every ``<IMG_CONTEXT>`` position.

    ``visual_features`` lets the caller pass precomputed ViT embeddings and
    skip the vision tower.  Requires ``self.img_context_token_id`` to have
    been set (done by ``chat``).
    """

    assert self.img_context_token_id is not None

    grid_thw = image_grid_thw

    if pixel_values is not None:
        if visual_features is not None:
            vit_embeds = visual_features
        else:
            # Second return value (pre-MLP embeddings) is unused here.
            vit_embeds, vit_embeds_ori = self.extract_feature(pixel_values, grid_thw)
        input_embeds = self.language_model.get_input_embeddings()(input_ids)
        input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
        B, N, C = input_embeds.shape
        # input_embeds = input_embeds.reshape(B * N, C)

        # input_ids = input_ids.reshape(B * N)
        # Boolean mask of the <IMG_CONTEXT> slots to overwrite with ViT output.
        selected = (input_ids == self.img_context_token_id)  # B, N
        assert selected.sum() != 0
        input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

        # input_embeds = input_embeds.reshape(B, N, C)
    else:
        input_embeds = self.language_model.get_input_embeddings()(input_ids)
        input_embeds = self.replace_img_special_tokens(input_embeds, input_ids)
        selected = None

    # input_embeds = self.replace_special_tokens(input_embeds, input_ids)
    # Union of context and <img> start positions (bool "+" acts as OR);
    # the LM uses this to route visual tokens through its visual-expert path.
    visual_token_mask = selected + (input_ids == self.img_start_token_id) if selected is not None else None

    position_ids = None
    generate_kwargs['position_ids'] = position_ids

    outputs = self.language_model.generate(
        inputs_embeds=input_embeds,
        attention_mask=attention_mask,
        generation_config=generation_config,
        output_hidden_states=output_hidden_states,
        # return_dict=return_dict,
        use_cache=True,
        visual_token_mask=visual_token_mask,
        **generate_kwargs,
    )

    return outputs
554
+
555
+
556
def pixel_shuffle_v2(x, scale_factor=0.5, patch_aspect_ratio=1.0):
    """Space-to-depth shuffle: trade spatial resolution for channel depth.

    Accepts ``(N, L, C)`` (assumed square, ``L == H * W``) or ``(N, H, W, C)``
    and returns ``(N, L * scale_factor ** 2, C / scale_factor ** 2)``.
    ``patch_aspect_ratio`` skews the shuffle between the H and W axes.
    """
    if x.ndim == 3:
        n, l, c = x.size()
        side = int(l ** 0.5)  # assumes a square token grid
        x = x.reshape(n, side, side, c)

    n, h, w, c = x.size()

    # Split the overall scale across the two axes according to the aspect ratio.
    sqrt_ar = patch_aspect_ratio ** 0.5
    h_factor = scale_factor * sqrt_ar
    w_factor = scale_factor / sqrt_ar

    # Fold width into channels, then height, via two permute/reshape passes.
    x = x.reshape(n, h, int(w * w_factor), int(c / w_factor))
    x = x.permute(0, 2, 1, 3).contiguous()
    x = x.reshape(n, int(w * w_factor), int(h * h_factor), int(c / (w_factor * h_factor)))
    x = x.permute(0, 2, 1, 3).contiguous()
    # Flatten the reduced grid back to a token sequence.
    return x.reshape(n, int(h * h_factor * w * w_factor), int(c / (h_factor * w_factor)))
modeling_navil_vit_anyres.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # NaViL
3
+ # Copyright (c) 2025 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ from typing import Optional, Tuple, Union
7
+ from functools import partial
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+
13
+ from einops import rearrange
14
+ from timm.models.layers import DropPath
15
+ from torch import nn
16
+ from transformers.activations import ACT2FN
17
+ from transformers.modeling_outputs import (BaseModelOutput,
18
+ BaseModelOutputWithPooling)
19
+ from transformers.modeling_utils import PreTrainedModel
20
+ from transformers.utils import logging
21
+
22
+ from .configuration_navil_vit import NaViLVisionConfig
23
+ from .modular_intern_vit import (
24
+ InternVisionFlashAttention2,
25
+ InternVisionSdpaAttention,
26
+ InternMLP,
27
+ NORM2FN,
28
+ InternVisionRotaryEmbedding,
29
+ )
30
+
31
+ try:
32
+ # from .flash_attention import FlashAttention
33
+ from flash_attn import flash_attn_varlen_func
34
+ from flash_attn.layers.rotary import apply_rotary_emb
35
+ has_flash_attn = True
36
+ except:
37
+ print('FlashAttention is not installed.')
38
+ has_flash_attn = False
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
class NaViLVisionEmbeddingsAnyRes(nn.Module):
    """Patch embedding for variable-resolution inputs (no position table;
    positions come from rotary embeddings in the encoder)."""

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        self.merge_size = int(1.0 / config.downsample_ratio)

        # Non-overlapping patchify: stride == kernel == patch_size.
        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """Patchify ``pixel_values`` and flatten each item to one vector.

        Fix: cast the input to the conv weight's dtype before the convolution.
        ``target_dtype`` was previously computed but never used, so fp32
        pixels fed to bf16/fp16 weights failed with a dtype mismatch.
        """
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # [*, channel, height, width]
        # Flatten (C, H, W) per item; with patch-sized inputs H == W == 1, so
        # this yields one embed_dim vector per patch — TODO confirm callers
        # always pass patch-shaped inputs.
        return patch_embeds.flatten(1)
65
+
66
+
67
class NaViLVisionEncoderLayerAnyRes(nn.Module):
    """Pre-norm ViT block: varlen attention + MLP, each with layer-scale
    (``ls1``/``ls2``) and stochastic depth (drop-path)."""

    def __init__(self, config: NaViLVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        # Prefer the flash-attn kernel when the package imported successfully.
        if has_flash_attn:
            self.attn = InternVisionFlashAttention2(config)
        else:
            self.attn = InternVisionSdpaAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        # Per-channel residual scaling, initialized to initializer_factor.
        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
            cu_seqlens,
            rotary_pos_emb
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
            cu_seqlens: cumulative sequence lengths delimiting the variable-
                length (packed) sequences inside ``hidden_states``.
            rotary_pos_emb: per-token rotary angles forwarded to attention.
        """
        # Residual attention branch with layer-scale and drop-path.
        hidden_states = hidden_states + self.drop_path1(
            self.attn(
                self.norm1(hidden_states),
                cu_seqlens=cu_seqlens,
                rotary_pos_emb=rotary_pos_emb,
            ) * self.ls1)

        # Residual MLP branch.
        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states
107
+
108
+
109
class NaViLVisionEncoderAnyRes(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Tokens are packed into one long sequence; 2-D rotary embeddings carry
    position, and most layers attend inside local windows while the layers
    listed in ``config.fullatt_block_indexes`` attend across the whole image.

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: drop-path rate grows linearly with depth
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            NaViLVisionEncoderLayerAnyRes(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        # NOTE(review): checkpointing defaults to on (only active in training
        # mode) — confirm this is intended rather than configuration-driven.
        self.gradient_checkpointing = True

        # Half the head dim carries row angles, half column angles (2-D RoPE).
        head_dim = config.hidden_size // config.num_attention_heads
        self.rotary_pos_emb = InternVisionRotaryEmbedding(head_dim // 2)

        self.merge_size = int(1.0 / config.downsample_ratio)
        self.merge_unit = self.merge_size * self.merge_size
        self.patch_size = config.patch_size
        self.fullatt_block_indexes = config.fullatt_block_indexes
        self.window_size = config.window_size

    def rot_pos_emb(self, grid_thw):
        # Build (h, w) position ids for every patch, ordered so that each
        # merge_size x merge_size group is contiguous, then look the angles
        # up in the precomputed rotary table.
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            hpos_ids = hpos_ids.reshape(
                h // self.merge_size,
                self.merge_size,
                w // self.merge_size,
                self.merge_size,
            )
            hpos_ids = hpos_ids.permute(0, 2, 1, 3)
            hpos_ids = hpos_ids.flatten()

            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            wpos_ids = wpos_ids.reshape(
                h // self.merge_size,
                self.merge_size,
                w // self.merge_size,
                self.merge_size,
            )
            wpos_ids = wpos_ids.permute(0, 2, 1, 3)
            wpos_ids = wpos_ids.flatten()
            # Repeat the spatial grid once per temporal frame.
            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        # One angle table big enough for the largest spatial extent seen.
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb

    def get_window_index(self, grid_thw):
        # Partition each image into windows of vit_merger_window_size merged
        # tokens per side; return (a) the permutation that groups tokens by
        # window and (b) cumulative window lengths in raw-patch units.
        window_index: list = []
        cu_window_seqlens: list = [0]
        window_index_id = 0
        vit_merger_window_size = self.window_size // self.merge_size
        assert vit_merger_window_size > 0

        for grid_t, grid_h, grid_w in grid_thw:
            llm_grid_h, llm_grid_w = (
                grid_h // self.merge_size,
                grid_w // self.merge_size,
            )
            index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w)
            # Pad up to whole windows; -100 marks padding slots (filtered
            # below, so a full-window pad on aligned grids is harmless).
            pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
            pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
            num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
            num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
            index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
            index_padded = index_padded.reshape(
                grid_t,
                num_windows_h,
                vit_merger_window_size,
                num_windows_w,
                vit_merger_window_size,
            )
            index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
                grid_t,
                num_windows_h * num_windows_w,
                vit_merger_window_size,
                vit_merger_window_size,
            )
            # Window lengths count only real (non-padding) tokens.
            seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
            index_padded = index_padded.reshape(-1)
            index_new = index_padded[index_padded != -100]
            window_index.append(index_new + window_index_id)
            # merge_unit raw patches per merged token.
            cu_seqlens_tmp = seqlens.cumsum(0) * self.merge_unit + cu_window_seqlens[-1]
            cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
            window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
        window_index = torch.cat(window_index, dim=0)

        return window_index, cu_window_seqlens

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            grid_thw: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            grid_thw (`torch.Tensor`, *optional*):
                Per-image (t, h, w) patch grids describing the packed sequence.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        rotary_pos_emb = self.rot_pos_emb(grid_thw)
        window_index, cu_window_seqlens = self.get_window_index(grid_thw)
        cu_window_seqlens = torch.tensor(
            cu_window_seqlens,
            device=hidden_states.device,
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        # Drop zero-length windows produced by full-window padding.
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)

        # Reorder tokens (and their rotary angles identically) so that each
        # attention window is a contiguous span of the packed sequence.
        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len // self.merge_unit, self.merge_unit, -1)
        hidden_states = hidden_states[window_index, :, :]
        hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.merge_unit, self.merge_unit, -1)
        rotary_pos_emb = rotary_pos_emb[window_index, :, :]
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)

        # Full-attention boundaries: one span per frame of each image.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)


        for idx, encoder_layer in enumerate(self.layers):
            # Designated layers attend over whole frames; the rest attend
            # only inside their local window.
            if (self.fullatt_block_indexes is None) or (idx in self.fullatt_block_indexes):
                cu_seqlens_now = cu_seqlens
            else:
                cu_seqlens_now = cu_window_seqlens
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    partial(encoder_layer, cu_seqlens=cu_seqlens_now, rotary_pos_emb=rotary_pos_emb),
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    cu_seqlens=cu_seqlens_now,
                    rotary_pos_emb=rotary_pos_emb,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        # NOTE(review): tokens are returned in window order; the window_index
        # permutation is not inverted here — presumably the consumer accounts
        # for this.  TODO confirm.
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )
288
+
289
+
290
class NaViLVisionModelAnyRes(PreTrainedModel):
    """Any-resolution vision tower: patch embedding + windowed ViT encoder."""

    main_input_name = 'pixel_values'
    config_class = NaViLVisionConfig
    _no_split_modules = ['NaViLVisionEncoderLayerAnyRes']

    def __init__(self, config: NaViLVisionConfig):
        super().__init__(config)
        self.config = config

        self.merge_size = int(1.0 / config.downsample_ratio)
        self.embeddings = NaViLVisionEmbeddingsAnyRes(config)
        self.encoder = NaViLVisionEncoderAnyRes(config)

    def get_input_embeddings(self):
        # Returns the patch-embedding module (HF convention).
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
            grid_thw: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """Encode pixels (or precomputed ``pixel_embeds``) into patch features.

        Returns the encoder output with ``last_hidden_state`` regrouped into
        ``(-1, merge_size, merge_size, hidden)`` blocks for pixel-shuffling;
        ``pooler_output`` is always None.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            # Caller already ran the patch embedding.
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            grid_thw=grid_thw
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        # pooled_output = last_hidden_state[:, 0, :]

        # Group every merge_unit consecutive tokens into a merge x merge tile.
        last_hidden_state = last_hidden_state.unsqueeze(1).reshape(-1, self.merge_size, self.merge_size, last_hidden_state.shape[-1])

        if not return_dict:
            return (last_hidden_state, ) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=None,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
modular_intern_vit.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+
10
+ from einops import rearrange
11
+ from torch import nn
12
+ from transformers.activations import ACT2FN
13
+ from transformers.utils import logging
14
+
15
+ from .configuration_navil_vit import NaViLVisionConfig
16
+
17
+ try:
18
+ # from .flash_attention import FlashAttention
19
+ from flash_attn import flash_attn_varlen_func
20
+ from flash_attn.layers.rotary import apply_rotary_emb
21
+ has_flash_attn = True
22
+ except:
23
+ print('FlashAttention is not installed.')
24
+ has_flash_attn = False
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
class InternRMSNorm(nn.Module):
    """Root-mean-square LayerNorm: no mean subtraction, no bias term."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        original_dtype = hidden_states.dtype
        # Normalize in fp32 for numerical stability, then cast back.
        upcast = hidden_states.to(torch.float32)
        inv_rms = torch.rsqrt(upcast.pow(2).mean(-1, keepdim=True) + self.variance_epsilon)
        return self.weight * (upcast * inv_rms).to(original_dtype)
41
+
42
+
43
# Prefer apex's fused RMSNorm kernel when it imports cleanly; otherwise keep
# the pure-PyTorch InternRMSNorm defined above.
try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # apex not installed: using the normal InternRMSNorm
    pass
except Exception:
    # apex present but broken (e.g. ABI mismatch): warn and fall back.
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass
55
+
56
+
57
# Maps config.norm_type strings to normalization-layer constructors.
NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}
61
+
62
+
63
class InternVisionRotaryEmbedding(nn.Module):
    """Precomputes rotary-embedding angle tables for vision attention."""

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        # Inverse frequencies for every second channel: theta^(-2i/dim).
        exponents = torch.arange(0, dim, 2, dtype=torch.float) / dim
        self.register_buffer("inv_freq", 1.0 / (theta ** exponents), persistent=False)

    def forward(self, seqlen: int) -> torch.Tensor:
        # Outer product position x frequency -> (seqlen, dim // 2) angles.
        positions = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        return torch.outer(positions, self.inv_freq)
73
+
74
+
75
class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        # Optional RMSNorm on the flattened-heads q/k vectors.
        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            # NOTE(review): `FlashAttention` is not defined in this module (its
            # import is commented out at the top); if this branch is taken it
            # raises NameError.  Confirm this class's flash path is unused.
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        # Plain O(N^2) softmax attention over a (B, N, C) batch.
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            # Normalize q/k across the full hidden dim, then restore head split.
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        # Fused attention path; requires self.inner_attn (see NOTE above).
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x
147
+
148
+
149
def rotate_half(x):
    """Rotates half the hidden dims of the input: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)


def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
    """Applies rotary position embedding to a ``(1, seq, heads, dim)`` tensor.

    ``freqs`` has shape ``(seq, dim // 2)``; each angle is duplicated so it
    covers both halves of the head dimension.
    """
    orig_dtype = tensor.dtype
    t = tensor.float()  # compute the rotation in fp32
    # (seq, dim/2) -> (1, seq, 1, dim): add broadcast axes for batch/heads.
    cos = freqs.cos().unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
    sin = freqs.sin().unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
    rotated = t * cos + rotate_half(t) * sin
    return rotated.to(orig_dtype)
165
+
166
+
167
class InternVisionSdpaAttention(nn.Module):
    """Varlen attention over a packed token sequence using PyTorch SDPA.

    Sequence boundaries come from ``cu_seqlens``; cross-sequence attention is
    blocked with an explicit block-diagonal boolean mask.
    """

    def __init__(self, config: NaViLVisionConfig) -> None:
        super().__init__()

        self.config = config

        dim = config.hidden_size
        num_heads = config.num_attention_heads
        self.num_heads = num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)

        self.proj_drop = nn.Dropout(config.dropout)

    def forward(
            self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: torch.Tensor = None
    ) -> torch.Tensor:
        # hidden_states is packed: (total_tokens, hidden); no batch dim.
        seq_length = hidden_states.shape[0]
        q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)

        if self.qk_normalization:
            # flatten(1).view(shape) keeps the per-head layout; RMSNorm here
            # normalizes over the head dim of each token.
            q = self.q_norm(q.flatten(1).view(q.shape))
            k = self.k_norm(k.flatten(1).view(k.shape))

        # Rotate q/k with the per-token 2-D rotary angles.
        q = apply_rotary_pos_emb_vision(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
        k = apply_rotary_pos_emb_vision(k.unsqueeze(0), rotary_pos_emb).squeeze(0)

        # Block-diagonal mask: tokens attend only within their own span.
        # NOTE(review): this materializes an O(seq^2) bool mask — fine for
        # windowed spans, costly for very long full-attention sequences.
        attention_mask = torch.zeros([1, seq_length, seq_length], device=q.device, dtype=torch.bool)
        for i in range(1, len(cu_seqlens)):
            attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True
        # SDPA expects (heads, seq, head_dim).
        q = q.transpose(0, 1)
        k = k.transpose(0, 1)
        v = v.transpose(0, 1)
        attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0)
        attn_output = attn_output.transpose(0, 1)
        attn_output = attn_output.reshape(seq_length, -1)
        attn_output = self.proj(attn_output)
        attn_output = self.proj_drop(attn_output)
        return attn_output
212
+
213
+
214
def apply_rotary_pos_emb_flashatt(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
    """Applies rotary embedding via flash-attn's fused ``apply_rotary_emb``.

    Computation runs in fp32 and the result is cast back to the input dtype.
    """
    cos = freqs.cos().float()
    sin = freqs.sin().float()
    return apply_rotary_emb(tensor.float(), cos, sin).type_as(tensor)
220
+
221
+
222
class InternVisionFlashAttention2(nn.Module):
    """Varlen attention over a packed token sequence using flash-attn 2.

    Sequence boundaries are passed directly to ``flash_attn_varlen_func`` via
    ``cu_seqlens`` — no explicit attention mask is built.
    """

    def __init__(self, config: NaViLVisionConfig) -> None:
        super().__init__()
        self.config = config

        dim = config.hidden_size
        num_heads = config.num_attention_heads

        self.num_heads = num_heads
        self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(dim, eps=config.layer_norm_eps)

        self.proj_drop = nn.Dropout(config.dropout)

    def forward(
            self,
            hidden_states: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor = None,
    ) -> torch.Tensor:
        # hidden_states is packed: (total_tokens, hidden); no batch dim.
        seq_length = hidden_states.shape[0]
        q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)

        if self.qk_normalization:
            # flatten(1).view(shape) keeps the per-head layout.
            q = self.q_norm(q.flatten(1).view(q.shape))
            k = self.k_norm(k.flatten(1).view(k.shape))

        # Rotate q/k with the per-token 2-D rotary angles (fused kernel).
        q = apply_rotary_pos_emb_flashatt(q.unsqueeze(0), rotary_pos_emb).squeeze(0)
        k = apply_rotary_pos_emb_flashatt(k.unsqueeze(0), rotary_pos_emb).squeeze(0)

        # flash_attn_varlen_func needs the longest span up front.
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
        attn_output = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen).reshape(
            seq_length, -1
        )
        attn_output = self.proj(attn_output)
        attn_output = self.proj_drop(attn_output)
        return attn_output
265
+
266
+
267
class InternMLP(nn.Module):
    """Standard two-layer transformer feed-forward block."""

    def __init__(self, config: NaViLVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand -> nonlinearity -> project back to the hidden size.
        return self.fc2(self.act(self.fc1(hidden_states)))
special_tokens_map.json ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ {
4
+ "content": "</box>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ {
11
+ "content": "<box>",
12
+ "lstrip": false,
13
+ "normalized": false,
14
+ "rstrip": false,
15
+ "single_word": false
16
+ },
17
+ {
18
+ "content": "<IMG_CONTEXT>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ {
25
+ "content": "</img>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ {
32
+ "content": "<img>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ },
38
+ {
39
+ "content": "</quad>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false
44
+ },
45
+ {
46
+ "content": "<quad>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false
51
+ },
52
+ {
53
+ "content": "</ref>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false
58
+ },
59
+ {
60
+ "content": "<ref>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false
65
+ },
66
+ {
67
+ "content": "<img_uncond>",
68
+ "lstrip": false,
69
+ "normalized": false,
70
+ "rstrip": false,
71
+ "single_word": false
72
+ },
73
+ {
74
+ "content": "<IMG_LINE_BREAK>",
75
+ "lstrip": false,
76
+ "normalized": false,
77
+ "rstrip": false,
78
+ "single_word": false
79
+ },
80
+ {
81
+ "content": "<IMG_FRAME_BREAK>",
82
+ "lstrip": false,
83
+ "normalized": false,
84
+ "rstrip": false,
85
+ "single_word": false
86
+ }
87
+ ],
88
+ "bos_token": {
89
+ "content": "<s>",
90
+ "lstrip": false,
91
+ "normalized": false,
92
+ "rstrip": false,
93
+ "single_word": false
94
+ },
95
+ "eos_token": {
96
+ "content": "</s>",
97
+ "lstrip": false,
98
+ "normalized": false,
99
+ "rstrip": false,
100
+ "single_word": false
101
+ },
102
+ "pad_token": {
103
+ "content": "</s>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false
108
+ },
109
+ "unk_token": {
110
+ "content": "<unk>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false
115
+ }
116
+ }
tokenization_internlm2.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ """Tokenization classes for InternLM."""
19
+ import os
20
+ from shutil import copyfile
21
+ from typing import Any, Dict, List, Optional, Tuple
22
+
23
+ import sentencepiece as spm
24
+ from transformers.tokenization_utils import PreTrainedTokenizer
25
+ from transformers.utils import logging
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {}
32
+
33
+
34
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
35
+ class InternLM2Tokenizer(PreTrainedTokenizer):
36
+ """
37
+ Construct a InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ Path to the vocabulary file.
42
+ """
43
+
44
+ vocab_files_names = VOCAB_FILES_NAMES
45
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
46
+ model_input_names = ["input_ids", "attention_mask"]
47
+ _auto_class = "AutoTokenizer"
48
+
49
+ def __init__(
50
+ self,
51
+ vocab_file,
52
+ unk_token="<unk>",
53
+ bos_token="<s>",
54
+ eos_token="</s>",
55
+ pad_token="</s>",
56
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
57
+ add_bos_token=True,
58
+ add_eos_token=False,
59
+ decode_with_prefix_space=False,
60
+ clean_up_tokenization_spaces=False,
61
+ **kwargs,
62
+ ):
63
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
64
+ self.vocab_file = vocab_file
65
+ self.add_bos_token = add_bos_token
66
+ self.add_eos_token = add_eos_token
67
+ self.decode_with_prefix_space = decode_with_prefix_space
68
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
69
+ self.sp_model.Load(vocab_file)
70
+ self._no_prefix_space_tokens = None
71
+ super().__init__(
72
+ bos_token=bos_token,
73
+ eos_token=eos_token,
74
+ unk_token=unk_token,
75
+ pad_token=pad_token,
76
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
77
+ **kwargs,
78
+ )
79
+
80
+ @property
81
+ def no_prefix_space_tokens(self):
82
+ if self._no_prefix_space_tokens is None:
83
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
84
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
85
+ return self._no_prefix_space_tokens
86
+
87
+ @property
88
+ def vocab_size(self):
89
+ """Returns vocab size"""
90
+ return self.sp_model.get_piece_size()
91
+
92
+ @property
93
+ def bos_token_id(self) -> Optional[int]:
94
+ return self.sp_model.bos_id()
95
+
96
+ @property
97
+ def eos_token_id(self) -> Optional[int]:
98
+ return self.sp_model.eos_id()
99
+
100
+ def get_vocab(self):
101
+ """Returns vocab as a dict"""
102
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
103
+ vocab.update(self.added_tokens_encoder)
104
+ return vocab
105
+
106
+ def _tokenize(self, text):
107
+ """Returns a tokenized string."""
108
+ return self.sp_model.encode(text, out_type=str)
109
+
110
+ def _convert_token_to_id(self, token):
111
+ """Converts a token (str) in an id using the vocab."""
112
+ return self.sp_model.piece_to_id(token)
113
+
114
+ def _convert_id_to_token(self, index):
115
+ """Converts an index (integer) in a token (str) using the vocab."""
116
+ token = self.sp_model.IdToPiece(index)
117
+ return token
118
+
119
+ def _maybe_add_prefix_space(self, tokens, decoded):
120
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
121
+ return " " + decoded
122
+ else:
123
+ return decoded
124
+
125
+ def convert_tokens_to_string(self, tokens):
126
+ """Converts a sequence of tokens (string) in a single string."""
127
+ current_sub_tokens = []
128
+ out_string = ""
129
+ prev_is_special = False
130
+ for token in tokens:
131
+ # make sure that special tokens are not decoded using sentencepiece model
132
+ if token in self.all_special_tokens:
133
+ if not prev_is_special:
134
+ out_string += " "
135
+ out_string += self.sp_model.decode(current_sub_tokens) + token
136
+ prev_is_special = True
137
+ current_sub_tokens = []
138
+ else:
139
+ current_sub_tokens.append(token)
140
+ prev_is_special = False
141
+ out_string += self.sp_model.decode(current_sub_tokens)
142
+ out_string = self.clean_up_tokenization(out_string)
143
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
144
+ return out_string[1:]
145
+
146
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
147
+ """
148
+ Save the vocabulary and special tokens file to a directory.
149
+
150
+ Args:
151
+ save_directory (`str`):
152
+ The directory in which to save the vocabulary.
153
+
154
+ Returns:
155
+ `Tuple(str)`: Paths to the files saved.
156
+ """
157
+ if not os.path.isdir(save_directory):
158
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
159
+ return
160
+ out_vocab_file = os.path.join(
161
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
162
+ )
163
+
164
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
165
+ copyfile(self.vocab_file, out_vocab_file)
166
+ elif not os.path.isfile(self.vocab_file):
167
+ with open(out_vocab_file, "wb") as fi:
168
+ content_spiece_model = self.sp_model.serialized_model_proto()
169
+ fi.write(content_spiece_model)
170
+
171
+ return (out_vocab_file,)
172
+
173
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
174
+ if self.add_bos_token:
175
+ bos_token_ids = [self.bos_token_id]
176
+ else:
177
+ bos_token_ids = []
178
+
179
+ output = bos_token_ids + token_ids_0
180
+
181
+ if token_ids_1 is not None:
182
+ output = output + token_ids_1
183
+
184
+ if self.add_eos_token:
185
+ output = output + [self.eos_token_id]
186
+
187
+ return output
188
+
189
+ def get_special_tokens_mask(
190
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
191
+ ) -> List[int]:
192
+ """
193
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
194
+ special tokens using the tokenizer `prepare_for_model` method.
195
+
196
+ Args:
197
+ token_ids_0 (`List[int]`):
198
+ List of IDs.
199
+ token_ids_1 (`List[int]`, *optional*):
200
+ Optional second list of IDs for sequence pairs.
201
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
202
+ Whether or not the token list is already formatted with special tokens for the model.
203
+
204
+ Returns:
205
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
206
+ """
207
+ if already_has_special_tokens:
208
+ return super().get_special_tokens_mask(
209
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
210
+ )
211
+
212
+ if token_ids_1 is None:
213
+ return [1] + ([0] * len(token_ids_0)) + [1]
214
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
215
+
216
+ def create_token_type_ids_from_sequences(
217
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
218
+ ) -> List[int]:
219
+ """
220
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
221
+ use of token type ids, therefore a list of zeros is returned.
222
+
223
+ Args:
224
+ token_ids_0 (`List[int]`):
225
+ List of IDs.
226
+ token_ids_1 (`List[int]`, *optional*):
227
+ Optional second list of IDs for sequence pairs.
228
+
229
+ Returns:
230
+ `List[int]`: List of zeros.
231
+ """
232
+ eos = [self.eos_token_id]
233
+
234
+ if token_ids_1 is None:
235
+ return len(token_ids_0 + eos) * [0]
236
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
3
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "92544": {
28
+ "content": "</box>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "92545": {
36
+ "content": "<box>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "92546": {
44
+ "content": "<IMG_CONTEXT>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "92547": {
52
+ "content": "</img>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "92548": {
60
+ "content": "<img>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "92549": {
68
+ "content": "</quad>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "92550": {
76
+ "content": "<quad>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "92551": {
84
+ "content": "</ref>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "92552": {
92
+ "content": "<ref>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "92553": {
100
+ "content": "<img_uncond>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "92554": {
108
+ "content": "<IMG_LINE_BREAK>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "92555": {
116
+ "content": "<IMG_FRAME_BREAK>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ }
123
+ },
124
+ "additional_special_tokens": [
125
+ "</box>",
126
+ "<box>",
127
+ "<IMG_CONTEXT>",
128
+ "</img>",
129
+ "<img>",
130
+ "</quad>",
131
+ "<quad>",
132
+ "</ref>",
133
+ "<ref>",
134
+ "<img_uncond>",
135
+ "<IMG_LINE_BREAK>",
136
+ "<IMG_FRAME_BREAK>"
137
+ ],
138
+ "auto_map": {
139
+ "AutoTokenizer": [
140
+ "tokenization_internlm2.InternLM2Tokenizer",
141
+ null
142
+ ]
143
+ },
144
+ "bos_token": "<s>",
145
+ "clean_up_tokenization_spaces": false,
146
+ "eos_token": "</s>",
147
+ "extra_special_tokens": {},
148
+ "model_max_length": 16384,
149
+ "pad_token": "</s>",
150
+ "tokenizer_class": "InternLM2Tokenizer",
151
+ "unk_token": "<unk>"
152
+ }