						|  | """ Phi-4-MM model configuration""" | 
					
						
						|  |  | 
					
						
						|  | from transformers.configuration_utils import PretrainedConfig | 
					
						
						|  | from transformers.utils import logging | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | logger = logging.get_logger(__name__) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | class Phi4MMConfig(PretrainedConfig): | 
					
						
						|  | r""" | 
					
						
						|  | This is the configuration class to store the configuration of a [`Phi4MMModel`]. It is used to instantiate a Phi-4-MM | 
					
						
						|  | model according to the specified arguments, defining the model architecture. | 
					
						
						|  |  | 
					
						
						|  | Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the | 
					
						
						|  | documentation from [`PretrainedConfig`] for more information. | 
					
						
						|  |  | 
					
						
						|  | Args: | 
					
						
        vocab_size (`int`, *optional*, defaults to 200064):
            Vocabulary size of the Phi-4-MM model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`Phi4MMModel`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for MLP outputs.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope`, and
            `short_factor` and `long_factor` must be lists of numbers whose length equals half of the rotary
            dimension, i.e. `hidden_size // num_attention_heads * partial_rotary_factor // 2`.
        partial_rotary_factor (`float`, *optional*, defaults to 1):
            Fraction of the query and key dimensions that receive rotary embedding.
        bos_token_id (`int`, *optional*, defaults to 199999):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 199999):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 199999):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi4MMModel, Phi4MMConfig

    >>> # Initializing a Phi-4-MM style configuration
    >>> configuration = Phi4MMConfig.from_pretrained("TBA")

    >>> # Initializing a model from the configuration
    >>> model = Phi4MMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
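
    # With the default geometry (hidden_size=3072, num_attention_heads=32, partial_rotary_factor=1), the rotary
    # dimension is 3072 // 32 = 96, so the `short_factor` and `long_factor` lists described above must each hold
    # 96 // 2 = 48 numbers; see `_rope_scaling_validation` below.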
					
						
    model_type = "phi4mm"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=200064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=1,
        bos_token_id=199999,
        eos_token_id=199999,
        pad_token_id=199999,
        sliding_window=None,
        embd_layer: str = "default",
        img_processor=None,
        audio_processor=None,
        vision_lora=None,
        speech_lora=None,
        **kwargs,
    ):
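        # The multimodal-specific arguments below are not described in the class docstring. Reading from the names
        # (an assumption, not documented upstream): `embd_layer` selects the embedding-layer variant,
        # `img_processor` and `audio_processor` carry the vision/audio encoder configurations, and `vision_lora` /
        # `speech_lora` carry the LoRA adapter settings for the vision and speech modalities.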
					
						
        self.embd_layer = embd_layer
        self.img_processor = img_processor
        self.audio_processor = audio_processor
        self.vision_lora = vision_lora
        self.speech_lora = speech_lora

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self._rope_scaling_adjustment()
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_adjustment(self):
        """
        Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
        """
        if self.rope_scaling is None:
            return

        rope_scaling_type = self.rope_scaling.get("type", None)

        if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
            self.rope_scaling["type"] = "longrope"

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        rotary_ndims = int(self.hidden_size // self.num_attention_heads * self.partial_rotary_factor)
        if not len(rope_scaling_short_factor) == rotary_ndims // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == rotary_ndims // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_long_factor)}"
            )
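

# A minimal usage sketch (not part of the upstream file) showing how the pieces above fit together. The values
# below are illustrative assumptions rather than the released checkpoint's settings: `num_key_value_heads=8` just
# demonstrates grouped-query attention (32 query heads sharing 8 key/value heads, i.e. groups of 4), and the
# all-ones factor lists have the length required by `_rope_scaling_validation` with the default geometry
# (hidden_size // num_attention_heads * partial_rotary_factor // 2 = 3072 // 32 * 1 // 2 = 48).
if __name__ == "__main__":
    config = Phi4MMConfig(
        num_key_value_heads=8,
        rope_scaling={
            "type": "longrope",
            "short_factor": [1.0] * 48,
            "long_factor": [1.0] * 48,
        },
    )
    print(config.model_type)  # phi4mm
    print(config.num_key_value_heads)  # 8
    print(len(config.rope_scaling["short_factor"]))  # 48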