{
"architectures": [
"Llama4ForConditionalGeneration"
],
"boi_token_index": 200080,
"eoi_token_index": 200081,
"image_token_index": 200092,
"model_type": "llama4",
"quantization_config": {
"algo_config": null,
"exclude": [
"vision_model.patch_embedding.linear",
"vision_model.model.layers.0.self_attn.q_proj",
"vision_model.model.layers.0.self_attn.k_proj",
"vision_model.model.layers.0.self_attn.v_proj",
"vision_model.model.layers.0.self_attn.o_proj",
"vision_model.model.layers.0.mlp.fc1",
"vision_model.model.layers.0.mlp.fc2",
"vision_model.model.layers.1.self_attn.q_proj",
"vision_model.model.layers.1.self_attn.k_proj",
"vision_model.model.layers.1.self_attn.v_proj",
"vision_model.model.layers.1.self_attn.o_proj",
"vision_model.model.layers.1.mlp.fc1",
"vision_model.model.layers.1.mlp.fc2",
"vision_model.vision_adapter.mlp.fc1",
"vision_model.vision_adapter.mlp.fc2",
"multi_modal_projector.linear_1",
"language_model.model.layers.0.self_attn.q_proj",
"language_model.model.layers.0.self_attn.k_proj",
"language_model.model.layers.0.self_attn.v_proj",
"language_model.model.layers.0.self_attn.o_proj",
"language_model.model.layers.0.feed_forward.router",
"language_model.model.layers.1.self_attn.q_proj",
"language_model.model.layers.1.self_attn.k_proj",
"language_model.model.layers.1.self_attn.v_proj",
"language_model.model.layers.1.self_attn.o_proj",
"language_model.model.layers.1.feed_forward.router",
"language_model.lm_head"
],
"export": {
"kv_cache_group": [
"*language_model.*k_proj",
"*language_model.*v_proj"
],
"min_kv_scale": 0.0,
"pack_method": "reorder",
"weight_format": "real_quantized",
"weight_merge_groups": null
},
"global_quant_config": {
"bias": null,
"input_tensors": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": true,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
},
"output_tensors": null,
"target_device": null,
"weight": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": false,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
}
},
"layer_quant_config": {
"*language_model.*k_proj": {
"bias": null,
"input_tensors": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": true,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
},
"output_tensors": {
"ch_axis": null,
"dtype": "fp8_e4m3",
"group_size": null,
"is_dynamic": false,
"is_mx_scale_constraint": null,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerTensorMinMaxObserver",
"qscheme": "per_tensor",
"round_method": null,
"scale_calculation_mode": null,
"scale_format": null,
"scale_type": null,
"symmetric": null
},
"target_device": null,
"weight": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": false,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
}
},
"*language_model.*v_proj": {
"bias": null,
"input_tensors": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": true,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
},
"output_tensors": {
"ch_axis": null,
"dtype": "fp8_e4m3",
"group_size": null,
"is_dynamic": false,
"is_mx_scale_constraint": null,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerTensorMinMaxObserver",
"qscheme": "per_tensor",
"round_method": null,
"scale_calculation_mode": null,
"scale_format": null,
"scale_type": null,
"symmetric": null
},
"target_device": null,
"weight": {
"ch_axis": -1,
"dtype": "fp4",
"group_size": 32,
"is_dynamic": false,
"is_mx_scale_constraint": false,
"is_scale_quant": false,
"mx_element_dtype": null,
"observer_cls": "PerBlockMXObserver",
"qscheme": "per_group",
"round_method": "half_even",
"scale_calculation_mode": "even",
"scale_format": "e8m0",
"scale_type": "float",
"symmetric": null
}
}
},
"layer_type_quant_config": {},
"quant_method": "quark",
"quant_mode": "eager_mode",
"softmax_quant_spec": null
},
"text_config": {
"_attn_implementation_autoset": true,
"attention_bias": false,
"attention_chunk_size": 8192,
"attention_dropout": 0.0,
"attn_scale": 0.1,
"attn_temperature_tuning": 4,
"bos_token_id": 200000,
"eos_token_id": [
200001,
200007,
200008
],
"floor_scale": 8192,
"for_llm_compressor": false,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 5120,
"initializer_range": 0.02,
"interleave_moe_layer_step": 1,
"intermediate_size": 8192,
"intermediate_size_mlp": 16384,
"max_position_embeddings": 10485760,
"model_type": "llama4_text",
"moe_layers": [
0,
1
],
"no_rope_layers": [
1,
0
],
"num_attention_heads": 40,
"num_experts_per_tok": 1,
"num_hidden_layers": 2,
"num_key_value_heads": 8,
"num_local_experts": 16,
"output_router_logits": false,
"pad_token_id": 200018,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 16.0,
"high_freq_factor": 1.0,
"low_freq_factor": 1.0,
"original_max_position_embeddings": 8192,
"rope_type": "llama3"
},
"rope_theta": 500000.0,
"router_aux_loss_coef": 0.001,
"router_jitter_noise": 0.0,
"torch_dtype": "bfloat16",
"use_cache": true,
"use_qk_norm": true,
"vocab_size": 202048
},
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.51.0",
"vision_config": {
"_attn_implementation_autoset": true,
"attention_dropout": 0.0,
"hidden_act": "gelu",
"hidden_size": 1408,
"image_size": 336,
"initializer_range": 0.02,
"intermediate_size": 5632,
"model_type": "llama4_vision_model",
"multi_modal_projector_bias": false,
"norm_eps": 1e-05,
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 2,
"patch_size": 14,
"pixel_shuffle_ratio": 0.5,
"projector_dropout": 0.0,
"projector_input_dim": 4096,
"projector_output_dim": 4096,
"rope_theta": 10000,
"vision_feature_layer": -1,
"vision_feature_select_strategy": "default",
"vision_output_dim": 4096
}
}