{
  "activation_dropout": 0.0,
  "activation_function": "relu",
  "adaptor_dropout": 0.1,
  "adaptor_kernel_size": 8,
  "adaptor_stride": 8,
  "add_adapter": true,
  "architectures": [
    "SeamlessM4Tv2Model"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 2,
  "char_vocab_size": 10943,
  "conv_depthwise_kernel_size": 31,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 8192,
  "decoder_layerdrop": 0.05,
  "decoder_layers": 24,
  "decoder_start_token_id": 3,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 8192,
  "encoder_layerdrop": 0.05,
  "encoder_layers": 24,
  "eos_token_id": 3,
  "feature_projection_input_dim": 160,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "is_encoder_decoder": true,
  "lang_embed_dim": 256,
  "layer_norm_eps": 1e-05,
  "leaky_relu_slope": 0.1,
  "left_max_position_embeddings": 64,
  "max_new_tokens": 256,
  "max_position_embeddings": 4096,
  "model_type": "seamless_m4t_v2",
  "num_adapter_layers": 1,
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 0,
  "position_embeddings_type": "relative_key",
  "resblock_dilation_sizes": [
    [
      1,
      3,
      5
    ],
    [
      1,
      3,
      5
    ],
    [
      1,
      3,
      5
    ]
  ],
  "resblock_kernel_sizes": [
    3,
    7,
    11
  ],
  "right_max_position_embeddings": 8,
  "sampling_rate": 16000,
  "scale_embedding": true,
  "speech_encoder_attention_heads": 16,
  "speech_encoder_chunk_size": 20000,
  "speech_encoder_dropout": 0.0,
  "speech_encoder_hidden_act": "swish",
  "speech_encoder_intermediate_size": 4096,
  "speech_encoder_layerdrop": 0.1,
  "speech_encoder_layers": 24,
  "speech_encoder_left_chunk_num": 128,
  "spkr_embed_dim": 256,
  "t2u_bos_token_id": 0,
  "t2u_decoder_attention_heads": 16,
  "t2u_decoder_ffn_dim": 8192,
  "t2u_decoder_layers": 6,
  "t2u_encoder_attention_heads": 16,
  "t2u_encoder_ffn_dim": 8192,
  "t2u_encoder_layers": 6,
  "t2u_eos_token_id": 2,
  "t2u_max_position_embeddings": 4096,
  "t2u_pad_token_id": 1,
  "t2u_variance_pred_dropout": 0.5,
  "t2u_variance_predictor_embed_dim": 1024,
  "t2u_variance_predictor_hidden_dim": 256,
  "t2u_variance_predictor_kernel_size": 3,
  "t2u_vocab_size": 10082,
  "torch_dtype": "float32",
  "transformers_version": "4.36.0.dev0",
  "unit_embed_dim": 1280,
  "unit_hifi_gan_vocab_size": 10000,
  "upsample_initial_channel": 512,
  "upsample_kernel_sizes": [
    11,
    8,
    8,
    4,
    4
  ],
  "upsample_rates": [
    5,
    4,
    4,
    2,
    2
  ],
  "use_cache": true,
  "var_pred_dropout": 0.5,
  "variance_predictor_kernel_size": 3,
  "vocab_size": 256102,
  "vocoder_num_langs": 36,
  "vocoder_num_spkrs": 200,
  "vocoder_offset": 4
}