{
  "architectures": [
    "MERaLiON2ForConditionalGeneration"
  ],
  "auto_map": {
    "AutoConfig": "configuration_meralion2.MERaLiON2Config",
    "AutoModelForSpeechSeq2Seq": "modeling_meralion2.MERaLiON2ForConditionalGeneration"
  },
  "head_dim": 256,
  "hidden_size": 3584,
  "intermediate_size": 14336,
  "model_type": "meralion2",
  "num_attention_heads": 16,
  "num_hidden_layers": 42,
  "num_key_value_heads": 8,
  "sliding_window": 4096,
  "speech_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "openai/whisper-large-v3",
    "activation_dropout": 0.0,
    "activation_function": "gelu",
    "apply_spec_augment": true,
    "architectures": [
      "WhisperForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "begin_suppress_tokens": [
      220,
      50257
    ],
    "bos_token_id": 50257,
    "classifier_proj_size": 256,
    "d_model": 1280,
    "decoder_attention_heads": 20,
    "decoder_ffn_dim": 5120,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 32,
    "decoder_start_token_id": 50258,
    "dropout": 0.0,
    "encoder_attention_heads": 20,
    "encoder_ffn_dim": 5120,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 32,
    "eos_token_id": 50257,
    "init_std": 0.02,
    "mask_feature_length": 10,
    "mask_feature_min_masks": 0,
    "mask_feature_prob": 0.1,
    "mask_time_length": 20,
    "mask_time_min_masks": 2,
    "mask_time_prob": 0.1,
    "max_length": 448,
    "max_source_positions": 1500,
    "max_target_positions": 448,
    "median_filter_width": 7,
    "model_type": "whisper",
    "num_hidden_layers": 32,
    "num_mel_bins": 128,
    "scale_embedding": false,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "use_weighted_layer_sum": false,
    "vocab_size": 51866
  },
  "speech_mlp_scale_factor": 15,
  "speech_token_index": 255999,
  "text_config": {
    "_attn_implementation_autoset": true,
    "_name_or_path": "google/gemma-2-9b-it",
    "architectures": [
      "Gemma2ForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "attn_logit_softcapping": 50.0,
    "cache_implementation": "hybrid",
    "final_logit_softcapping": 30.0,
    "head_dim": 256,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_activation": "gelu_pytorch_tanh",
    "hidden_size": 3584,
    "initializer_range": 0.02,
    "intermediate_size": 14336,
    "max_position_embeddings": 8192,
    "model_type": "gemma2",
    "num_attention_heads": 16,
    "num_hidden_layers": 42,
    "num_key_value_heads": 8,
    "query_pre_attn_scalar": 256,
    "rms_norm_eps": 1e-06,
    "rope_theta": 10000.0,
    "sliding_window": 4096,
    "sliding_window_size": 4096,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "vocab_size": 256000
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.50.1"
}
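
The `auto_map` entries point at custom code files (`configuration_meralion2.py`, `modeling_meralion2.py`), so loading this checkpoint through the `Auto*` classes requires `trust_remote_code=True`. Below is a minimal sketch of inspecting the config and loading the model under that assumption; the repository id is a placeholder (it does not appear in the config), and the nested-config attribute names follow the `speech_config` / `text_config` keys above.

```python
# Minimal sketch, assuming this config.json ships in a repo together with the
# custom code files referenced in "auto_map". The repo id is a placeholder.
from transformers import AutoConfig, AutoModelForSpeechSeq2Seq

repo_id = "path/to/meralion2-checkpoint"  # placeholder, replace with the actual repo

# trust_remote_code=True lets AutoConfig / AutoModel resolve the classes listed
# under "auto_map" instead of a built-in architecture.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.model_type)                # "meralion2"
print(config.speech_config.model_type)  # "whisper" (speech encoder side)
print(config.text_config.model_type)    # "gemma2"  (text decoder side)

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype="bfloat16",  # matches the "torch_dtype" stored in the config
)
```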