{
"architectures": [
"InternS1ForConditionalGeneration"
],
"downsample_ratio": 0.5,
"image_seq_length": 256,
"image_token_id": 152957,
"model_type": "interns1",
"projector_hidden_act": "gelu",
"quantization_config": {
"activation_scheme": "dynamic",
"fmt": "e4m3",
"modules_to_not_convert": [
"lm_head",
"model.language_model.embed_tokens",
"model.multi_modal_projector.linear_1",
"model.multi_modal_projector.linear_2",
"model.vision_tower.embeddings.patch_embeddings.projection",
"model.vision_tower.encoder.layer.0.attention.k_proj",
"model.vision_tower.encoder.layer.0.attention.projection_layer",
"model.vision_tower.encoder.layer.0.attention.q_proj",
"model.vision_tower.encoder.layer.0.attention.v_proj",
"model.vision_tower.encoder.layer.0.mlp.fc1",
"model.vision_tower.encoder.layer.0.mlp.fc2",
"model.vision_tower.encoder.layer.1.attention.k_proj",
"model.vision_tower.encoder.layer.1.attention.projection_layer",
"model.vision_tower.encoder.layer.1.attention.q_proj",
"model.vision_tower.encoder.layer.1.attention.v_proj",
"model.vision_tower.encoder.layer.1.mlp.fc1",
"model.vision_tower.encoder.layer.1.mlp.fc2",
"model.vision_tower.encoder.layer.10.attention.k_proj",
"model.vision_tower.encoder.layer.10.attention.projection_layer",
"model.vision_tower.encoder.layer.10.attention.q_proj",
"model.vision_tower.encoder.layer.10.attention.v_proj",
"model.vision_tower.encoder.layer.10.mlp.fc1",
"model.vision_tower.encoder.layer.10.mlp.fc2",
"model.vision_tower.encoder.layer.11.attention.k_proj",
"model.vision_tower.encoder.layer.11.attention.projection_layer",
"model.vision_tower.encoder.layer.11.attention.q_proj",
"model.vision_tower.encoder.layer.11.attention.v_proj",
"model.vision_tower.encoder.layer.11.mlp.fc1",
"model.vision_tower.encoder.layer.11.mlp.fc2",
"model.vision_tower.encoder.layer.12.attention.k_proj",
"model.vision_tower.encoder.layer.12.attention.projection_layer",
"model.vision_tower.encoder.layer.12.attention.q_proj",
"model.vision_tower.encoder.layer.12.attention.v_proj",
"model.vision_tower.encoder.layer.12.mlp.fc1",
"model.vision_tower.encoder.layer.12.mlp.fc2",
"model.vision_tower.encoder.layer.13.attention.k_proj",
"model.vision_tower.encoder.layer.13.attention.projection_layer",
"model.vision_tower.encoder.layer.13.attention.q_proj",
"model.vision_tower.encoder.layer.13.attention.v_proj",
"model.vision_tower.encoder.layer.13.mlp.fc1",
"model.vision_tower.encoder.layer.13.mlp.fc2",
"model.vision_tower.encoder.layer.14.attention.k_proj",
"model.vision_tower.encoder.layer.14.attention.projection_layer",
"model.vision_tower.encoder.layer.14.attention.q_proj",
"model.vision_tower.encoder.layer.14.attention.v_proj",
"model.vision_tower.encoder.layer.14.mlp.fc1",
"model.vision_tower.encoder.layer.14.mlp.fc2",
"model.vision_tower.encoder.layer.15.attention.k_proj",
"model.vision_tower.encoder.layer.15.attention.projection_layer",
"model.vision_tower.encoder.layer.15.attention.q_proj",
"model.vision_tower.encoder.layer.15.attention.v_proj",
"model.vision_tower.encoder.layer.15.mlp.fc1",
"model.vision_tower.encoder.layer.15.mlp.fc2",
"model.vision_tower.encoder.layer.16.attention.k_proj",
"model.vision_tower.encoder.layer.16.attention.projection_layer",
"model.vision_tower.encoder.layer.16.attention.q_proj",
"model.vision_tower.encoder.layer.16.attention.v_proj",
"model.vision_tower.encoder.layer.16.mlp.fc1",
"model.vision_tower.encoder.layer.16.mlp.fc2",
"model.vision_tower.encoder.layer.17.attention.k_proj",
"model.vision_tower.encoder.layer.17.attention.projection_layer",
"model.vision_tower.encoder.layer.17.attention.q_proj",
"model.vision_tower.encoder.layer.17.attention.v_proj",
"model.vision_tower.encoder.layer.17.mlp.fc1",
"model.vision_tower.encoder.layer.17.mlp.fc2",
"model.vision_tower.encoder.layer.18.attention.k_proj",
"model.vision_tower.encoder.layer.18.attention.projection_layer",
"model.vision_tower.encoder.layer.18.attention.q_proj",
"model.vision_tower.encoder.layer.18.attention.v_proj",
"model.vision_tower.encoder.layer.18.mlp.fc1",
"model.vision_tower.encoder.layer.18.mlp.fc2",
"model.vision_tower.encoder.layer.19.attention.k_proj",
"model.vision_tower.encoder.layer.19.attention.projection_layer",
"model.vision_tower.encoder.layer.19.attention.q_proj",
"model.vision_tower.encoder.layer.19.attention.v_proj",
"model.vision_tower.encoder.layer.19.mlp.fc1",
"model.vision_tower.encoder.layer.19.mlp.fc2",
"model.vision_tower.encoder.layer.2.attention.k_proj",
"model.vision_tower.encoder.layer.2.attention.projection_layer",
"model.vision_tower.encoder.layer.2.attention.q_proj",
"model.vision_tower.encoder.layer.2.attention.v_proj",
"model.vision_tower.encoder.layer.2.mlp.fc1",
"model.vision_tower.encoder.layer.2.mlp.fc2",
"model.vision_tower.encoder.layer.20.attention.k_proj",
"model.vision_tower.encoder.layer.20.attention.projection_layer",
"model.vision_tower.encoder.layer.20.attention.q_proj",
"model.vision_tower.encoder.layer.20.attention.v_proj",
"model.vision_tower.encoder.layer.20.mlp.fc1",
"model.vision_tower.encoder.layer.20.mlp.fc2",
"model.vision_tower.encoder.layer.21.attention.k_proj",
"model.vision_tower.encoder.layer.21.attention.projection_layer",
"model.vision_tower.encoder.layer.21.attention.q_proj",
"model.vision_tower.encoder.layer.21.attention.v_proj",
"model.vision_tower.encoder.layer.21.mlp.fc1",
"model.vision_tower.encoder.layer.21.mlp.fc2",
"model.vision_tower.encoder.layer.22.attention.k_proj",
"model.vision_tower.encoder.layer.22.attention.projection_layer",
"model.vision_tower.encoder.layer.22.attention.q_proj",
"model.vision_tower.encoder.layer.22.attention.v_proj",
"model.vision_tower.encoder.layer.22.mlp.fc1",
"model.vision_tower.encoder.layer.22.mlp.fc2",
"model.vision_tower.encoder.layer.23.attention.k_proj",
"model.vision_tower.encoder.layer.23.attention.projection_layer",
"model.vision_tower.encoder.layer.23.attention.q_proj",
"model.vision_tower.encoder.layer.23.attention.v_proj",
"model.vision_tower.encoder.layer.23.mlp.fc1",
"model.vision_tower.encoder.layer.23.mlp.fc2",
"model.vision_tower.encoder.layer.3.attention.k_proj",
"model.vision_tower.encoder.layer.3.attention.projection_layer",
"model.vision_tower.encoder.layer.3.attention.q_proj",
"model.vision_tower.encoder.layer.3.attention.v_proj",
"model.vision_tower.encoder.layer.3.mlp.fc1",
"model.vision_tower.encoder.layer.3.mlp.fc2",
"model.vision_tower.encoder.layer.4.attention.k_proj",
"model.vision_tower.encoder.layer.4.attention.projection_layer",
"model.vision_tower.encoder.layer.4.attention.q_proj",
"model.vision_tower.encoder.layer.4.attention.v_proj",
"model.vision_tower.encoder.layer.4.mlp.fc1",
"model.vision_tower.encoder.layer.4.mlp.fc2",
"model.vision_tower.encoder.layer.5.attention.k_proj",
"model.vision_tower.encoder.layer.5.attention.projection_layer",
"model.vision_tower.encoder.layer.5.attention.q_proj",
"model.vision_tower.encoder.layer.5.attention.v_proj",
"model.vision_tower.encoder.layer.5.mlp.fc1",
"model.vision_tower.encoder.layer.5.mlp.fc2",
"model.vision_tower.encoder.layer.6.attention.k_proj",
"model.vision_tower.encoder.layer.6.attention.projection_layer",
"model.vision_tower.encoder.layer.6.attention.q_proj",
"model.vision_tower.encoder.layer.6.attention.v_proj",
"model.vision_tower.encoder.layer.6.mlp.fc1",
"model.vision_tower.encoder.layer.6.mlp.fc2",
"model.vision_tower.encoder.layer.7.attention.k_proj",
"model.vision_tower.encoder.layer.7.attention.projection_layer",
"model.vision_tower.encoder.layer.7.attention.q_proj",
"model.vision_tower.encoder.layer.7.attention.v_proj",
"model.vision_tower.encoder.layer.7.mlp.fc1",
"model.vision_tower.encoder.layer.7.mlp.fc2",
"model.vision_tower.encoder.layer.8.attention.k_proj",
"model.vision_tower.encoder.layer.8.attention.projection_layer",
"model.vision_tower.encoder.layer.8.attention.q_proj",
"model.vision_tower.encoder.layer.8.attention.v_proj",
"model.vision_tower.encoder.layer.8.mlp.fc1",
"model.vision_tower.encoder.layer.8.mlp.fc2",
"model.vision_tower.encoder.layer.9.attention.k_proj",
"model.vision_tower.encoder.layer.9.attention.projection_layer",
"model.vision_tower.encoder.layer.9.attention.q_proj",
"model.vision_tower.encoder.layer.9.attention.v_proj",
"model.vision_tower.encoder.layer.9.mlp.fc1",
"model.vision_tower.encoder.layer.9.mlp.fc2"
],
"quant_method": "fp8",
"weight_block_size": [
128,
128
]
},
"text_config": {
"_attn_implementation_autoset": true,
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 151643,
"eos_token_id": 151645,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 12288,
"max_position_embeddings": 65536,
"max_window_layers": 36,
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000,
"sliding_window": null,
"torch_dtype": "bfloat16",
"use_cache": true,
"use_sliding_window": false,
"vocab_size": 153216
},
"torch_dtype": "bfloat16",
"transformers_version": "4.53.0",
"vision_config": {
"_attn_implementation_autoset": true,
"architectures": [
"InternVisionModel"
],
"attention_bias": true,
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_interns1.InternS1VisionConfig",
"AutoModel": "modeling_interns1.InternS1VisionModel"
},
"drop_path_rate": 0.0,
"dropout": 0.0,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 1024,
"image_size": [
448,
448
],
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"layer_norm_eps": 1e-06,
"layer_scale_init_value": 0.1,
"model_type": "interns1_vision",
"norm_type": "layer_norm",
"num_attention_heads": 16,
"num_channels": 3,
"num_hidden_layers": 24,
"patch_size": [
14,
14
],
"projection_dropout": 0.0,
"torch_dtype": "bfloat16",
"use_absolute_position_embeddings": true,
"use_mask_token": false,
"use_mean_pooling": true,
"use_qk_norm": false
},
"vision_feature_layer": -1,
"vision_feature_select_strategy": "default",
"auto_map": {
"AutoConfig": "configuration_interns1.InternS1Config",
"AutoModel": "modeling_interns1.InternS1Model",
"AutoModelForCausalLM": "modeling_interns1.InternS1ForConditionalGeneration"
}
}