Add files using upload-large-folder tool

Files changed:
- config.json (+61 -61)
- model.safetensors (+3 -0)

config.json (CHANGED)
@@ -25,67 +25,67 @@
     "multi_modal_projector",
     "merger",
     "modality_projection",
-    [old lines 28-88: 61 removed "llm_int8_skip_modules" entries; their names are truncated in the source rendering]
+    "language_model.model.layers.1.mlp",
+    "language_model.model.layers.4.mlp",
+    "language_model.model.layers.2.mlp",
+    "language_model.model.layers.5.mlp",
+    "vision_tower.vision_model.encoder.layers.22.self_attn",
+    "language_model.model.layers.3.mlp",
+    "vision_tower.vision_model.encoder.layers.26.self_attn",
+    "language_model.model.layers.0.mlp",
+    "vision_tower.vision_model.encoder.layers.19.self_attn",
+    "vision_tower.vision_model.encoder.layers.25.self_attn",
+    "vision_tower.vision_model.encoder.layers.23.self_attn",
+    "vision_tower.vision_model.encoder.layers.25.mlp",
+    "vision_tower.vision_model.encoder.layers.20.self_attn",
+    "vision_tower.vision_model.encoder.layers.24.self_attn",
+    "language_model.model.layers.2.self_attn",
+    "vision_tower.vision_model.encoder.layers.21.self_attn",
+    "vision_tower.vision_model.encoder.layers.18.self_attn",
+    "vision_tower.vision_model.encoder.layers.19.mlp",
+    "vision_tower.vision_model.encoder.layers.20.mlp",
+    "vision_tower.vision_model.encoder.layers.21.mlp",
+    "vision_tower.vision_model.encoder.layers.15.mlp",
+    "vision_tower.vision_model.encoder.layers.22.mlp",
+    "vision_tower.vision_model.encoder.layers.23.mlp",
+    "vision_tower.vision_model.encoder.layers.17.mlp",
+    "vision_tower.vision_model.encoder.layers.18.mlp",
+    "vision_tower.vision_model.encoder.layers.24.mlp",
+    "vision_tower.vision_model.encoder.layers.16.self_attn",
+    "vision_tower.vision_model.encoder.layers.16.mlp",
+    "vision_tower.vision_model.encoder.layers.17.self_attn",
+    "vision_tower.vision_model.encoder.layers.13.mlp",
+    "vision_tower.vision_model.encoder.layers.14.mlp",
+    "vision_tower.vision_model.encoder.layers.15.self_attn",
+    "vision_tower.vision_model.encoder.layers.12.mlp",
+    "vision_tower.vision_model.encoder.layers.14.self_attn",
+    "vision_tower.vision_model.encoder.layers.9.mlp",
+    "vision_tower.vision_model.encoder.layers.5.mlp",
+    "vision_tower.vision_model.encoder.layers.10.self_attn",
+    "vision_tower.vision_model.encoder.layers.7.mlp",
+    "vision_tower.vision_model.encoder.layers.9.self_attn",
+    "vision_tower.vision_model.encoder.layers.8.mlp",
+    "vision_tower.vision_model.encoder.layers.10.mlp",
+    "vision_tower.vision_model.encoder.layers.13.self_attn",
+    "vision_tower.vision_model.encoder.layers.11.mlp",
+    "vision_tower.vision_model.encoder.layers.11.self_attn",
+    "vision_tower.vision_model.encoder.layers.12.self_attn",
+    "vision_tower.vision_model.encoder.layers.3.mlp",
+    "vision_tower.vision_model.encoder.layers.8.self_attn",
+    "vision_tower.vision_model.encoder.layers.4.self_attn",
+    "vision_tower.vision_model.encoder.layers.6.mlp",
+    "vision_tower.vision_model.encoder.layers.4.mlp",
+    "vision_tower.vision_model.encoder.layers.2.mlp",
+    "vision_tower.vision_model.encoder.layers.6.self_attn",
+    "vision_tower.vision_model.encoder.layers.7.self_attn",
+    "vision_tower.vision_model.encoder.layers.1.self_attn",
+    "vision_tower.vision_model.encoder.layers.1.mlp",
+    "vision_tower.vision_model.encoder.layers.3.self_attn",
+    "vision_tower.vision_model.encoder.layers.5.self_attn",
+    "vision_tower.vision_model.encoder.layers.0.self_attn",
+    "vision_tower.vision_model.encoder.layers.0.mlp",
+    "vision_tower.vision_model.encoder.layers.2.self_attn",
+    "vision_tower.vision_model.encoder.layers.26.mlp"
   ],
   "llm_int8_threshold": 6.0,
   "load_in_4bit": true,
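This hunk rewrites the "llm_int8_skip_modules" list of a bitsandbytes quantization config: "load_in_4bit": true enables 4-bit weight loading, "llm_int8_threshold": 6.0 sets the int8 outlier threshold, and the listed submodules (the projector/merger layers, most of the vision tower, and the first few language-model blocks) are skipped during quantization and kept in higher precision. A minimal sketch of how such a config might be built with transformers' BitsAndBytesConfig; the repo id below is hypothetical and the skip list is abbreviated:

from transformers import AutoModelForVision2Seq, BitsAndBytesConfig

# Submodules to leave unquantized, taken from the committed list above
# (abbreviated here; the real config names all 61 entries).
skip_modules = [
    "multi_modal_projector",
    "merger",
    "modality_projection",
    "language_model.model.layers.1.mlp",
    "vision_tower.vision_model.encoder.layers.22.self_attn",
    # ... remaining language_model / vision_tower entries
]

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                    # matches "load_in_4bit": true
    llm_int8_threshold=6.0,               # matches "llm_int8_threshold": 6.0
    llm_int8_skip_modules=skip_modules,   # modules kept in higher precision
)

# Hypothetical repo id; any vision-language checkpoint with this
# vision_tower / language_model layout would work the same way.
model = AutoModelForVision2Seq.from_pretrained(
    "org/some-vlm-checkpoint",
    quantization_config=bnb_config,
)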
model.safetensors (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50fee60341951d70ff28e0480f908480927a1b11e69738457bfe17da34f548c4
+size 4562294331
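model.safetensors is stored via Git LFS, so the repository only commits this three-line pointer: the oid is the SHA-256 of the actual weight file and size is its length in bytes (4,562,294,331, roughly 4.6 GB). A minimal sketch, assuming the real file has been downloaded locally, that verifies a copy against the pointer's oid and size; the local path is an assumption:

import hashlib
import os

# Values copied from the LFS pointer committed above.
EXPECTED_OID = "50fee60341951d70ff28e0480f908480927a1b11e69738457bfe17da34f548c4"
EXPECTED_SIZE = 4562294331  # bytes

path = "model.safetensors"  # hypothetical local path to the downloaded file

# Cheap check first: the byte length must match the pointer's "size" field.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Then hash the file in 1 MiB chunks and compare against the "oid" field.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")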