danielhanchen committed
Commit b74163c · verified · 1 Parent(s): ddaa9e0

Upload folder using huggingface_hub

config.json CHANGED
@@ -3,13 +3,14 @@
     "Qwen2_5_VLForConditionalGeneration"
   ],
   "attention_dropout": 0.0,
+  "dtype": "bfloat16",
   "eos_token_id": 151645,
   "hidden_act": "silu",
   "hidden_size": 3584,
   "image_token_id": 151655,
   "initializer_range": 0.02,
   "intermediate_size": 18944,
-  "max_position_embeddings": 128000,
+  "max_position_embeddings": 32768,
   "max_window_layers": 28,
   "model_type": "qwen2_5_vl",
   "num_attention_heads": 28,
@@ -28,15 +29,84 @@
   },
   "rope_theta": 1000000.0,
   "sliding_window": 32768,
+  "text_config": {
+    "architectures": [
+      "Qwen2_5_VLForConditionalGeneration"
+    ],
+    "attention_dropout": 0.0,
+    "bos_token_id": 151643,
+    "dtype": "bfloat16",
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 3584,
+    "image_token_id": null,
+    "initializer_range": 0.02,
+    "intermediate_size": 18944,
+    "layer_types": [
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention",
+      "full_attention"
+    ],
+    "max_position_embeddings": 128000,
+    "max_window_layers": 28,
+    "model_type": "qwen2_5_vl_text",
+    "num_attention_heads": 28,
+    "num_hidden_layers": 28,
+    "num_key_value_heads": 4,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+      "mrope_section": [
+        16,
+        24,
+        24
+      ],
+      "rope_type": "default",
+      "type": "default"
+    },
+    "rope_theta": 1000000.0,
+    "sliding_window": null,
+    "use_cache": true,
+    "use_sliding_window": false,
+    "video_token_id": null,
+    "vision_end_token_id": 151653,
+    "vision_start_token_id": 151652,
+    "vision_token_id": 151654,
+    "vocab_size": 152064
+  },
   "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.3",
+  "transformers_version": "4.56.1",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
   "video_token_id": 151656,
   "vision_config": {
     "depth": 32,
+    "dtype": "bfloat16",
     "fullatt_block_indexes": [
       7,
       15,
@@ -47,6 +117,7 @@
     "hidden_size": 1280,
     "in_channels": 3,
     "in_chans": 3,
+    "initializer_range": 0.02,
     "intermediate_size": 3420,
     "model_type": "qwen2_5_vl",
     "num_heads": 16,
@@ -56,7 +127,6 @@
     "spatial_patch_size": 14,
     "temporal_patch_size": 2,
     "tokens_per_second": 2,
-    "torch_dtype": "bfloat16",
     "window_size": 112
   },
   "vision_end_token_id": 151653,
generation_config.json CHANGED
@@ -5,9 +5,9 @@
     151645,
     151643
   ],
-  "max_length": 128000,
+  "max_length": 32768,
   "pad_token_id": 151654,
   "repetition_penalty": 1.05,
   "temperature": 1e-06,
-  "transformers_version": "4.51.3"
+  "transformers_version": "4.56.1"
 }
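
generation_config.json gets the same version bump, and its max_length now matches the 32768 limit set in config.json and tokenizer_config.json. A short sketch of reading it back, with the same placeholder repo id:

    # Sketch: reading the regenerated generation defaults.
    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained("unsloth/Qwen2.5-VL-7B-Instruct")  # placeholder id
    print(gen_cfg.max_length)          # 32768 after this commit (was 128000)
    print(gen_cfg.repetition_penalty)  # 1.05
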
model.safetensors.index.json CHANGED
@@ -1,5 +1,6 @@
 {
   "metadata": {
+    "total_parameters": 8292166656,
     "total_size": 16584333312
   },
   "weight_map": {
preprocessor_config.json CHANGED
@@ -1,4 +1,10 @@
 {
+  "crop_size": null,
+  "data_format": "channels_first",
+  "default_to_square": true,
+  "device": null,
+  "disable_grouping": null,
+  "do_center_crop": null,
   "do_convert_rgb": true,
   "do_normalize": true,
   "do_rescale": true,
@@ -8,12 +14,13 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "Qwen2VLImageProcessor",
+  "image_processor_type": "Qwen2VLImageProcessorFast",
   "image_std": [
     0.26862954,
     0.26130258,
     0.27577711
   ],
+  "input_data_format": null,
   "max_pixels": 12845056,
   "merge_size": 2,
   "min_pixels": 3136,
@@ -21,6 +28,7 @@
   "processor_class": "Qwen2_5_VLProcessor",
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
+  "return_tensors": null,
   "size": {
     "longest_edge": 12845056,
     "shortest_edge": 3136
tokenizer_config.json CHANGED
@@ -195,16 +195,16 @@
     "<|video_pad|>"
   ],
   "bos_token": null,
-  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
   "extra_special_tokens": {},
-  "model_max_length": 128000,
+  "model_max_length": 32768,
   "pad_token": "<|vision_pad|>",
   "padding_side": "left",
   "processor_class": "Qwen2_5_VLProcessor",
   "split_special_tokens": false,
   "tokenizer_class": "Qwen2Tokenizer",
-  "unk_token": null
+  "unk_token": null,
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
 }
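
The chat_template string itself is unchanged; it only moves to the end of the file, while model_max_length drops from 128000 to 32768. The template injects a default system prompt and wraps each image or video item in <|vision_start|>...<|vision_end|> markers, as this sketch shows (placeholder repo id):

    # Sketch: rendering the (unchanged) chat template.
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("unsloth/Qwen2.5-VL-7B-Instruct")  # placeholder id
    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ]},
    ]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    print(prompt)
    # <|im_start|>system
    # You are a helpful assistant.<|im_end|>
    # <|im_start|>user
    # <|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>
    # <|im_start|>assistant
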
video_preprocessor_config.json ADDED
@@ -0,0 +1,44 @@
+{
+  "crop_size": null,
+  "data_format": "channels_first",
+  "default_to_square": true,
+  "device": null,
+  "do_center_crop": null,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_pad": null,
+  "do_rescale": true,
+  "do_resize": true,
+  "do_sample_frames": false,
+  "fps": null,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "input_data_format": null,
+  "max_frames": 768,
+  "max_pixels": 12845056,
+  "merge_size": 2,
+  "min_frames": 4,
+  "min_pixels": 3136,
+  "num_frames": null,
+  "patch_size": 14,
+  "processor_class": "Qwen2_5_VLProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "return_metadata": false,
+  "size": {
+    "longest_edge": 12845056,
+    "shortest_edge": 3136
+  },
+  "size_divisor": null,
+  "temporal_patch_size": 2,
+  "video_metadata": null,
+  "video_processor_type": "Qwen2VLVideoProcessor"
+}
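
The newly added video_preprocessor_config.json splits video handling into a dedicated processor: frame sampling is bounded by min_frames 4 and max_frames 768, and the normalization statistics match the image processor. A sketch of loading it, assuming a recent transformers release that provides AutoVideoProcessor (placeholder repo id):

    # Sketch: loading the newly added video processor; AutoVideoProcessor is
    # assumed available (recent transformers releases ship video processors).
    from transformers import AutoVideoProcessor

    video_processor = AutoVideoProcessor.from_pretrained("unsloth/Qwen2.5-VL-7B-Instruct")  # placeholder id
    print(type(video_processor).__name__)                          # Qwen2VLVideoProcessor
    print(video_processor.min_frames, video_processor.max_frames)  # 4 768
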