Text Generation
Safetensors
Chinese
Traditional Chinese Medicine
Multimodal LLM
multimodal
Image-text-to-text
Audio-text-to-text
jymcc commited on
Commit
4de5eb8
·
1 Parent(s): 209e9c5
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.json filter=lfs diff=lfs merge=lfs -text
37
+ *.memmap filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd34727d2dc16398d5e80b51b25f2abb384842830403015ffa89dc10d22a1c79
3
+ size 703
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99c3fa3dee40344dab38471516190d98121f5b93eccddf277d3b40a4745b2a81
3
+ size 1141
config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f1efe4a675a3640b3b9bff6c8e3c0c9ff9633660a3a72aa86b9c3fcb8976086
3
+ size 2216
generation_config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ceb44fb26104938913e55b42bef6d5500996917625e37056d9f1baf823b70f5
3
+ size 244
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4335e30cc76cf7c214dc98c97072ed1123f280e3b8d3046b18ce6788aacff4e
3
+ size 4988859560
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2bec48cbe4ef14298eafb93f6989920b90657c8fb49b5ad441231a3e97deb43
3
+ size 4991495784
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:091008a2b1a7242e9f58da39f061fd32de0d5aafafddbf20e5de6083b422f90a
3
+ size 4991495888
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4fdedda34759e07319be544f2ca73874cec389dd6b3aa750f04957f58a5a007
3
+ size 2895739720
model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf0b930b9f89e618ec63a7168dcfa55302058af1e8b1460c14d85131c2f46d3b
3
+ size 99107
modeling_shizhen.py ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b46dda45ad5de63f60f2b9efee7482b319b9453fe32d4f5b168095c8eb609f51
3
+ size 993
processing_qwen2_5_vl_audio.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_qwen2_5_vl.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
11
+ # and OPT implementations in this library. It has been modified from its
12
+ # original forms to accommodate minor architectural differences compared
13
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
14
+ #
15
+ # Licensed under the Apache License, Version 2.0 (the "License");
16
+ # you may not use this file except in compliance with the License.
17
+ # You may obtain a copy of the License at
18
+ #
19
+ # http://www.apache.org/licenses/LICENSE-2.0
20
+ #
21
+ # Unless required by applicable law or agreed to in writing, software
22
+ # distributed under the License is distributed on an "AS IS" BASIS,
23
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24
+ # See the License for the specific language governing permissions and
25
+ # limitations under the License.
26
+ from typing import List, Union, Optional
27
+
28
+ from transformers.feature_extraction_utils import BatchFeature
29
+ from transformers.image_utils import ImageInput, VideoInput
30
+ from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
31
+ from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
32
+ import numpy as np
33
+
34
+
35
class Qwen2_5_VLVideosProcessorKwargs(VideosKwargs, total=False):
    """Video-specific processing kwargs.

    `fps` may be a single sampling rate applied to every clip, or one
    rate per clip in the batch.
    """

    fps: Union[List[float], float]
37
+
38
+
39
class Qwen2_5_VLProcessorKwargs(ProcessingKwargs, total=False):
    """Keyword-argument schema for `Qwen2_5_VL_Audio_Processor.__call__`.

    Defaults: text is not padded, and videos are sampled at 2 frames
    per second unless the caller overrides `fps`.
    """

    videos_kwargs: Qwen2_5_VLVideosProcessorKwargs
    _defaults = {
        "text_kwargs": {"padding": False},
        "videos_kwargs": {"fps": 2.0},
    }
47
+
48
+
49
class Qwen2_5_VL_Audio_Processor(ProcessorMixin):
    r"""
    Constructs a Qwen2.5-VL processor which wraps a Qwen2.5-VL image processor, a Whisper feature
    extractor and a Qwen2 tokenizer into a single processor.
    [`Qwen2_5_VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
    [`~Qwen2_5_VLProcessor.__call__`] and [`~Qwen2_5_VLProcessor.decode`] for more information.
    Args:
        image_processor ([`Qwen2VLImageProcessor`], *optional*):
            The image processor is a required input.
        feature_extractor ([`WhisperFeatureExtractor`], *optional*):
            The audio feature extractor is a required input.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    attributes = ["image_processor", "tokenizer", "feature_extractor"]
    valid_kwargs = ["chat_template"]
    feature_extractor_class = "WhisperFeatureExtractor"
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")

    def __init__(self, image_processor=None, feature_extractor=None, tokenizer=None, chat_template=None, **kwargs):
        # Fall back to the canonical placeholder strings when the tokenizer does
        # not expose the corresponding special-token attributes.
        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
        self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else "<|AUDIO|>"
        self.audio_bos_token = tokenizer.audio_bos_token if hasattr(tokenizer, "audio_bos_token") else "<|audio_bos|>"
        self.audio_eos_token = tokenizer.audio_eos_token if hasattr(tokenizer, "audio_eos_token") else "<|audio_eos|>"
        super().__init__(image_processor, feature_extractor, tokenizer, chat_template=chat_template)

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        videos: VideoInput = None,
        audios: Union[np.ndarray, List[np.ndarray]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs: Unpack[Qwen2_5_VLProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
                tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
            audios (`np.ndarray`, `List[np.ndarray]`, *optional*):
                The audio waveform(s); forwarded to the Whisper feature extractor together with `sampling_rate`.
            sampling_rate (`int`, *optional*):
                Sampling rate of the provided `audios`.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
            - **second_per_grid_ts** -- List of video seconds per time grid. Returned when `videos` is not `None`.
            - **input_features** / **feature_attention_mask** -- Audio features. Returned when `audios` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Qwen2_5_VLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, videos=None, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        if videos is not None:
            videos_inputs = self.image_processor(images=None, videos=videos, **output_kwargs["images_kwargs"])
            video_grid_thw = videos_inputs["video_grid_thw"]

            fps = output_kwargs["videos_kwargs"].pop("fps", 2.0)
            # One seconds-per-temporal-grid value per video; a scalar fps applies to all clips.
            if isinstance(fps, (int, float)):
                second_per_grid_ts = [self.image_processor.temporal_patch_size / fps] * len(video_grid_thw)
            elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw):
                second_per_grid_ts = [self.image_processor.temporal_patch_size / tmp for tmp in fps]
            else:
                raise ValueError(
                    f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the length of video_grid_thw ({len(video_grid_thw)}) or fps should be a single number."
                )
            videos_inputs.update({"second_per_grid_ts": second_per_grid_ts})

        else:
            videos_inputs = {}
            video_grid_thw = None

        # FIX: normalize `text` to a list *before* the audio-expansion loop.
        # The original code normalized only afterwards, so a bare string sample
        # combined with `audios` was iterated character-by-character below.
        if not isinstance(text, list):
            text = [text]

        if audios is not None:
            # The feature extractor always pads to max_length; strip any caller-supplied
            # padding/truncation options that would conflict with that.
            new_kwargs = {k: kwargs[k] for k in kwargs if k not in ['padding', 'truncation', 'max_length']}

            audio_inputs = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_attention_mask=True, padding="max_length", **new_kwargs
            )

            audio_inputs["feature_attention_mask"] = audio_inputs.pop(
                "attention_mask"
            )  # rename attention_mask to prevent conflicts later on

            expanded_text = []
            audio_lengths = audio_inputs["feature_attention_mask"].sum(-1).tolist()

            for sample in text:
                replace_str = []
                while self.audio_token in sample:
                    audio_length = audio_lengths.pop(0)
                    # Two successive stride-2 reductions map raw feature frames to audio tokens
                    # (mirrors the audio encoder's downsampling).
                    input_length = (audio_length - 1) // 2 + 1
                    num_audio_tokens = (input_length - 2) // 2 + 1

                    expanded_audio_token = self.audio_token * num_audio_tokens

                    audio_token_start_idx = sample.find(self.audio_token)
                    audio_token_end_idx = audio_token_start_idx + len(self.audio_token)

                    has_bos = (
                        sample[audio_token_start_idx - len(self.audio_bos_token) : audio_token_start_idx]
                        == self.audio_bos_token
                    )
                    has_eos = (
                        sample[audio_token_end_idx : audio_token_end_idx + len(self.audio_eos_token)]
                        == self.audio_eos_token
                    )

                    # Check if this audio token is surrounded by bos/eos tokens
                    if not has_bos and not has_eos:
                        expanded_audio_token = self.audio_bos_token + expanded_audio_token + self.audio_eos_token

                    replace_str.append(expanded_audio_token)
                    # Use a placeholder so the expanded tokens are not re-matched by the loop.
                    sample = sample.replace(self.audio_token, "<placeholder>", 1)

                while "<placeholder>" in sample:
                    sample = sample.replace("<placeholder>", replace_str.pop(0), 1)
                expanded_text.append(sample)
            text = expanded_text
        else:
            audio_inputs = {}

        if image_grid_thw is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                # Expand each image token into one token per post-merge visual patch.
                while self.image_token in text[i]:
                    text[i] = text[i].replace(
                        self.image_token,
                        "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length),
                        1,
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if video_grid_thw is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                # Expand each video token into one token per post-merge visual patch.
                while self.video_token in text[i]:
                    text[i] = text[i].replace(
                        self.video_token,
                        "<|placeholder|>" * (video_grid_thw[index].prod() // merge_length),
                        1,
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.video_token)

        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs, **audio_inputs})

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.

        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
                or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode method`.

        Returns:
            `List[str]`: The decoded text.
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    def model_input_names(self):
        # NOTE(review): feature_extractor input names (e.g. input_features) are not
        # included here — confirm whether downstream code relies on that omission.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
        return names_from_processor + ["second_per_grid_ts"]


__all__ = ["Qwen2_5_VL_Audio_Processor"]
special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab4d6f5d1c63ae9615e404ded95ac9b15a78d63127d21cc4427fd9278e1579c8
3
+ size 403
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74c0815d2ea2c2261d98e3230592b44775b545900b8759a946b8dcb5462ec8bf
3
+ size 11422646
tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b469c3b56bbd4d01f06fbeebc13dc154546577525b44dba7b3bf1d70989c3fd3
3
+ size 7991
vocab.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca10d7e9fb3ed18575dd1e277a2579c16d108e32f27439684afa0e10b1440910
3
+ size 2776833