kevinwang676 committed
Commit 339308f · verified · 1 parent: fd82c69

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +8 -0
  2. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.a10.plan +3 -0
  3. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.l20.plan +3 -0
  4. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.v100.plan +3 -0
  5. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp32.onnx +3 -0
  6. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.encoder.fp32.zip +3 -0
  7. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.pt +3 -0
  8. CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/speech_tokenizer_v2.onnx +3 -0
  9. third_party/1new_zero_0_0.wav +3 -0
  10. third_party/cosyvoice/cli/__init__.py +0 -0
  11. third_party/cosyvoice/cli/__pycache__/__init__.cpython-311.pyc +0 -0
  12. third_party/cosyvoice/cli/__pycache__/cosyvoice.cpython-311.pyc +0 -0
  13. third_party/cosyvoice/cli/__pycache__/frontend.cpython-311.pyc +0 -0
  14. third_party/cosyvoice/cli/__pycache__/model.cpython-311.pyc +0 -0
  15. third_party/cosyvoice/cli/cosyvoice.py +188 -0
  16. third_party/cosyvoice/cli/frontend.py +218 -0
  17. third_party/cosyvoice/cli/model.py +412 -0
  18. third_party/cosyvoice/llm/__pycache__/llm.cpython-311.pyc +0 -0
  19. third_party/cosyvoice/llm/llm.py +434 -0
  20. third_party/cosyvoice/tokenizer/tokenizer.py +279 -0
  21. third_party/cosyvoice/transformer/__pycache__/__init__.cpython-311.pyc +0 -0
  22. third_party/cosyvoice/transformer/__pycache__/activation.cpython-311.pyc +0 -0
  23. third_party/cosyvoice/transformer/__pycache__/attention.cpython-311.pyc +0 -0
  24. third_party/cosyvoice/transformer/__pycache__/convolution.cpython-311.pyc +0 -0
  25. third_party/cosyvoice/transformer/__pycache__/embedding.cpython-311.pyc +0 -0
  26. third_party/cosyvoice/transformer/__pycache__/encoder_layer.cpython-311.pyc +0 -0
  27. third_party/cosyvoice/transformer/__pycache__/label_smoothing_loss.cpython-311.pyc +0 -0
  28. third_party/cosyvoice/transformer/__pycache__/positionwise_feed_forward.cpython-311.pyc +0 -0
  29. third_party/cosyvoice/transformer/__pycache__/subsampling.cpython-311.pyc +0 -0
  30. third_party/cosyvoice/transformer/__pycache__/upsample_encoder.cpython-311.pyc +0 -0
  31. third_party/cosyvoice/transformer/attention.py +330 -0
  32. third_party/cosyvoice/transformer/decoder.py +396 -0
  33. third_party/cosyvoice/transformer/decoder_layer.py +132 -0
  34. third_party/cosyvoice/transformer/embedding.py +294 -0
  35. third_party/cosyvoice/transformer/encoder.py +474 -0
  36. third_party/cosyvoice/utils/__init__.py +0 -0
  37. third_party/cosyvoice/utils/__pycache__/__init__.cpython-311.pyc +0 -0
  38. third_party/cosyvoice/utils/__pycache__/class_utils.cpython-311.pyc +0 -0
  39. third_party/cosyvoice/utils/__pycache__/common.cpython-311.pyc +0 -0
  40. third_party/cosyvoice/utils/__pycache__/file_utils.cpython-311.pyc +0 -0
  41. third_party/cosyvoice/utils/__pycache__/frontend_utils.cpython-311.pyc +0 -0
  42. third_party/cosyvoice/utils/__pycache__/mask.cpython-311.pyc +0 -0
  43. third_party/cosyvoice/utils/class_utils.py +83 -0
  44. third_party/cosyvoice/utils/common.py +166 -0
  45. third_party/cosyvoice/utils/executor.py +172 -0
  46. third_party/cosyvoice/utils/file_utils.py +89 -0
  47. third_party/cosyvoice/utils/frontend_utils.py +136 -0
  48. third_party/cosyvoice/utils/losses.py +20 -0
  49. third_party/cosyvoice/utils/mask.py +267 -0
  50. third_party/cosyvoice/utils/scheduler.py +738 -0
.gitattributes CHANGED
@@ -44,3 +44,11 @@ zero_shot_0.wav filter=lfs diff=lfs merge=lfs -text
  zero_1_0.wav filter=lfs diff=lfs merge=lfs -text
  zero_3_0.wav filter=lfs diff=lfs merge=lfs -text
  zero_0_0.wav filter=lfs diff=lfs merge=lfs -text
+ third_party/new_zero_0_0.wav filter=lfs diff=lfs merge=lfs -text
+ third_party/new_zero_3_0.wav filter=lfs diff=lfs merge=lfs -text
+ third_party/new_zero_2_0.wav filter=lfs diff=lfs merge=lfs -text
+ third_party/1new_zero_0_0.wav filter=lfs diff=lfs merge=lfs -text
+ CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.v100.plan filter=lfs diff=lfs merge=lfs -text
+ CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.l20.plan filter=lfs diff=lfs merge=lfs -text
+ third_party/new_zero_1_0.wav filter=lfs diff=lfs merge=lfs -text
+ CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.a10.plan filter=lfs diff=lfs merge=lfs -text
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.a10.plan ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f6b9073bd9e7b8ac5bef0a21431391cbc32376b9265ec73935d6f28a0d32d01
+ size 168597292
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.l20.plan ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:012df9e730e36e1cb61bf2780378c15ae92c536ae87518b7a54a90026cb99385
+ size 166520788
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp16.v100.plan ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f231edf01451fafbc3dc0498a51feb3a264afad43275536c8151fff954ef3c56
+ size 161799540
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.decoder.estimator.fp32.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51aed3efa2c153898ea53a780893c920e968dab1d7aec25402bd6c9815d94702
+ size 286521895
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.encoder.fp32.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32ac668741e1358123d3c066cfd1f68a81bd386028755be9831509e304bfd98c
+ size 192365750
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/flow.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff4c2f867674411e0a08cee702996df13fa67c1cd864c06108da88d16d088541
+ size 450575567
CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/speech_tokenizer_v2.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d43342aa12163a80bf07bffb94c9de2e120a8df2f9917cd2f642e7f4219c6f71
+ size 496082973
third_party/1new_zero_0_0.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29edea6d2bc42701b6905a23fd607129fbe5a377368d90fdce1b3a5a98ecff60
+ size 871758
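
Note: the eight entries above are Git LFS pointer files; the repository only stores the version/oid/size triplet, while the actual binaries (the TensorRT plans, the fp32 ONNX estimator, flow.pt, the speech tokenizer, and the prompt wav) live in LFS storage. A minimal fetch sketch using huggingface_hub is shown below; the repo id is a placeholder, not taken from this commit.

from huggingface_hub import snapshot_download

# Placeholder repo id: substitute the repository this commit belongs to,
# and pass repo_type="space" if it is a Space rather than a model repo.
local_dir = snapshot_download(
    repo_id="your-namespace/your-cosyvoice2-repo",
    allow_patterns=["CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO/*", "third_party/*.wav"],
)
print(local_dir)  # contains flow.pt, speech_tokenizer_v2.onnx, the .plan files, ...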
third_party/cosyvoice/cli/__init__.py ADDED
File without changes
third_party/cosyvoice/cli/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (163 Bytes).
 
third_party/cosyvoice/cli/__pycache__/cosyvoice.cpython-311.pyc ADDED
Binary file (16.7 kB).
 
third_party/cosyvoice/cli/__pycache__/frontend.cpython-311.pyc ADDED
Binary file (17 kB).
 
third_party/cosyvoice/cli/__pycache__/model.cpython-311.pyc ADDED
Binary file (30.3 kB).
 
third_party/cosyvoice/cli/cosyvoice.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ import time
16
+ from typing import Generator
17
+ from tqdm import tqdm
18
+ from hyperpyyaml import load_hyperpyyaml
19
+ from modelscope import snapshot_download
20
+ import torch
21
+ from cosyvoice.cli.frontend import CosyVoiceFrontEnd
22
+ from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model
23
+ from cosyvoice.utils.file_utils import logging
24
+ from cosyvoice.utils.class_utils import get_model_type
25
+
26
+
27
+ class CosyVoice:
28
+
29
+ def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False):
30
+ self.instruct = True if '-Instruct' in model_dir else False
31
+ self.model_dir = model_dir
32
+ self.fp16 = fp16
33
+ if not os.path.exists(model_dir):
34
+ model_dir = snapshot_download(model_dir)
35
+ with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
36
+ configs = load_hyperpyyaml(f)
37
+ assert get_model_type(configs) != CosyVoice2Model, 'do not use {} for CosyVoice initialization!'.format(model_dir)
38
+ self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
39
+ configs['feat_extractor'],
40
+ '{}/campplus.onnx'.format(model_dir),
41
+ '{}/speech_tokenizer_v1.onnx'.format(model_dir),
42
+ '{}/spk2info.pt'.format(model_dir),
43
+ configs['allowed_special'])
44
+ self.sample_rate = configs['sample_rate']
45
+ if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
46
+ load_jit, load_trt, fp16 = False, False, False
47
+ logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
48
+ self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16)
49
+ self.model.load('{}/llm.pt'.format(model_dir),
50
+ '{}/flow.pt'.format(model_dir),
51
+ '{}/hift.pt'.format(model_dir))
52
+ if load_jit:
53
+ self.model.load_jit('{}/llm.text_encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
54
+ '{}/llm.llm.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
55
+ '{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
56
+ if load_trt:
57
+ self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
58
+ '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
59
+ self.fp16)
60
+ del configs
61
+
62
+ def list_available_spks(self):
63
+ spks = list(self.frontend.spk2info.keys())
64
+ return spks
65
+
66
+ def inference_sft(self, tts_text, spk_id, stream=False, speed=1.0, text_frontend=True):
67
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
68
+ model_input = self.frontend.frontend_sft(i, spk_id)
69
+ start_time = time.time()
70
+ logging.info('synthesis text {}'.format(i))
71
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
72
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
73
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
74
+ yield model_output
75
+ start_time = time.time()
76
+
77
+ def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
78
+ if prompt_text is not None:
79
+ prompt_text = self.frontend.text_normalize(prompt_text, split=False, text_frontend=text_frontend)
80
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
81
+ if (not isinstance(i, Generator)) and len(i) < 0.5 * len(prompt_text):
82
+ logging.warning('synthesis text {} too short than prompt text {}, this may lead to bad performance'.format(i, prompt_text))
83
+ model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k, self.sample_rate)
84
+ start_time = time.time()
85
+ logging.info('synthesis text {}'.format(i))
86
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
87
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
88
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
89
+ yield model_output
90
+ start_time = time.time()
91
+ else:
92
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
93
+ if (not isinstance(i, Generator)) and len(i) < 0.5 * len(prompt_text):
94
+ logging.warning('synthesis text {} too short than prompt text {}, this may lead to bad performance'.format(i, prompt_text))
95
+ model_input = self.frontend.frontend_tts(i)
96
+ start_time = time.time()
97
+ logging.info('synthesis text {}'.format(i))
98
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
99
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
100
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
101
+ yield model_output
102
+ start_time = time.time()
103
+
104
+ def inference_cross_lingual(self, tts_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
105
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
106
+ model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k, self.sample_rate)
107
+ start_time = time.time()
108
+ logging.info('synthesis text {}'.format(i))
109
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
110
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
111
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
112
+ yield model_output
113
+ start_time = time.time()
114
+
115
+ def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False, speed=1.0, text_frontend=True):
116
+ assert isinstance(self.model, CosyVoiceModel), 'inference_instruct is only implemented for CosyVoice!'
117
+ if self.instruct is False:
118
+ raise ValueError('{} do not support instruct inference'.format(self.model_dir))
119
+ instruct_text = self.frontend.text_normalize(instruct_text, split=False, text_frontend=text_frontend)
120
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
121
+ model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
122
+ start_time = time.time()
123
+ logging.info('synthesis text {}'.format(i))
124
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
125
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
126
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
127
+ yield model_output
128
+ start_time = time.time()
129
+
130
+ def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed=1.0):
131
+ model_input = self.frontend.frontend_vc(source_speech_16k, prompt_speech_16k, self.sample_rate)
132
+ start_time = time.time()
133
+ for model_output in self.model.vc(**model_input, stream=stream, speed=speed):
134
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
135
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
136
+ yield model_output
137
+ start_time = time.time()
138
+
139
+
140
+ class CosyVoice2(CosyVoice):
141
+
142
+ def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False,device:str='cuda:0'):
143
+ self.instruct = True if '-Instruct' in model_dir else False
144
+ self.model_dir = model_dir
145
+ self.fp16 = fp16
146
+ self.device = device
147
+ if not os.path.exists(model_dir):
148
+ model_dir = snapshot_download(model_dir)
149
+ with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
150
+ configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')})
151
+ # assert get_model_type(configs) == CosyVoice2Model, 'do not use {} for CosyVoice2 initialization!'.format(model_dir)
152
+ self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
153
+ configs['feat_extractor'],
154
+ '{}/campplus.onnx'.format(model_dir),
155
+ '{}/speech_tokenizer_v2.onnx'.format(model_dir),
156
+ '{}/spk2info.pt'.format(model_dir),
157
+ configs['allowed_special'],
158
+ device)
159
+ self.sample_rate = configs['sample_rate']
160
+ if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
161
+ load_jit, load_trt, fp16 = False, False, False
162
+ logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
163
+ self.model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16,device)
164
+ self.model.load('{}/llm.pt'.format(model_dir),
165
+ '{}/flow.pt'.format(model_dir),
166
+ '{}/hift.pt'.format(model_dir))
167
+ if load_jit:
168
+ self.model.load_jit('{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
169
+ if load_trt:
170
+ self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
171
+ '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
172
+ self.fp16)
173
+ del configs
174
+
175
+ def inference_instruct(self, *args, **kwargs):
176
+ raise NotImplementedError('inference_instruct is not implemented for CosyVoice2!')
177
+
178
+ def inference_instruct2(self, tts_text, instruct_text, prompt_speech_16k, stream=False, speed=1.0, text_frontend=True):
179
+ assert isinstance(self.model, CosyVoice2Model), 'inference_instruct2 is only implemented for CosyVoice2!'
180
+ for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
181
+ model_input = self.frontend.frontend_instruct2(i, instruct_text, prompt_speech_16k, self.sample_rate)
182
+ start_time = time.time()
183
+ logging.info('synthesis text {}'.format(i))
184
+ for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
185
+ speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
186
+ logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
187
+ yield model_output
188
+ start_time = time.time()
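
Reviewer note: a minimal usage sketch for the CosyVoice2 wrapper added above, assuming the model directory shipped in this commit and one of the bundled prompt wavs; the prompt transcript string is a placeholder. The constructor's device argument defaults to 'cuda:0'.

import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice2

cosyvoice = CosyVoice2('CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO',
                       load_jit=False, load_trt=False, fp16=False)

# The frontend expects 16 kHz mono prompt audio with shape (1, T).
speech, sr = torchaudio.load('third_party/new_zero_0_0.wav')
prompt_speech_16k = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(
    speech.mean(dim=0, keepdim=True))

for i, out in enumerate(cosyvoice.inference_zero_shot(
        'Hello, this is a short test sentence.',
        'transcript of the prompt audio goes here',  # placeholder prompt text
        prompt_speech_16k, stream=False)):
    torchaudio.save('zero_shot_out_{}.wav'.format(i), out['tts_speech'], cosyvoice.sample_rate)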
third_party/cosyvoice/cli/frontend.py ADDED
@@ -0,0 +1,218 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from functools import partial
15
+ from typing import Generator
16
+ import json
17
+ import onnxruntime
18
+ import torch
19
+ import numpy as np
20
+ import whisper
21
+ from typing import Callable
22
+ import torchaudio.compliance.kaldi as kaldi
23
+ import torchaudio
24
+ import os
25
+ import re
26
+ import inflect
27
+ try:
28
+ import ttsfrd
29
+ use_ttsfrd = True
30
+ except ImportError:
31
+ print("failed to import ttsfrd, use WeTextProcessing instead")
32
+ from tn.chinese.normalizer import Normalizer as ZhNormalizer
33
+ from tn.english.normalizer import Normalizer as EnNormalizer
34
+ use_ttsfrd = False
35
+ from cosyvoice.utils.file_utils import logging
36
+ from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph, is_only_punctuation
37
+
38
+
39
+ class CosyVoiceFrontEnd:
40
+
41
+ def __init__(self,
42
+ get_tokenizer: Callable,
43
+ feat_extractor: Callable,
44
+ campplus_model: str,
45
+ speech_tokenizer_model: str,
46
+ spk2info: str = '',
47
+ allowed_special: str = 'all',
48
+ device: str = None):
49
+ self.tokenizer = get_tokenizer()
50
+ self.feat_extractor = feat_extractor
51
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device)
52
+ option = onnxruntime.SessionOptions()
53
+ option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
54
+ option.intra_op_num_threads = 1
55
+ self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
56
+ cuda_idx = int(device.split(':')[-1] if device is not None and 'cuda' in device else '0')
57
+
58
+ self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option,
59
+ providers=[("CUDAExecutionProvider", {"device_id": cuda_idx}) if torch.cuda.is_available() else
60
+ "CPUExecutionProvider"])
61
+ if os.path.exists(spk2info):
62
+ self.spk2info = torch.load(spk2info, map_location=self.device)
63
+ else:
64
+ self.spk2info = {}
65
+ self.allowed_special = allowed_special
66
+ self.use_ttsfrd = use_ttsfrd
67
+ if self.use_ttsfrd:
68
+ self.frd = ttsfrd.TtsFrontendEngine()
69
+ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
70
+ assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, \
71
+ 'failed to initialize ttsfrd resource'
72
+ self.frd.set_lang_type('pinyinvg')
73
+ else:
74
+ self.zh_tn_model = ZhNormalizer(remove_erhua=False, full_to_half=False, overwrite_cache=True, remove_interjections=False)
75
+ self.en_tn_model = EnNormalizer()
76
+ self.inflect_parser = inflect.engine()
77
+
78
+ def _extract_text_token(self, text):
79
+ if isinstance(text, Generator):
80
+ logging.info('get tts_text generator, will return _extract_text_token_generator!')
81
+ # NOTE add a dummy text_token_len for compatibility
82
+ return self._extract_text_token_generator(text), torch.tensor([0], dtype=torch.int32).to(self.device)
83
+ else:
84
+ text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
85
+ text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
86
+ text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
87
+ return text_token, text_token_len
88
+
89
+ def _extract_text_token_generator(self, text_generator):
90
+ for text in text_generator:
91
+ text_token, _ = self._extract_text_token(text)
92
+ for i in range(text_token.shape[1]):
93
+ yield text_token[:, i: i + 1]
94
+
95
+ def _extract_speech_token(self, speech):
96
+ assert speech.shape[1] / 16000 <= 30, 'do not support extract speech token for audio longer than 30s'
97
+ feat = whisper.log_mel_spectrogram(speech, n_mels=128)
98
+ speech_token = self.speech_tokenizer_session.run(None,
99
+ {self.speech_tokenizer_session.get_inputs()[0].name:
100
+ feat.detach().cpu().numpy(),
101
+ self.speech_tokenizer_session.get_inputs()[1].name:
102
+ np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
103
+ speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
104
+ speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
105
+ return speech_token, speech_token_len
106
+
107
+ def _extract_spk_embedding(self, speech):
108
+ feat = kaldi.fbank(speech,
109
+ num_mel_bins=80,
110
+ dither=0,
111
+ sample_frequency=16000)
112
+ feat = feat - feat.mean(dim=0, keepdim=True)
113
+ embedding = self.campplus_session.run(None,
114
+ {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
115
+ embedding = torch.tensor([embedding]).to(self.device)
116
+ return embedding
117
+
118
+ def _extract_speech_feat(self, speech):
119
+ speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
120
+ speech_feat = speech_feat.unsqueeze(dim=0)
121
+ speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
122
+ return speech_feat, speech_feat_len
123
+
124
+ def text_normalize(self, text, split=True, text_frontend=True):
125
+ if isinstance(text, Generator):
126
+ logging.info('get tts_text generator, will skip text_normalize!')
127
+ return [text]
128
+ if text_frontend is False:
129
+ return [text] if split is True else text
130
+ text = text.strip()
131
+ if self.use_ttsfrd:
132
+ texts = [i["text"] for i in json.loads(self.frd.do_voicegen_frd(text))["sentences"]]
133
+ text = ''.join(texts)
134
+ else:
135
+ if contains_chinese(text):
136
+ text = self.zh_tn_model.normalize(text)
137
+ text = text.replace("\n", "")
138
+ text = replace_blank(text)
139
+ text = replace_corner_mark(text)
140
+ text = text.replace(".", "。")
141
+ text = text.replace(" - ", ",")
142
+ text = remove_bracket(text)
143
+ text = re.sub(r'[,,、]+$', '。', text)
144
+ texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80,
145
+ token_min_n=60, merge_len=20, comma_split=False))
146
+ else:
147
+ text = self.en_tn_model.normalize(text)
148
+ text = spell_out_number(text, self.inflect_parser)
149
+ texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
150
+ token_min_n=60, merge_len=20, comma_split=False))
151
+ texts = [i for i in texts if not is_only_punctuation(i)]
152
+ return texts if split is True else text
153
+
154
+ def frontend_sft(self, tts_text, spk_id):
155
+ tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
156
+ embedding = self.spk2info[spk_id]['embedding']
157
+ model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
158
+ return model_input
159
+
160
+ def frontend_tts(self, tts_text):
161
+ tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
162
+ model_input = {'text': tts_text_token, 'text_len': tts_text_token_len}
163
+ return model_input
164
+ def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, resample_rate):
165
+ tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
166
+ prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
167
+ prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
168
+ speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
169
+ speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k)
170
+ if resample_rate == 24000:
171
+ # cosyvoice2, force speech_feat % speech_token = 2
172
+ token_len = min(int(speech_feat.shape[1] / 2), speech_token.shape[1])
173
+ speech_feat, speech_feat_len[:] = speech_feat[:, :2 * token_len], 2 * token_len
174
+ speech_token, speech_token_len[:] = speech_token[:, :token_len], token_len
175
+ embedding = self._extract_spk_embedding(prompt_speech_16k)
176
+ model_input = {'text': tts_text_token, 'text_len': tts_text_token_len,
177
+ 'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
178
+ 'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
179
+ 'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
180
+ 'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
181
+ 'llm_embedding': embedding, 'flow_embedding': embedding}
182
+ return model_input
183
+
184
+ def frontend_cross_lingual(self, tts_text, prompt_speech_16k, resample_rate):
185
+ model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k, resample_rate)
186
+ # in cross lingual mode, we remove prompt in llm
187
+ del model_input['prompt_text']
188
+ del model_input['prompt_text_len']
189
+ del model_input['llm_prompt_speech_token']
190
+ del model_input['llm_prompt_speech_token_len']
191
+ return model_input
192
+
193
+ def frontend_instruct(self, tts_text, spk_id, instruct_text):
194
+ model_input = self.frontend_sft(tts_text, spk_id)
195
+ # in instruct mode, we remove spk_embedding in llm due to information leakage
196
+ del model_input['llm_embedding']
197
+ instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
198
+ model_input['prompt_text'] = instruct_text_token
199
+ model_input['prompt_text_len'] = instruct_text_token_len
200
+ return model_input
201
+
202
+ def frontend_instruct2(self, tts_text, instruct_text, prompt_speech_16k, resample_rate):
203
+ model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_speech_16k, resample_rate)
204
+ del model_input['llm_prompt_speech_token']
205
+ del model_input['llm_prompt_speech_token_len']
206
+ return model_input
207
+
208
+ def frontend_vc(self, source_speech_16k, prompt_speech_16k, resample_rate):
209
+ prompt_speech_token, prompt_speech_token_len = self._extract_speech_token(prompt_speech_16k)
210
+ prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
211
+ prompt_speech_feat, prompt_speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
212
+ embedding = self._extract_spk_embedding(prompt_speech_16k)
213
+ source_speech_token, source_speech_token_len = self._extract_speech_token(source_speech_16k)
214
+ model_input = {'source_speech_token': source_speech_token, 'source_speech_token_len': source_speech_token_len,
215
+ 'flow_prompt_speech_token': prompt_speech_token, 'flow_prompt_speech_token_len': prompt_speech_token_len,
216
+ 'prompt_speech_feat': prompt_speech_feat, 'prompt_speech_feat_len': prompt_speech_feat_len,
217
+ 'flow_embedding': embedding}
218
+ return model_input
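
Note: _extract_text_token above also accepts a Python generator for the input text; text_normalize is skipped for generators and chunks are tokenized lazily, which is what feeds CosyVoice2's streaming inference_bistream path. A hedged sketch, reusing the cosyvoice and prompt_speech_16k objects from the previous example:

# The frontend detects a Generator, defers tokenization, and CosyVoice2Model
# routes the token stream to llm.inference_bistream (see model.py / llm.py below).
def text_chunks():
    yield 'Streaming text arrives '
    yield 'in small pieces and is '
    yield 'synthesized incrementally.'

for out in cosyvoice.inference_zero_shot(text_chunks(),
                                         'transcript of the prompt audio goes here',
                                         prompt_speech_16k, stream=True):
    chunk = out['tts_speech']  # (1, n_samples) chunk at cosyvoice.sample_rate
    # feed chunk to a playback buffer, or collect the chunks and torch.cat them to save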
third_party/cosyvoice/cli/model.py ADDED
@@ -0,0 +1,412 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ from typing import Generator
16
+ import torch
17
+ import numpy as np
18
+ import threading
19
+ import time
20
+ from torch.nn import functional as F
21
+ from contextlib import nullcontext
22
+ import uuid
23
+ from cosyvoice.utils.common import fade_in_out
24
+ from cosyvoice.utils.file_utils import convert_onnx_to_trt
25
+
26
+
27
+ class CosyVoiceModel:
28
+
29
+ def __init__(self,
30
+ llm: torch.nn.Module,
31
+ flow: torch.nn.Module,
32
+ hift: torch.nn.Module,
33
+ fp16: bool):
34
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
35
+ self.llm = llm
36
+ self.flow = flow
37
+ self.hift = hift
38
+ self.fp16 = fp16
39
+ self.llm.fp16 = fp16
40
+ self.flow.fp16 = fp16
41
+ if self.fp16 is True:
42
+ self.llm.half()
43
+ self.flow.half()
44
+ self.token_min_hop_len = 2 * self.flow.input_frame_rate
45
+ self.token_max_hop_len = 4 * self.flow.input_frame_rate
46
+ self.token_overlap_len = 20
47
+ # here we fix set flow.decoder.estimator.static_chunk_size = 0 for compatibability
48
+ self.flow.decoder.estimator.static_chunk_size = 0
49
+ # mel fade in out
50
+ self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
51
+ self.mel_window = np.hamming(2 * self.mel_overlap_len)
52
+ # hift cache
53
+ self.mel_cache_len = 20
54
+ self.source_cache_len = int(self.mel_cache_len * 256)
55
+ # speech fade in out
56
+ self.speech_window = np.hamming(2 * self.source_cache_len)
57
+ # rtf and decoding related
58
+ self.stream_scale_factor = 1
59
+ assert self.stream_scale_factor >= 1, 'stream_scale_factor should be greater than 1, change it according to your actual rtf'
60
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
61
+ self.lock = threading.Lock()
62
+ # dict used to store session related variable
63
+ self.tts_speech_token_dict = {}
64
+ self.llm_end_dict = {}
65
+ self.mel_overlap_dict = {}
66
+ self.flow_cache_dict = {}
67
+ self.hift_cache_dict = {}
68
+
69
+ def load(self, llm_model, flow_model, hift_model):
70
+ self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
71
+ self.llm.to(self.device).eval()
72
+ self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
73
+ self.flow.to(self.device).eval()
74
+ # in case hift_model is a hifigan model
75
+ hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
76
+ self.hift.load_state_dict(hift_state_dict, strict=True)
77
+ self.hift.to(self.device).eval()
78
+
79
+ def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
80
+ llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
81
+ self.llm.text_encoder = llm_text_encoder
82
+ llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
83
+ self.llm.llm = llm_llm
84
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
85
+ self.flow.encoder = flow_encoder
86
+
87
+ def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, fp16):
88
+ assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
89
+ if not os.path.exists(flow_decoder_estimator_model):
90
+ convert_onnx_to_trt(flow_decoder_estimator_model, flow_decoder_onnx_model, fp16)
91
+ if os.path.getsize(flow_decoder_estimator_model) == 0:
92
+ raise ValueError('{} is empty file, delete it and export again!'.format(flow_decoder_estimator_model))
93
+ del self.flow.decoder.estimator
94
+ import tensorrt as trt
95
+ with open(flow_decoder_estimator_model, 'rb') as f:
96
+ self.flow.decoder.estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
97
+ if self.flow.decoder.estimator_engine is None:
98
+ raise ValueError('failed to load trt {}'.format(flow_decoder_estimator_model))
99
+ self.flow.decoder.estimator = self.flow.decoder.estimator_engine.create_execution_context()
100
+
101
+ def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
102
+ with self.llm_context:
103
+ if isinstance(text, Generator):
104
+ assert isinstance(self, CosyVoice2Model), 'streaming input text is only implemented for CosyVoice2!'
105
+ for i in self.llm.inference_bistream(text=text,
106
+ prompt_text=prompt_text.to(self.device),
107
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
108
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
109
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
110
+ embedding=llm_embedding.to(self.device)):
111
+ self.tts_speech_token_dict[uuid].append(i)
112
+ else:
113
+ for i in self.llm.inference(text=text.to(self.device),
114
+ text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
115
+ prompt_text=prompt_text.to(self.device),
116
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
117
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
118
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
119
+ embedding=llm_embedding.to(self.device)):
120
+ self.tts_speech_token_dict[uuid].append(i)
121
+ self.llm_end_dict[uuid] = True
122
+
123
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
124
+ tts_mel, flow_cache = self.flow.inference(token=token.to(self.device),
125
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
126
+ prompt_token=prompt_token.to(self.device),
127
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
128
+ prompt_feat=prompt_feat.to(self.device),
129
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
130
+ embedding=embedding.to(self.device),
131
+ flow_cache=self.flow_cache_dict[uuid])
132
+ self.flow_cache_dict[uuid] = flow_cache
133
+
134
+ # mel overlap fade in out
135
+ if self.mel_overlap_dict[uuid].shape[2] != 0:
136
+ tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
137
+ # append hift cache
138
+ if self.hift_cache_dict[uuid] is not None:
139
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
140
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
141
+ else:
142
+ hift_cache_source = torch.zeros(1, 1, 0)
143
+ # keep overlap mel and hift cache
144
+ if finalize is False:
145
+ self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
146
+ tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
147
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
148
+ if self.hift_cache_dict[uuid] is not None:
149
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
150
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
151
+ 'source': tts_source[:, :, -self.source_cache_len:],
152
+ 'speech': tts_speech[:, -self.source_cache_len:]}
153
+ tts_speech = tts_speech[:, :-self.source_cache_len]
154
+ else:
155
+ if speed != 1.0:
156
+ assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
157
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
158
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
159
+ if self.hift_cache_dict[uuid] is not None:
160
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
161
+ return tts_speech
162
+
163
+ def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
164
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
165
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
166
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
167
+ prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
168
+ # this_uuid is used to track variables related to this inference thread
169
+ this_uuid = str(uuid.uuid1())
170
+ with self.lock:
171
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
172
+ self.hift_cache_dict[this_uuid] = None
173
+ self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
174
+ self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
175
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
176
+ p.start()
177
+ if stream is True:
178
+ token_hop_len = self.token_min_hop_len
179
+ while True:
180
+ time.sleep(0.1)
181
+ if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
182
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
183
+ .unsqueeze(dim=0)
184
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
185
+ prompt_token=flow_prompt_speech_token,
186
+ prompt_feat=prompt_speech_feat,
187
+ embedding=flow_embedding,
188
+ uuid=this_uuid,
189
+ finalize=False)
190
+ yield {'tts_speech': this_tts_speech.cpu()}
191
+ with self.lock:
192
+ self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
193
+ # increase token_hop_len for better speech quality
194
+ token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
195
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
196
+ break
197
+ p.join()
198
+ # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
199
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
200
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
201
+ prompt_token=flow_prompt_speech_token,
202
+ prompt_feat=prompt_speech_feat,
203
+ embedding=flow_embedding,
204
+ uuid=this_uuid,
205
+ finalize=True)
206
+ yield {'tts_speech': this_tts_speech.cpu()}
207
+ else:
208
+ # deal with all tokens
209
+ p.join()
210
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
211
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
212
+ prompt_token=flow_prompt_speech_token,
213
+ prompt_feat=prompt_speech_feat,
214
+ embedding=flow_embedding,
215
+ uuid=this_uuid,
216
+ finalize=True,
217
+ speed=speed)
218
+ yield {'tts_speech': this_tts_speech.cpu()}
219
+ with self.lock:
220
+ self.tts_speech_token_dict.pop(this_uuid)
221
+ self.llm_end_dict.pop(this_uuid)
222
+ self.mel_overlap_dict.pop(this_uuid)
223
+ self.hift_cache_dict.pop(this_uuid)
224
+ self.flow_cache_dict.pop(this_uuid)
225
+ torch.cuda.empty_cache()
226
+
227
+ def vc(self, source_speech_token, flow_prompt_speech_token, prompt_speech_feat, flow_embedding, stream=False, speed=1.0, **kwargs):
228
+ # this_uuid is used to track variables related to this inference thread
229
+ this_uuid = str(uuid.uuid1())
230
+ with self.lock:
231
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = source_speech_token.flatten().tolist(), True
232
+ self.hift_cache_dict[this_uuid] = None
233
+ self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
234
+ self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
235
+ if stream is True:
236
+ token_hop_len = self.token_min_hop_len
237
+ while True:
238
+ if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
239
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
240
+ .unsqueeze(dim=0)
241
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
242
+ prompt_token=flow_prompt_speech_token,
243
+ prompt_feat=prompt_speech_feat,
244
+ embedding=flow_embedding,
245
+ uuid=this_uuid,
246
+ finalize=False)
247
+ yield {'tts_speech': this_tts_speech.cpu()}
248
+ with self.lock:
249
+ self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
250
+ # increase token_hop_len for better speech quality
251
+ token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
252
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
253
+ break
254
+ # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
255
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
256
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
257
+ prompt_token=flow_prompt_speech_token,
258
+ prompt_feat=prompt_speech_feat,
259
+ embedding=flow_embedding,
260
+ uuid=this_uuid,
261
+ finalize=True)
262
+ yield {'tts_speech': this_tts_speech.cpu()}
263
+ else:
264
+ # deal with all tokens
265
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
266
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
267
+ prompt_token=flow_prompt_speech_token,
268
+ prompt_feat=prompt_speech_feat,
269
+ embedding=flow_embedding,
270
+ uuid=this_uuid,
271
+ finalize=True,
272
+ speed=speed)
273
+ yield {'tts_speech': this_tts_speech.cpu()}
274
+ with self.lock:
275
+ self.tts_speech_token_dict.pop(this_uuid)
276
+ self.llm_end_dict.pop(this_uuid)
277
+ self.mel_overlap_dict.pop(this_uuid)
278
+ self.hift_cache_dict.pop(this_uuid)
279
+ torch.cuda.empty_cache()
280
+
281
+
282
+ class CosyVoice2Model(CosyVoiceModel):
283
+
284
+ def __init__(self,
285
+ llm: torch.nn.Module,
286
+ flow: torch.nn.Module,
287
+ hift: torch.nn.Module,
288
+ fp16: bool,
289
+ device: str):
290
+ self.device = torch.device(device)
291
+ self.llm = llm
292
+ self.flow = flow
293
+ self.hift = hift
294
+ self.fp16 = fp16
295
+ self.llm.fp16 = fp16
296
+ self.flow.fp16 = fp16
297
+ if self.fp16 is True:
298
+ self.llm.half()
299
+ self.flow.half()
300
+ self.token_hop_len = 2 * self.flow.input_frame_rate
301
+ # here we fix flow encoder/decoder decoding_chunk_size, in the future we will send it as arguments, or use cache
302
+ self.flow.encoder.static_chunk_size = 2 * self.flow.input_frame_rate
303
+ self.flow.decoder.estimator.static_chunk_size = 2 * self.flow.input_frame_rate * self.flow.token_mel_ratio
304
+ # hift cache
305
+ self.mel_cache_len = 8
306
+ self.source_cache_len = int(self.mel_cache_len * 480)
307
+ # speech fade in out
308
+ self.speech_window = np.hamming(2 * self.source_cache_len)
309
+ # rtf and decoding related
310
+ self.stream_scale_factor = 1
311
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
312
+ self.lock = threading.Lock()
313
+ # dict used to store session related variable
314
+ self.tts_speech_token_dict = {}
315
+ self.llm_end_dict = {}
316
+ self.hift_cache_dict = {}
317
+
318
+ def load_jit(self, flow_encoder_model):
319
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
320
+ self.flow.encoder = flow_encoder
321
+
322
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, token_offset, finalize=False, speed=1.0):
323
+ tts_mel, _ = self.flow.inference(token=token.to(self.device),
324
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
325
+ prompt_token=prompt_token.to(self.device),
326
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
327
+ prompt_feat=prompt_feat.to(self.device),
328
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
329
+ embedding=embedding.to(self.device),
330
+ finalize=finalize)
331
+ tts_mel = tts_mel[:, :, token_offset * self.flow.token_mel_ratio:]
332
+ # append hift cache
333
+ if self.hift_cache_dict[uuid] is not None:
334
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
335
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
336
+ else:
337
+ hift_cache_source = torch.zeros(1, 1, 0)
338
+ # keep overlap mel and hift cache
339
+ if finalize is False:
340
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
341
+ if self.hift_cache_dict[uuid] is not None:
342
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
343
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
344
+ 'source': tts_source[:, :, -self.source_cache_len:],
345
+ 'speech': tts_speech[:, -self.source_cache_len:]}
346
+ tts_speech = tts_speech[:, :-self.source_cache_len]
347
+ else:
348
+ if speed != 1.0:
349
+ assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
350
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
351
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
352
+ if self.hift_cache_dict[uuid] is not None:
353
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
354
+ return tts_speech
355
+
356
+ def tts(self, text, flow_embedding, llm_embedding=torch.zeros(0, 192),
357
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
358
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
359
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
360
+ prompt_speech_feat=torch.zeros(1, 0, 80), stream=False, speed=1.0, **kwargs):
361
+ # this_uuid is used to track variables related to this inference thread
362
+ this_uuid = str(uuid.uuid1())
363
+ with self.lock:
364
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
365
+ self.hift_cache_dict[this_uuid] = None
366
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
367
+ p.start()
368
+ if stream is True:
369
+ token_offset = 0
370
+ while True:
371
+ time.sleep(0.1)
372
+ if len(self.tts_speech_token_dict[this_uuid]) - token_offset >= self.token_hop_len + self.flow.pre_lookahead_len:
373
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_offset + self.token_hop_len + self.flow.pre_lookahead_len]).unsqueeze(dim=0)
374
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
375
+ prompt_token=flow_prompt_speech_token,
376
+ prompt_feat=prompt_speech_feat,
377
+ embedding=flow_embedding,
378
+ uuid=this_uuid,
379
+ token_offset=token_offset,
380
+ finalize=False)
381
+ token_offset += self.token_hop_len
382
+ yield {'tts_speech': this_tts_speech.cpu()}
383
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) - token_offset < self.token_hop_len + self.flow.pre_lookahead_len:
384
+ break
385
+ p.join()
386
+ # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
387
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
388
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
389
+ prompt_token=flow_prompt_speech_token,
390
+ prompt_feat=prompt_speech_feat,
391
+ embedding=flow_embedding,
392
+ uuid=this_uuid,
393
+ token_offset=token_offset,
394
+ finalize=True)
395
+ yield {'tts_speech': this_tts_speech.cpu()}
396
+ else:
397
+ # deal with all tokens
398
+ p.join()
399
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
400
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
401
+ prompt_token=flow_prompt_speech_token,
402
+ prompt_feat=prompt_speech_feat,
403
+ embedding=flow_embedding,
404
+ uuid=this_uuid,
405
+ token_offset=0,
406
+ finalize=True,
407
+ speed=speed)
408
+ yield {'tts_speech': this_tts_speech.cpu()}
409
+ with self.lock:
410
+ self.tts_speech_token_dict.pop(this_uuid)
411
+ self.llm_end_dict.pop(this_uuid)
412
+ torch.cuda.empty_cache()
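
Reviewer note: load_trt above resolves flow.decoder.estimator.{fp16|fp32}.mygpu.plan, while this commit ships GPU-specific plans (.a10, .l20, .v100) plus the fp32 ONNX. The sketch below selects a prebuilt plan before constructing the model with load_trt=True; the A10/fp16 choice is only an illustrative assumption. If no file matches the expected name, convert_onnx_to_trt rebuilds the engine from flow.decoder.estimator.fp32.onnx on first use.

import os
import shutil

model_dir = 'CosyVoice2-0.5B-RWKV-7-1.5B-Instruct-CHENJPKO'
src = os.path.join(model_dir, 'flow.decoder.estimator.fp16.a10.plan')
dst = os.path.join(model_dir, 'flow.decoder.estimator.fp16.mygpu.plan')
if os.path.exists(src) and not os.path.exists(dst):
    shutil.copyfile(src, dst)  # or os.symlink(src, dst)

# CosyVoice2(model_dir, load_trt=True, fp16=True) will then deserialize dst
# instead of exporting a new TensorRT engine from the ONNX file.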
third_party/cosyvoice/llm/__pycache__/llm.cpython-311.pyc ADDED
Binary file (27 kB).
 
third_party/cosyvoice/llm/llm.py ADDED
@@ -0,0 +1,434 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, Optional, Callable, List, Generator
15
+ import torch
16
+ from torch import nn
17
+ import torch.nn.functional as F
18
+ from transformers import Qwen2ForCausalLM
19
+ from torch.nn.utils.rnn import pad_sequence, unpad_sequence
20
+ from cosyvoice.utils.common import IGNORE_ID
21
+ from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss
22
+ from cosyvoice.utils.common import th_accuracy
23
+ from cosyvoice.utils.file_utils import logging
24
+
25
+
26
+ class TransformerLM(torch.nn.Module):
27
+ def __init__(
28
+ self,
29
+ text_encoder_input_size: int,
30
+ llm_input_size: int,
31
+ llm_output_size: int,
32
+ text_token_size: int,
33
+ speech_token_size: int,
34
+ text_encoder: torch.nn.Module,
35
+ llm: torch.nn.Module,
36
+ sampling: Callable,
37
+ length_normalized_loss: bool = True,
38
+ lsm_weight: float = 0.0,
39
+ spk_embed_dim: int = 192,
40
+ ):
41
+ super().__init__()
42
+ self.llm_input_size = llm_input_size
43
+ self.speech_token_size = speech_token_size
44
+ # 1. build text token inputs related modules
45
+ self.text_embedding = torch.nn.Embedding(text_token_size, text_encoder_input_size)
46
+ self.text_encoder = text_encoder
47
+ self.text_encoder_affine_layer = nn.Linear(
48
+ self.text_encoder.output_size(),
49
+ llm_input_size
50
+ )
51
+
52
+ # 2. build speech token language model related modules
53
+ self.sos_eos = 0
54
+ self.task_id = 1
55
+ self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
56
+ self.llm = llm
57
+ self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1)
58
+ self.criterion_ce = LabelSmoothingLoss(
59
+ size=speech_token_size + 1,
60
+ padding_idx=IGNORE_ID,
61
+ smoothing=lsm_weight,
62
+ normalize_length=length_normalized_loss,
63
+ )
64
+
65
+ # 3. [Optional] build speech token related modules
66
+ self.speech_embedding = torch.nn.Embedding(speech_token_size, llm_input_size)
67
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, llm_input_size)
68
+
69
+ # 4. sampling method
70
+ self.sampling = sampling
71
+
72
+ def encode(
73
+ self,
74
+ text: torch.Tensor,
75
+ text_lengths: torch.Tensor,
76
+ ):
77
+ encoder_out, encoder_mask = self.text_encoder(text, text_lengths, decoding_chunk_size=1, num_decoding_left_chunks=-1)
78
+ encoder_out_lens = encoder_mask.squeeze(1).sum(1)
79
+ encoder_out = self.text_encoder_affine_layer(encoder_out)
80
+ return encoder_out, encoder_out_lens
81
+
82
+ def pad_unpad_sequence(self, sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len):
83
+ text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
84
+ speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
85
+ lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), embedding[i], text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0)
86
+ for i in range(len(text_token))]
87
+ lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
88
+ lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
89
+ return lm_input, lm_input_len
90
+
91
+ def forward(
92
+ self,
93
+ batch: dict,
94
+ device: torch.device,
95
+ ) -> Dict[str, Optional[torch.Tensor]]:
96
+ """
97
+ Args:
98
+ text: (B, L, D)
99
+ text_lengths: (B,)
100
+ audio: (B, T, N) or (B, T)
101
+ audio_lengths: (B,)
102
+ """
103
+ text_token = batch['text_token'].to(device)
104
+ text_token_len = batch['text_token_len'].to(device)
105
+ speech_token = batch['speech_token'].to(device)
106
+ speech_token_len = batch['speech_token_len'].to(device)
107
+ embedding = batch['embedding'].to(device)
108
+
109
+ # 1. prepare llm_target
110
+ lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() +
111
+ [self.speech_token_size]) for i in range(text_token.size(0))]
112
+ lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device)
113
+
114
+ # 1. encode text_token
115
+ text_token = self.text_embedding(text_token)
116
+ text_token, text_token_len = self.encode(text_token, text_token_len)
117
+
118
+ # 2. embedding projection
119
+ embedding = F.normalize(embedding, dim=1)
120
+ embedding = self.spk_embed_affine_layer(embedding)
121
+ embedding = embedding.unsqueeze(1)
122
+
123
+ # 3. eos and task_id
124
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
125
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
126
+
127
+ # 4. encode speech_token
128
+ speech_token = self.speech_embedding(speech_token)
129
+
130
+ # 5. unpad and pad
131
+ lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, embedding, text_token, text_token_len,
132
+ task_id_emb, speech_token, speech_token_len)
133
+
134
+ # 6. run lm forward
135
+ lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
136
+ logits = self.llm_decoder(lm_output)
137
+ loss = self.criterion_ce(logits, lm_target)
138
+ acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID)
139
+ return {'loss': loss, 'acc': acc}
140
+
141
+ def sampling_ids(
142
+ self,
143
+ weighted_scores: torch.Tensor,
144
+ decoded_tokens: List,
145
+ sampling: int,
146
+ ignore_eos: bool = True,
147
+ ):
148
+ num_trials, max_trials = 0, 100
149
+ while True:
150
+ top_ids = self.sampling(weighted_scores, decoded_tokens, sampling)
151
+ if (not ignore_eos) or (self.speech_token_size not in top_ids):
152
+ break
153
+ num_trials += 1
154
+ if num_trials > max_trials:
155
+ raise RuntimeError('sampling reached max_trials {} and still got eos while ignore_eos is True, check your input!'.format(max_trials))
156
+ return top_ids
157
+
158
+ @torch.inference_mode()
159
+ def inference(
160
+ self,
161
+ text: torch.Tensor,
162
+ text_len: torch.Tensor,
163
+ prompt_text: torch.Tensor,
164
+ prompt_text_len: torch.Tensor,
165
+ prompt_speech_token: torch.Tensor,
166
+ prompt_speech_token_len: torch.Tensor,
167
+ embedding: torch.Tensor,
168
+ sampling: int = 25,
169
+ max_token_text_ratio: float = 20,
170
+ min_token_text_ratio: float = 2,
171
+ ) -> Generator[torch.Tensor, None, None]:
172
+ if getattr(self, 'fp16', False) is True:  # fp16 is set on this module externally; default to False if it never was
173
+ embedding = embedding.half()
174
+
175
+ device = text.device
176
+ text = torch.concat([prompt_text, text], dim=1)
177
+ text_len += prompt_text_len
178
+ text = self.text_embedding(text)
179
+
180
+ # 1. encode text
181
+ text, text_len = self.encode(text, text_len)
182
+
183
+ # 2. encode embedding
184
+ if embedding.shape[0] != 0:
185
+ embedding = F.normalize(embedding, dim=1)
186
+ embedding = self.spk_embed_affine_layer(embedding)
187
+ embedding = embedding.unsqueeze(dim=1)
188
+ else:
189
+ embedding = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
190
+
191
+ # 3. concat llm_input
192
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
193
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
194
+ if prompt_speech_token_len != 0:
195
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
196
+ else:
197
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
198
+ lm_input = torch.concat([sos_eos_emb, embedding, text, task_id_emb, prompt_speech_token_emb], dim=1)
199
+
200
+ # 4. cal min/max_length
201
+ min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
202
+ max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
203
+
204
+ # 5. step by step decode
205
+ out_tokens = []
206
+ offset = 0
207
+ att_cache, cnn_cache = torch.zeros((0, 0, 0, 0), device=lm_input.device), torch.zeros((0, 0, 0, 0), device=lm_input.device)
208
+ for i in range(max_len):
209
+ y_pred, att_cache, cnn_cache = self.llm.forward_chunk(lm_input, offset=offset, required_cache_size=-1,
210
+ att_cache=att_cache, cnn_cache=cnn_cache,
211
+ att_mask=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]),
212
+ device=lm_input.device)).to(torch.bool))
213
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
214
+ # force continue decode first token
215
+ if i == 0:
216
+ logp[:, self.speech_token_size] = -float('inf')
217
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True if i < min_len else False).item()
218
+ if top_ids == self.speech_token_size:
219
+ break
220
+ # in stream mode, yield token one by one
221
+ yield top_ids
222
+ out_tokens.append(top_ids)
223
+ offset += lm_input.size(1)
224
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
225
+
226
+
227
+ class Qwen2Encoder(torch.nn.Module):
228
+ def __init__(self, pretrain_path):
229
+ super().__init__()
230
+ self.model = Qwen2ForCausalLM.from_pretrained(pretrain_path)
231
+
232
+ def forward_one_step(self, xs, masks, cache=None):
233
+ input_masks = masks[:, -1, :]
234
+ outs = self.model(
235
+ inputs_embeds=xs,
236
+ attention_mask=input_masks,
237
+ output_hidden_states=True,
238
+ return_dict=True,
239
+ use_cache=True,
240
+ past_key_values=cache,
241
+ )
242
+ xs = outs.hidden_states[-1]
243
+ new_cache = outs.past_key_values
244
+ return xs, new_cache
245
+
246
+
247
+ class Qwen2LM(TransformerLM):
248
+ def __init__(
249
+ self,
250
+ llm_input_size: int,
251
+ llm_output_size: int,
252
+ speech_token_size: int,
253
+ llm: torch.nn.Module,
254
+ sampling: Callable,
255
+ length_normalized_loss: bool = True,
256
+ lsm_weight: float = 0.0,
257
+ mix_ratio: List[int] = [5, 15],
258
+ ):
259
+ torch.nn.Module.__init__(self)
260
+ self.llm_input_size = llm_input_size
261
+ self.llm_output_size = llm_output_size
262
+ self.speech_token_size = speech_token_size
263
+
264
+ # 2. build speech token language model related modules
265
+ self.sos_eos = 0
266
+ self.task_id = 1
267
+ self.fill_token = 2
268
+
269
+ self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
270
+ self.llm = llm
271
+ self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 3)
272
+ self.criterion_ce = LabelSmoothingLoss(
273
+ size=speech_token_size + 3,
274
+ padding_idx=IGNORE_ID,
275
+ smoothing=lsm_weight,
276
+ normalize_length=length_normalized_loss,
277
+ )
278
+
279
+ # 3. [Optional] build speech token related modules
280
+ self.speech_embedding = torch.nn.Embedding(speech_token_size + 3, llm_input_size)
281
+
282
+ # 4. sampling method
283
+ self.sampling = sampling
284
+ self.mix_ratio = mix_ratio
285
+
286
+ @torch.inference_mode()
287
+ def inference(
288
+ self,
289
+ text: torch.Tensor,
290
+ text_len: torch.Tensor,
291
+ prompt_text: torch.Tensor,
292
+ prompt_text_len: torch.Tensor,
293
+ prompt_speech_token: torch.Tensor,
294
+ prompt_speech_token_len: torch.Tensor,
295
+ embedding: torch.Tensor,
296
+ sampling: int = 25,
297
+ max_token_text_ratio: float = 20,
298
+ min_token_text_ratio: float = 2,
299
+ ) -> Generator[torch.Tensor, None, None]:
300
+ device = text.device
301
+ text = torch.concat([prompt_text, text], dim=1)
302
+ text_len += prompt_text_len
303
+ text = self.llm.model.model.embed_tokens(text)
304
+
305
+ # 3. concat llm_input
306
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
307
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
308
+ if prompt_speech_token_len != 0:
309
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
310
+ else:
311
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
312
+ lm_input = torch.concat([sos_eos_emb, text, task_id_emb, prompt_speech_token_emb], dim=1)
313
+
314
+ # 4. cal min/max_length
315
+ min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
316
+ max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
317
+
318
+ # 5. step by step decode
319
+ out_tokens = []
320
+ cache = None
321
+ for i in range(max_len):
322
+ y_pred, cache = self.llm.forward_one_step(lm_input,
323
+ masks=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool),
324
+ cache=cache)
325
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
326
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True if i < min_len else False).item()
327
+ if top_ids == self.speech_token_size:
328
+ break
329
+ if top_ids > self.speech_token_size:
330
+ continue
331
+ # in stream mode, yield token one by one
332
+ yield top_ids
333
+ out_tokens.append(top_ids)
334
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
335
+
336
+ @torch.inference_mode()
337
+ def inference_bistream(
338
+ self,
339
+ text: Generator,
340
+ prompt_text: torch.Tensor,
341
+ prompt_text_len: torch.Tensor,
342
+ prompt_speech_token: torch.Tensor,
343
+ prompt_speech_token_len: torch.Tensor,
344
+ embedding: torch.Tensor,
345
+ sampling: int = 25,
346
+ max_token_text_ratio: float = 20,
347
+ min_token_text_ratio: float = 2,
348
+ ) -> Generator[torch.Tensor, None, None]:
349
+
350
+ device = prompt_text.device
351
+ # 1. prepare input
352
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
353
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
354
+ if prompt_speech_token_len != 0:
355
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
356
+ else:
357
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=prompt_text.dtype).to(device)
358
+ lm_input = torch.concat([sos_eos_emb], dim=1)
359
+
360
+ # 2. iterate text
361
+ out_tokens = []
362
+ cache = None
363
+ # NOTE: init text_cache with prompt_text, as it is basically impossible that prompt_speech_token/prompt_text < 15/5
364
+ text_cache = self.llm.model.model.embed_tokens(prompt_text)
365
+ next_fill_index = -1
366
+ for this_text in text:
367
+ text_cache = torch.concat([text_cache, self.llm.model.model.embed_tokens(this_text)], dim=1)
368
+ # prompt_speech_token_emb not empty, try append to lm_input
369
+ while prompt_speech_token_emb.size(1) != 0:
370
+ if text_cache.size(1) >= self.mix_ratio[0]:
371
+ lm_input_text, lm_input_speech = text_cache[:, :self.mix_ratio[0]], prompt_speech_token_emb[:, :self.mix_ratio[1]]
372
+ logging.info('append {} text token {} speech token'.format(lm_input_text.size(1), lm_input_speech.size(1)))
373
+ lm_input = torch.concat([lm_input, lm_input_text, lm_input_speech], dim=1)
374
+ text_cache, prompt_speech_token_emb = text_cache[:, self.mix_ratio[0]:], prompt_speech_token_emb[:, self.mix_ratio[1]:]
375
+ else:
376
+ logging.info('not enough text tokens to decode, wait for more')
377
+ break
378
+ # no prompt_speech_token_emb remain, can decode some speech token
379
+ if prompt_speech_token_emb.size(1) == 0:
380
+ if (len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2) or (len(out_tokens) == 0 and lm_input.size(1) == 1):
381
+ logging.info('get fill token, need to append more text token')
382
+ if text_cache.size(1) >= self.mix_ratio[0]:
383
+ lm_input_text = text_cache[:, :self.mix_ratio[0]]
384
+ logging.info('append {} text token'.format(lm_input_text.size(1)))
385
+ if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
386
+ lm_input = lm_input_text
387
+ else:
388
+ lm_input = torch.concat([lm_input, lm_input_text], dim=1)
389
+ text_cache = text_cache[:, self.mix_ratio[0]:]
390
+ else:
391
+ logging.info('not enough text tokens to decode, wait for more')
392
+ continue
393
+ while True:
394
+ seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
395
+ y_pred, cache = self.llm.forward_one_step(lm_input,
396
+ masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
397
+ cache=cache)
398
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
399
+ if next_fill_index != -1 and len(out_tokens) == next_fill_index:
400
+ top_ids = self.speech_token_size + 2
401
+ next_fill_index += (self.mix_ratio[1] + 1)
402
+ else:
403
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True).item()
404
+ if top_ids == self.speech_token_size + 2:
405
+ next_fill_index = len(out_tokens) + self.mix_ratio[1] + 1
406
+ logging.info('fill_token index {} next fill_token index {}'.format(len(out_tokens), next_fill_index))
407
+ out_tokens.append(top_ids)
408
+ if top_ids >= self.speech_token_size:
409
+ if top_ids == self.speech_token_size + 2:
410
+ break
411
+ else:
412
+ raise ValueError('should not get token {}'.format(top_ids))
413
+ yield top_ids
414
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
415
+
416
+ # 3. final decode
417
+ lm_input = torch.concat([lm_input, text_cache, task_id_emb], dim=1)
418
+ logging.info('no more text token, decode until met eos')
419
+ while True:
420
+ seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
421
+ y_pred, cache = self.llm.forward_one_step(lm_input,
422
+ masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
423
+ cache=cache)
424
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
425
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=False).item()
426
+ out_tokens.append(top_ids)
427
+ if top_ids >= self.speech_token_size:
428
+ if top_ids == self.speech_token_size:
429
+ break
430
+ else:
431
+ raise ValueError('should not get token {}'.format(top_ids))
432
+ # in stream mode, yield token one by one
433
+ yield top_ids
434
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
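
A minimal consumption sketch for the streaming `inference` generator above. It assumes an already-instantiated `Qwen2LM` (normally built from the model's YAML config together with a `Qwen2Encoder` and a sampling callable); the helper name `collect_speech_tokens`, the zero-length prompts, and the empty speaker embedding are illustrative assumptions, not part of this commit.

import torch

def collect_speech_tokens(llm, text_ids: torch.Tensor) -> list:
    """Drain Qwen2LM.inference and return the generated speech token ids."""
    device = next(llm.parameters()).device
    text_ids = text_ids.to(device)                                    # (1, T_text) int64 text token ids
    empty_ids = torch.zeros(1, 0, dtype=torch.int64, device=device)   # no prompt text / prompt speech tokens
    zero_len = torch.zeros(1, dtype=torch.int32, device=device)
    out = []
    for token_id in llm.inference(
            text=text_ids,
            text_len=torch.tensor([text_ids.size(1)], dtype=torch.int32, device=device),
            prompt_text=empty_ids,
            prompt_text_len=zero_len,
            prompt_speech_token=empty_ids,
            prompt_speech_token_len=zero_len,
            embedding=torch.zeros(0, 192, device=device),             # unused by Qwen2LM.inference
            sampling=25):
        out.append(token_id)                                          # speech tokens are yielded one by one
    return out
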
third_party/cosyvoice/tokenizer/tokenizer.py ADDED
@@ -0,0 +1,279 @@
1
+ import base64
2
+ import os
3
+ from functools import lru_cache
4
+ from typing import Optional
5
+ import torch
6
+ from transformers import AutoTokenizer
7
+ from whisper.tokenizer import Tokenizer
8
+
9
+ import tiktoken
10
+
11
+ LANGUAGES = {
12
+ "en": "english",
13
+ "zh": "chinese",
14
+ "de": "german",
15
+ "es": "spanish",
16
+ "ru": "russian",
17
+ "ko": "korean",
18
+ "fr": "french",
19
+ "ja": "japanese",
20
+ "pt": "portuguese",
21
+ "tr": "turkish",
22
+ "pl": "polish",
23
+ "ca": "catalan",
24
+ "nl": "dutch",
25
+ "ar": "arabic",
26
+ "sv": "swedish",
27
+ "it": "italian",
28
+ "id": "indonesian",
29
+ "hi": "hindi",
30
+ "fi": "finnish",
31
+ "vi": "vietnamese",
32
+ "he": "hebrew",
33
+ "uk": "ukrainian",
34
+ "el": "greek",
35
+ "ms": "malay",
36
+ "cs": "czech",
37
+ "ro": "romanian",
38
+ "da": "danish",
39
+ "hu": "hungarian",
40
+ "ta": "tamil",
41
+ "no": "norwegian",
42
+ "th": "thai",
43
+ "ur": "urdu",
44
+ "hr": "croatian",
45
+ "bg": "bulgarian",
46
+ "lt": "lithuanian",
47
+ "la": "latin",
48
+ "mi": "maori",
49
+ "ml": "malayalam",
50
+ "cy": "welsh",
51
+ "sk": "slovak",
52
+ "te": "telugu",
53
+ "fa": "persian",
54
+ "lv": "latvian",
55
+ "bn": "bengali",
56
+ "sr": "serbian",
57
+ "az": "azerbaijani",
58
+ "sl": "slovenian",
59
+ "kn": "kannada",
60
+ "et": "estonian",
61
+ "mk": "macedonian",
62
+ "br": "breton",
63
+ "eu": "basque",
64
+ "is": "icelandic",
65
+ "hy": "armenian",
66
+ "ne": "nepali",
67
+ "mn": "mongolian",
68
+ "bs": "bosnian",
69
+ "kk": "kazakh",
70
+ "sq": "albanian",
71
+ "sw": "swahili",
72
+ "gl": "galician",
73
+ "mr": "marathi",
74
+ "pa": "punjabi",
75
+ "si": "sinhala",
76
+ "km": "khmer",
77
+ "sn": "shona",
78
+ "yo": "yoruba",
79
+ "so": "somali",
80
+ "af": "afrikaans",
81
+ "oc": "occitan",
82
+ "ka": "georgian",
83
+ "be": "belarusian",
84
+ "tg": "tajik",
85
+ "sd": "sindhi",
86
+ "gu": "gujarati",
87
+ "am": "amharic",
88
+ "yi": "yiddish",
89
+ "lo": "lao",
90
+ "uz": "uzbek",
91
+ "fo": "faroese",
92
+ "ht": "haitian creole",
93
+ "ps": "pashto",
94
+ "tk": "turkmen",
95
+ "nn": "nynorsk",
96
+ "mt": "maltese",
97
+ "sa": "sanskrit",
98
+ "lb": "luxembourgish",
99
+ "my": "myanmar",
100
+ "bo": "tibetan",
101
+ "tl": "tagalog",
102
+ "mg": "malagasy",
103
+ "as": "assamese",
104
+ "tt": "tatar",
105
+ "haw": "hawaiian",
106
+ "ln": "lingala",
107
+ "ha": "hausa",
108
+ "ba": "bashkir",
109
+ "jw": "javanese",
110
+ "su": "sundanese",
111
+ "yue": "cantonese",
112
+ "minnan": "minnan",
113
+ "wuyu": "wuyu",
114
+ "dialect": "dialect",
115
+ "zh/en": "zh/en",
116
+ "en/zh": "en/zh",
117
+ }
118
+
119
+ # language code lookup by name, with a few language aliases
120
+ TO_LANGUAGE_CODE = {
121
+ **{language: code for code, language in LANGUAGES.items()},
122
+ "burmese": "my",
123
+ "valencian": "ca",
124
+ "flemish": "nl",
125
+ "haitian": "ht",
126
+ "letzeburgesch": "lb",
127
+ "pushto": "ps",
128
+ "panjabi": "pa",
129
+ "moldavian": "ro",
130
+ "moldovan": "ro",
131
+ "sinhalese": "si",
132
+ "castilian": "es",
133
+ "mandarin": "zh",
134
+ }
135
+
136
+ AUDIO_EVENT = {
137
+ "ASR": "ASR",
138
+ "AED": "AED",
139
+ "SER": "SER",
140
+ "Speech": "Speech",
141
+ "/Speech": "/Speech",
142
+ "BGM": "BGM",
143
+ "/BGM": "/BGM",
144
+ "Laughter": "Laughter",
145
+ "/Laughter": "/Laughter",
146
+ "Applause": "Applause",
147
+ "/Applause": "/Applause",
148
+ }
149
+
150
+ EMOTION = {
151
+ "HAPPY": "HAPPY",
152
+ "SAD": "SAD",
153
+ "ANGRY": "ANGRY",
154
+ "NEUTRAL": "NEUTRAL",
155
+ }
156
+
157
+ TTS_Vocal_Token = {
158
+ "TTS/B": "TTS/B",
159
+ "TTS/O": "TTS/O",
160
+ "TTS/Q": "TTS/Q",
161
+ "TTS/A": "TTS/A",
162
+ "TTS/CO": "TTS/CO",
163
+ "TTS/CL": "TTS/CL",
164
+ "TTS/H": "TTS/H",
165
+ **{f"TTS/SP{i:02d}": f"TTS/SP{i:02d}" for i in range(1, 14)}
166
+ }
167
+
168
+
169
+ @lru_cache(maxsize=None)
170
+ def get_encoding(name: str = "gpt2", num_languages: int = 99):
171
+ vocab_path = os.path.join(os.path.dirname(__file__), "assets", f"{name}.tiktoken")
172
+ ranks = {
173
+ base64.b64decode(token): int(rank)
174
+ for token, rank in (line.split() for line in open(vocab_path) if line)
175
+ }
176
+ n_vocab = len(ranks)
177
+ special_tokens = {}
178
+
179
+ specials = [
180
+ "<|endoftext|>",
181
+ "<|startoftranscript|>",
182
+ *[f"<|{lang}|>" for lang in list(LANGUAGES.keys())[:num_languages]],
183
+ *[f"<|{audio_event}|>" for audio_event in list(AUDIO_EVENT.keys())],
184
+ *[f"<|{emotion}|>" for emotion in list(EMOTION.keys())],
185
+ "<|translate|>",
186
+ "<|transcribe|>",
187
+ "<|startoflm|>",
188
+ "<|startofprev|>",
189
+ "<|nospeech|>",
190
+ "<|notimestamps|>",
191
+ *[f"<|SPECIAL_TOKEN_{i}|>" for i in range(1, 31)], # register special tokens for ASR
192
+ *[f"<|{tts}|>" for tts in list(TTS_Vocal_Token.keys())], # register special tokens for TTS
193
+ *[f"<|{i * 0.02:.2f}|>" for i in range(1501)],
194
+ ]
195
+
196
+ for token in specials:
197
+ special_tokens[token] = n_vocab
198
+ n_vocab += 1
199
+
200
+ return tiktoken.Encoding(
201
+ name=os.path.basename(vocab_path),
202
+ explicit_n_vocab=n_vocab,
203
+ pat_str=r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""",
204
+ mergeable_ranks=ranks,
205
+ special_tokens=special_tokens,
206
+ )
207
+
208
+
209
+ @lru_cache(maxsize=None)
210
+ def get_tokenizer(
211
+ multilingual: bool,
212
+ *,
213
+ num_languages: int = 99,
214
+ language: Optional[str] = None,
215
+ task: Optional[str] = None, # Literal["transcribe", "translate", None]
216
+ ) -> Tokenizer:
217
+ if language is not None:
218
+ language = language.lower()
219
+ if language not in LANGUAGES:
220
+ if language in TO_LANGUAGE_CODE:
221
+ language = TO_LANGUAGE_CODE[language]
222
+ else:
223
+ raise ValueError(f"Unsupported language: {language}")
224
+
225
+ if multilingual:
226
+ encoding_name = "multilingual_zh_ja_yue_char_del"
227
+ language = language or "en"
228
+ task = task or "transcribe"
229
+ else:
230
+ encoding_name = "gpt2"
231
+ language = None
232
+ task = None
233
+
234
+ encoding = get_encoding(name=encoding_name, num_languages=num_languages)
235
+
236
+ return Tokenizer(
237
+ encoding=encoding, num_languages=num_languages, language=language, task=task
238
+ )
239
+
240
+
241
+ class QwenTokenizer():
242
+ def __init__(self, token_path, skip_special_tokens=True):
243
+ super().__init__()
244
+ # NOTE: non-chat model, all these special tokens remain randomly initialized.
245
+ special_tokens = {
246
+ 'eos_token': '<|endoftext|>',
247
+ 'pad_token': '<|endoftext|>',
248
+ 'additional_special_tokens': [
249
+ '<|im_start|>', '<|im_end|>', '<|endofprompt|>',
250
+ '[breath]', '<strong>', '</strong>', '[noise]',
251
+ '[laughter]', '[cough]', '[clucking]', '[accent]',
252
+ '[quick_breath]',
253
+ "<laughter>", "</laughter>",
254
+ "[hissing]", "[sigh]", "[vocalized-noise]",
255
+ "[lipsmack]", "[mn]"
256
+ ]
257
+ }
258
+ self.special_tokens = special_tokens
259
+ self.tokenizer = AutoTokenizer.from_pretrained(token_path)
260
+ self.tokenizer.add_special_tokens(special_tokens)
261
+ self.skip_special_tokens = skip_special_tokens
262
+
263
+ def encode(self, text, **kwargs):
264
+ tokens = self.tokenizer([text], return_tensors="pt")
265
+ tokens = tokens["input_ids"][0].cpu().tolist()
266
+ return tokens
267
+
268
+ def decode(self, tokens):
269
+ tokens = torch.tensor(tokens, dtype=torch.int64)
270
+ text = self.tokenizer.batch_decode([tokens], skip_special_tokens=self.skip_special_tokens)[0]
271
+ return text
272
+
273
+
274
+ @lru_cache(maxsize=None)
275
+ def get_qwen_tokenizer(
276
+ token_path: str,
277
+ skip_special_tokens: bool
278
+ ) -> QwenTokenizer:
279
+ return QwenTokenizer(token_path=token_path, skip_special_tokens=skip_special_tokens)
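
A small usage sketch for the wrappers above, assuming the Qwen tokenizer files are available locally (the path below is a placeholder, not a real path in this repository):

from cosyvoice.tokenizer.tokenizer import get_qwen_tokenizer

tokenizer = get_qwen_tokenizer(token_path='path/to/qwen-tokenizer',   # placeholder path (assumption)
                               skip_special_tokens=True)
ids = tokenizer.encode('你好，世界 [laughter]')   # list[int]; added tokens such as [laughter] get their own ids
text = tokenizer.decode(ids)                      # special tokens are dropped on decode (skip_special_tokens=True)
print(len(ids), text)
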
third_party/cosyvoice/transformer/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (171 Bytes). View file
 
third_party/cosyvoice/transformer/__pycache__/activation.cpython-311.pyc ADDED
Binary file (3.56 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/attention.cpython-311.pyc ADDED
Binary file (15.6 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/convolution.cpython-311.pyc ADDED
Binary file (5.52 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/embedding.cpython-311.pyc ADDED
Binary file (17.3 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/encoder_layer.cpython-311.pyc ADDED
Binary file (11.2 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/label_smoothing_loss.cpython-311.pyc ADDED
Binary file (4.36 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/positionwise_feed_forward.cpython-311.pyc ADDED
Binary file (6.22 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/subsampling.cpython-311.pyc ADDED
Binary file (18.9 kB). View file
 
third_party/cosyvoice/transformer/__pycache__/upsample_encoder.cpython-311.pyc ADDED
Binary file (15.6 kB). View file
 
third_party/cosyvoice/transformer/attention.py ADDED
@@ -0,0 +1,330 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ # 2022 Xingchen Song ([email protected])
4
+ # 2024 Alibaba Inc (Xiang Lyu)
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Multi-Head Attention layer definition."""
18
+
19
+ import math
20
+ from typing import Tuple
21
+
22
+ import torch
23
+ from torch import nn
24
+
25
+
26
+ class MultiHeadedAttention(nn.Module):
27
+ """Multi-Head Attention layer.
28
+
29
+ Args:
30
+ n_head (int): The number of heads.
31
+ n_feat (int): The number of features.
32
+ dropout_rate (float): Dropout rate.
33
+
34
+ """
35
+
36
+ def __init__(self,
37
+ n_head: int,
38
+ n_feat: int,
39
+ dropout_rate: float,
40
+ key_bias: bool = True):
41
+ """Construct a MultiHeadedAttention object."""
42
+ super().__init__()
43
+ assert n_feat % n_head == 0
44
+ # We assume d_v always equals d_k
45
+ self.d_k = n_feat // n_head
46
+ self.h = n_head
47
+ self.linear_q = nn.Linear(n_feat, n_feat)
48
+ self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
49
+ self.linear_v = nn.Linear(n_feat, n_feat)
50
+ self.linear_out = nn.Linear(n_feat, n_feat)
51
+ self.dropout = nn.Dropout(p=dropout_rate)
52
+
53
+ def forward_qkv(
54
+ self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
55
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
56
+ """Transform query, key and value.
57
+
58
+ Args:
59
+ query (torch.Tensor): Query tensor (#batch, time1, size).
60
+ key (torch.Tensor): Key tensor (#batch, time2, size).
61
+ value (torch.Tensor): Value tensor (#batch, time2, size).
62
+
63
+ Returns:
64
+ torch.Tensor: Transformed query tensor, size
65
+ (#batch, n_head, time1, d_k).
66
+ torch.Tensor: Transformed key tensor, size
67
+ (#batch, n_head, time2, d_k).
68
+ torch.Tensor: Transformed value tensor, size
69
+ (#batch, n_head, time2, d_k).
70
+
71
+ """
72
+ n_batch = query.size(0)
73
+ q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
74
+ k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
75
+ v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
76
+ q = q.transpose(1, 2) # (batch, head, time1, d_k)
77
+ k = k.transpose(1, 2) # (batch, head, time2, d_k)
78
+ v = v.transpose(1, 2) # (batch, head, time2, d_k)
79
+
80
+ return q, k, v
81
+
82
+ def forward_attention(
83
+ self,
84
+ value: torch.Tensor,
85
+ scores: torch.Tensor,
86
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
87
+ ) -> torch.Tensor:
88
+ """Compute attention context vector.
89
+
90
+ Args:
91
+ value (torch.Tensor): Transformed value, size
92
+ (#batch, n_head, time2, d_k).
93
+ scores (torch.Tensor): Attention score, size
94
+ (#batch, n_head, time1, time2).
95
+ mask (torch.Tensor): Mask, size (#batch, 1, time2) or
96
+ (#batch, time1, time2), (0, 0, 0) means fake mask.
97
+
98
+ Returns:
99
+ torch.Tensor: Transformed value (#batch, time1, d_model)
100
+ weighted by the attention score (#batch, time1, time2).
101
+
102
+ """
103
+ n_batch = value.size(0)
104
+ # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
105
+ # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
106
+ # 1st chunk to ease the onnx export.]
107
+ # 2. pytorch training
108
+ if mask.size(2) > 0: # time2 > 0
109
+ mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
110
+ # For last chunk, time2 might be larger than scores.size(-1)
111
+ mask = mask[:, :, :, :scores.size(-1)] # (batch, 1, *, time2)
112
+ scores = scores.masked_fill(mask, -float('inf'))
113
+ attn = torch.softmax(scores, dim=-1).masked_fill(
114
+ mask, 0.0) # (batch, head, time1, time2)
115
+ # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
116
+ # 1. onnx(16/-1, -1/-1, 16/0)
117
+ # 2. jit (16/-1, -1/-1, 16/0, 16/4)
118
+ else:
119
+ attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
120
+
121
+ p_attn = self.dropout(attn)
122
+ x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
123
+ x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
124
+ self.h * self.d_k)
125
+ ) # (batch, time1, d_model)
126
+
127
+ return self.linear_out(x) # (batch, time1, d_model)
128
+
129
+ def forward(
130
+ self,
131
+ query: torch.Tensor,
132
+ key: torch.Tensor,
133
+ value: torch.Tensor,
134
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
135
+ pos_emb: torch.Tensor = torch.empty(0),
136
+ cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
137
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
138
+ """Compute scaled dot product attention.
139
+
140
+ Args:
141
+ query (torch.Tensor): Query tensor (#batch, time1, size).
142
+ key (torch.Tensor): Key tensor (#batch, time2, size).
143
+ value (torch.Tensor): Value tensor (#batch, time2, size).
144
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
145
+ (#batch, time1, time2).
146
+ 1.When applying cross attention between decoder and encoder,
147
+ the batch padding mask for input is in (#batch, 1, T) shape.
148
+ 2.When applying self attention of encoder,
149
+ the mask is in (#batch, T, T) shape.
150
+ 3.When applying self attention of decoder,
151
+ the mask is in (#batch, L, L) shape.
152
+ 4.If the different position in decoder see different block
153
+ of the encoder, such as Mocha, the passed in mask could be
154
+ in (#batch, L, T) shape. But there is no such case in current
155
+ CosyVoice.
156
+ cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
157
+ where `cache_t == chunk_size * num_decoding_left_chunks`
158
+ and `head * d_k == size`
159
+
160
+
161
+ Returns:
162
+ torch.Tensor: Output tensor (#batch, time1, d_model).
163
+ torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
164
+ where `cache_t == chunk_size * num_decoding_left_chunks`
165
+ and `head * d_k == size`
166
+
167
+ """
168
+ q, k, v = self.forward_qkv(query, key, value)
169
+
170
+ # NOTE(xcsong):
171
+ # when export onnx model, for 1st chunk, we feed
172
+ # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
173
+ # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
174
+ # In all modes, `if cache.size(0) > 0` will always be `True`
175
+ # and we will always do splitting and
176
+ # concatenation (this will simplify onnx export). Note that
177
+ # it's OK to concat & split zero-shaped tensors(see code below).
178
+ # when export jit model, for 1st chunk, we always feed
179
+ # cache(0, 0, 0, 0) since jit supports dynamic if-branch.
180
+ # >>> a = torch.ones((1, 2, 0, 4))
181
+ # >>> b = torch.ones((1, 2, 3, 4))
182
+ # >>> c = torch.cat((a, b), dim=2)
183
+ # >>> torch.equal(b, c) # True
184
+ # >>> d = torch.split(a, 2, dim=-1)
185
+ # >>> torch.equal(d[0], d[1]) # True
186
+ if cache.size(0) > 0:
187
+ key_cache, value_cache = torch.split(cache,
188
+ cache.size(-1) // 2,
189
+ dim=-1)
190
+ k = torch.cat([key_cache, k], dim=2)
191
+ v = torch.cat([value_cache, v], dim=2)
192
+ # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
193
+ # non-trivial to calculate `next_cache_start` here.
194
+ new_cache = torch.cat((k, v), dim=-1)
195
+
196
+ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
197
+ return self.forward_attention(v, scores, mask), new_cache
198
+
199
+
200
+ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
201
+ """Multi-Head Attention layer with relative position encoding.
202
+ Paper: https://arxiv.org/abs/1901.02860
203
+ Args:
204
+ n_head (int): The number of heads.
205
+ n_feat (int): The number of features.
206
+ dropout_rate (float): Dropout rate.
207
+ """
208
+
209
+ def __init__(self,
210
+ n_head: int,
211
+ n_feat: int,
212
+ dropout_rate: float,
213
+ key_bias: bool = True):
214
+ """Construct a RelPositionMultiHeadedAttention object."""
215
+ super().__init__(n_head, n_feat, dropout_rate, key_bias)
216
+ # linear transformation for positional encoding
217
+ self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
218
+ # these two learnable bias are used in matrix c and matrix d
219
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
220
+ self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
221
+ self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
222
+ torch.nn.init.xavier_uniform_(self.pos_bias_u)
223
+ torch.nn.init.xavier_uniform_(self.pos_bias_v)
224
+
225
+ def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
226
+ """Compute relative positional encoding.
227
+
228
+ Args:
229
+ x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
230
+ time1 means the length of query vector.
231
+
232
+ Returns:
233
+ torch.Tensor: Output tensor.
234
+
235
+ """
236
+ zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
237
+ device=x.device,
238
+ dtype=x.dtype)
239
+ x_padded = torch.cat([zero_pad, x], dim=-1)
240
+
241
+ x_padded = x_padded.view(x.size()[0],
242
+ x.size()[1],
243
+ x.size(3) + 1, x.size(2))
244
+ x = x_padded[:, :, 1:].view_as(x)[
245
+ :, :, :, : x.size(-1) // 2 + 1
246
+ ] # only keep the positions from 0 to time2
247
+ return x
248
+
249
+ def forward(
250
+ self,
251
+ query: torch.Tensor,
252
+ key: torch.Tensor,
253
+ value: torch.Tensor,
254
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
255
+ pos_emb: torch.Tensor = torch.empty(0),
256
+ cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
257
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
258
+ """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
259
+ Args:
260
+ query (torch.Tensor): Query tensor (#batch, time1, size).
261
+ key (torch.Tensor): Key tensor (#batch, time2, size).
262
+ value (torch.Tensor): Value tensor (#batch, time2, size).
263
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
264
+ (#batch, time1, time2), (0, 0, 0) means fake mask.
265
+ pos_emb (torch.Tensor): Positional embedding tensor
266
+ (#batch, time2, size).
267
+ cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
268
+ where `cache_t == chunk_size * num_decoding_left_chunks`
269
+ and `head * d_k == size`
270
+ Returns:
271
+ torch.Tensor: Output tensor (#batch, time1, d_model).
272
+ torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
273
+ where `cache_t == chunk_size * num_decoding_left_chunks`
274
+ and `head * d_k == size`
275
+ """
276
+ q, k, v = self.forward_qkv(query, key, value)
277
+ q = q.transpose(1, 2) # (batch, time1, head, d_k)
278
+
279
+ # NOTE(xcsong):
280
+ # when export onnx model, for 1st chunk, we feed
281
+ # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
282
+ # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
283
+ # In all modes, `if cache.size(0) > 0` will always be `True`
284
+ # and we will always do splitting and
285
+ # concatenation (this will simplify onnx export). Note that
286
+ # it's OK to concat & split zero-shaped tensors(see code below).
287
+ # when export jit model, for 1st chunk, we always feed
288
+ # cache(0, 0, 0, 0) since jit supports dynamic if-branch.
289
+ # >>> a = torch.ones((1, 2, 0, 4))
290
+ # >>> b = torch.ones((1, 2, 3, 4))
291
+ # >>> c = torch.cat((a, b), dim=2)
292
+ # >>> torch.equal(b, c) # True
293
+ # >>> d = torch.split(a, 2, dim=-1)
294
+ # >>> torch.equal(d[0], d[1]) # True
295
+ if cache.size(0) > 0:
296
+ key_cache, value_cache = torch.split(cache,
297
+ cache.size(-1) // 2,
298
+ dim=-1)
299
+ k = torch.cat([key_cache, k], dim=2)
300
+ v = torch.cat([value_cache, v], dim=2)
301
+ # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
302
+ # non-trivial to calculate `next_cache_start` here.
303
+ new_cache = torch.cat((k, v), dim=-1)
304
+
305
+ n_batch_pos = pos_emb.size(0)
306
+ p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
307
+ p = p.transpose(1, 2) # (batch, head, time1, d_k)
308
+
309
+ # (batch, head, time1, d_k)
310
+ q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
311
+ # (batch, head, time1, d_k)
312
+ q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
313
+
314
+ # compute attention score
315
+ # first compute matrix a and matrix c
316
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
317
+ # (batch, head, time1, time2)
318
+ matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
319
+
320
+ # compute matrix b and matrix d
321
+ # (batch, head, time1, time2)
322
+ matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
323
+ # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
324
+ if matrix_ac.shape != matrix_bd.shape:
325
+ matrix_bd = self.rel_shift(matrix_bd)
326
+
327
+ scores = (matrix_ac + matrix_bd) / math.sqrt(
328
+ self.d_k) # (batch, head, time1, time2)
329
+
330
+ return self.forward_attention(v, scores, mask), new_cache
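
A minimal sketch of how the K/V cache in `MultiHeadedAttention.forward` grows across chunks. The sizes are arbitrary assumptions, and the empty mask/cache follow the "fake mask" convention documented in the code above:

import torch
from cosyvoice.transformer.attention import MultiHeadedAttention

attn = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0).eval()
fake_mask = torch.ones(0, 0, 0, dtype=torch.bool)   # (0, 0, 0) means "no mask"
cache = torch.zeros(0, 0, 0, 0)                      # empty cache for the first chunk
chunk1 = torch.randn(1, 8, 256)
with torch.no_grad():
    out, cache = attn(chunk1, chunk1, chunk1, mask=fake_mask, cache=cache)
    # cache now holds concatenated K/V: (1, n_head, 8, 2 * d_k) == (1, 4, 8, 128)
    chunk2 = torch.randn(1, 4, 256)
    out, cache = attn(chunk2, chunk2, chunk2, mask=fake_mask, cache=cache)
print(out.shape, cache.shape)                        # (1, 4, 256) and (1, 4, 12, 128)
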
third_party/cosyvoice/transformer/decoder.py ADDED
@@ -0,0 +1,396 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
2
+ # 2024 Alibaba Inc (Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Decoder definition."""
17
+ from typing import Tuple, List, Optional
18
+
19
+ import torch
20
+ import torch.utils.checkpoint as ckpt
21
+ import logging
22
+
23
+ from cosyvoice.transformer.decoder_layer import DecoderLayer
24
+ from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
25
+ from cosyvoice.utils.class_utils import (
26
+ COSYVOICE_EMB_CLASSES,
27
+ COSYVOICE_ATTENTION_CLASSES,
28
+ COSYVOICE_ACTIVATION_CLASSES,
29
+ )
30
+ from cosyvoice.utils.mask import (subsequent_mask, make_pad_mask)
31
+
32
+
33
+ class TransformerDecoder(torch.nn.Module):
34
+ """Base class of Transformer decoder module.
35
+ Args:
36
+ vocab_size: output dim
37
+ encoder_output_size: dimension of attention
38
+ attention_heads: the number of heads of multi head attention
39
+ linear_units: the hidden units number of position-wise feedforward
40
+ num_blocks: the number of decoder blocks
41
+ dropout_rate: dropout rate
42
+ self_attention_dropout_rate: dropout rate for attention
43
+ input_layer: input layer type
44
+ use_output_layer: whether to use output layer
45
+ pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
46
+ normalize_before:
47
+ True: use layer_norm before each sub-block of a layer.
48
+ False: use layer_norm after each sub-block of a layer.
49
+ src_attention: if false, encoder-decoder cross attention is not
50
+ applied, such as CIF model
51
+ key_bias: whether use bias in attention.linear_k, False for whisper models.
52
+ gradient_checkpointing: rerunning a forward-pass segment for each
53
+ checkpointed segment during backward.
54
+ tie_word_embedding: Tie or clone module weights depending on whether we are
55
+ using TorchScript or not
56
+ """
57
+
58
+ def __init__(
59
+ self,
60
+ vocab_size: int,
61
+ encoder_output_size: int,
62
+ attention_heads: int = 4,
63
+ linear_units: int = 2048,
64
+ num_blocks: int = 6,
65
+ dropout_rate: float = 0.1,
66
+ positional_dropout_rate: float = 0.1,
67
+ self_attention_dropout_rate: float = 0.0,
68
+ src_attention_dropout_rate: float = 0.0,
69
+ input_layer: str = "embed",
70
+ use_output_layer: bool = True,
71
+ normalize_before: bool = True,
72
+ src_attention: bool = True,
73
+ key_bias: bool = True,
74
+ activation_type: str = "relu",
75
+ gradient_checkpointing: bool = False,
76
+ tie_word_embedding: bool = False,
77
+ ):
78
+ super().__init__()
79
+ attention_dim = encoder_output_size
80
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
81
+
82
+ self.embed = torch.nn.Sequential(
83
+ torch.nn.Identity() if input_layer == "no_pos" else
84
+ torch.nn.Embedding(vocab_size, attention_dim),
85
+ COSYVOICE_EMB_CLASSES[input_layer](attention_dim,
86
+ positional_dropout_rate),
87
+ )
88
+
89
+ self.normalize_before = normalize_before
90
+ self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5)
91
+ self.use_output_layer = use_output_layer
92
+ if use_output_layer:
93
+ self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
94
+ else:
95
+ self.output_layer = torch.nn.Identity()
96
+ self.num_blocks = num_blocks
97
+ self.decoders = torch.nn.ModuleList([
98
+ DecoderLayer(
99
+ attention_dim,
100
+ COSYVOICE_ATTENTION_CLASSES["selfattn"](
101
+ attention_heads, attention_dim,
102
+ self_attention_dropout_rate, key_bias),
103
+ COSYVOICE_ATTENTION_CLASSES["selfattn"](
104
+ attention_heads, attention_dim, src_attention_dropout_rate,
105
+ key_bias) if src_attention else None,
106
+ PositionwiseFeedForward(attention_dim, linear_units,
107
+ dropout_rate, activation),
108
+ dropout_rate,
109
+ normalize_before,
110
+ ) for _ in range(self.num_blocks)
111
+ ])
112
+
113
+ self.gradient_checkpointing = gradient_checkpointing
114
+ self.tie_word_embedding = tie_word_embedding
115
+
116
+ def forward(
117
+ self,
118
+ memory: torch.Tensor,
119
+ memory_mask: torch.Tensor,
120
+ ys_in_pad: torch.Tensor,
121
+ ys_in_lens: torch.Tensor,
122
+ r_ys_in_pad: torch.Tensor = torch.empty(0),
123
+ reverse_weight: float = 0.0,
124
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
125
+ """Forward decoder.
126
+ Args:
127
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
128
+ memory_mask: encoder memory mask, (batch, 1, maxlen_in)
129
+ ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
130
+ ys_in_lens: input lengths of this batch (batch)
131
+ r_ys_in_pad: not used in transformer decoder, in order to unify api
132
+ with bidirectional decoder
133
+ reverse_weight: not used in transformer decoder, in order to unify
134
+ api with bidirectional decoder
135
+ Returns:
136
+ (tuple): tuple containing:
137
+ x: decoded token score before softmax (batch, maxlen_out,
138
+ vocab_size) if use_output_layer is True,
139
+ torch.tensor(0.0), in order to unify api with bidirectional decoder
140
+ olens: (batch, )
141
+ NOTE(xcsong):
142
+ We pass the `__call__` method of the modules instead of `forward` to the
143
+ checkpointing API because `__call__` attaches all the hooks of the module.
144
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
145
+ """
146
+ tgt = ys_in_pad
147
+ maxlen = tgt.size(1)
148
+ # tgt_mask: (B, 1, L)
149
+ tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1)
150
+ tgt_mask = tgt_mask.to(tgt.device)
151
+ # m: (1, L, L)
152
+ m = subsequent_mask(tgt_mask.size(-1),
153
+ device=tgt_mask.device).unsqueeze(0)
154
+ # tgt_mask: (B, L, L)
155
+ tgt_mask = tgt_mask & m
156
+ x, _ = self.embed(tgt)
157
+ if self.gradient_checkpointing and self.training:
158
+ x = self.forward_layers_checkpointed(x, tgt_mask, memory,
159
+ memory_mask)
160
+ else:
161
+ x = self.forward_layers(x, tgt_mask, memory, memory_mask)
162
+ if self.normalize_before:
163
+ x = self.after_norm(x)
164
+ if self.use_output_layer:
165
+ x = self.output_layer(x)
166
+ olens = tgt_mask.sum(1)
167
+ return x, torch.tensor(0.0), olens
168
+
169
+ def forward_layers(self, x: torch.Tensor, tgt_mask: torch.Tensor,
170
+ memory: torch.Tensor,
171
+ memory_mask: torch.Tensor) -> torch.Tensor:
172
+ for layer in self.decoders:
173
+ x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory,
174
+ memory_mask)
175
+ return x
176
+
177
+ @torch.jit.unused
178
+ def forward_layers_checkpointed(self, x: torch.Tensor,
179
+ tgt_mask: torch.Tensor,
180
+ memory: torch.Tensor,
181
+ memory_mask: torch.Tensor) -> torch.Tensor:
182
+ for layer in self.decoders:
183
+ x, tgt_mask, memory, memory_mask = ckpt.checkpoint(
184
+ layer.__call__, x, tgt_mask, memory, memory_mask)
185
+ return x
186
+
187
+ def forward_one_step(
188
+ self,
189
+ memory: torch.Tensor,
190
+ memory_mask: torch.Tensor,
191
+ tgt: torch.Tensor,
192
+ tgt_mask: torch.Tensor,
193
+ cache: Optional[List[torch.Tensor]] = None,
194
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
195
+ """Forward one step.
196
+ This is only used for decoding.
197
+ Args:
198
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
199
+ memory_mask: encoded memory mask, (batch, 1, maxlen_in)
200
+ tgt: input token ids, int64 (batch, maxlen_out)
201
+ tgt_mask: input token mask, (batch, maxlen_out)
202
+ dtype=torch.uint8 in PyTorch 1.2-
203
+ dtype=torch.bool in PyTorch 1.2+ (include 1.2)
204
+ cache: cached output list of (batch, max_time_out-1, size)
205
+ Returns:
206
+ y, cache: NN output value and cache per `self.decoders`.
207
+ `y.shape` is (batch, maxlen_out, token)
208
+ """
209
+ x, _ = self.embed(tgt)
210
+ new_cache = []
211
+ for i, decoder in enumerate(self.decoders):
212
+ if cache is None:
213
+ c = None
214
+ else:
215
+ c = cache[i]
216
+ x, tgt_mask, memory, memory_mask = decoder(x,
217
+ tgt_mask,
218
+ memory,
219
+ memory_mask,
220
+ cache=c)
221
+ new_cache.append(x)
222
+ if self.normalize_before:
223
+ y = self.after_norm(x[:, -1])
224
+ else:
225
+ y = x[:, -1]
226
+ if self.use_output_layer:
227
+ y = torch.log_softmax(self.output_layer(y), dim=-1)
228
+ return y, new_cache
229
+
230
+ def tie_or_clone_weights(self, jit_mode: bool = True):
231
+ """Tie or clone module weights (between word_emb and output_layer)
232
+ depending on whether we are using TorchScript or not"""
233
+ if not self.use_output_layer:
234
+ return
235
+ if jit_mode:
236
+ logging.info("clone emb.weight to output.weight")
237
+ self.output_layer.weight = torch.nn.Parameter(
238
+ self.embed[0].weight.clone())
239
+ else:
240
+ logging.info("tie emb.weight with output.weight")
241
+ self.output_layer.weight = self.embed[0].weight
242
+
243
+ if getattr(self.output_layer, "bias", None) is not None:
244
+ self.output_layer.bias.data = torch.nn.functional.pad(
245
+ self.output_layer.bias.data,
246
+ (
247
+ 0,
248
+ self.output_layer.weight.shape[0] -
249
+ self.output_layer.bias.shape[0],
250
+ ),
251
+ "constant",
252
+ 0,
253
+ )
254
+
255
+
256
+ class BiTransformerDecoder(torch.nn.Module):
257
+ """Base class of Transformer decoder module.
258
+ Args:
259
+ vocab_size: output dim
260
+ encoder_output_size: dimension of attention
261
+ attention_heads: the number of heads of multi head attention
262
+ linear_units: the hidden units number of position-wise feedforward
263
+ num_blocks: the number of decoder blocks
264
+ r_num_blocks: the number of right to left decoder blocks
265
+ dropout_rate: dropout rate
266
+ self_attention_dropout_rate: dropout rate for attention
267
+ input_layer: input layer type
268
+ use_output_layer: whether to use output layer
269
+ pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
270
+ normalize_before:
271
+ True: use layer_norm before each sub-block of a layer.
272
+ False: use layer_norm after each sub-block of a layer.
273
+ key_bias: whether use bias in attention.linear_k, False for whisper models.
274
+ """
275
+
276
+ def __init__(
277
+ self,
278
+ vocab_size: int,
279
+ encoder_output_size: int,
280
+ attention_heads: int = 4,
281
+ linear_units: int = 2048,
282
+ num_blocks: int = 6,
283
+ r_num_blocks: int = 0,
284
+ dropout_rate: float = 0.1,
285
+ positional_dropout_rate: float = 0.1,
286
+ self_attention_dropout_rate: float = 0.0,
287
+ src_attention_dropout_rate: float = 0.0,
288
+ input_layer: str = "embed",
289
+ use_output_layer: bool = True,
290
+ normalize_before: bool = True,
291
+ key_bias: bool = True,
292
+ gradient_checkpointing: bool = False,
293
+ tie_word_embedding: bool = False,
294
+ ):
295
+
296
+ super().__init__()
297
+ self.tie_word_embedding = tie_word_embedding
298
+ self.left_decoder = TransformerDecoder(
299
+ vocab_size,
300
+ encoder_output_size,
301
+ attention_heads,
302
+ linear_units,
303
+ num_blocks,
304
+ dropout_rate,
305
+ positional_dropout_rate,
306
+ self_attention_dropout_rate,
307
+ src_attention_dropout_rate,
308
+ input_layer,
309
+ use_output_layer,
310
+ normalize_before,
311
+ key_bias=key_bias,
312
+ gradient_checkpointing=gradient_checkpointing,
313
+ tie_word_embedding=tie_word_embedding)
314
+
315
+ self.right_decoder = TransformerDecoder(
316
+ vocab_size,
317
+ encoder_output_size,
318
+ attention_heads,
319
+ linear_units,
320
+ r_num_blocks,
321
+ dropout_rate,
322
+ positional_dropout_rate,
323
+ self_attention_dropout_rate,
324
+ src_attention_dropout_rate,
325
+ input_layer,
326
+ use_output_layer,
327
+ normalize_before,
328
+ key_bias=key_bias,
329
+ gradient_checkpointing=gradient_checkpointing,
330
+ tie_word_embedding=tie_word_embedding)
331
+
332
+ def forward(
333
+ self,
334
+ memory: torch.Tensor,
335
+ memory_mask: torch.Tensor,
336
+ ys_in_pad: torch.Tensor,
337
+ ys_in_lens: torch.Tensor,
338
+ r_ys_in_pad: torch.Tensor,
339
+ reverse_weight: float = 0.0,
340
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
341
+ """Forward decoder.
342
+ Args:
343
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
344
+ memory_mask: encoder memory mask, (batch, 1, maxlen_in)
345
+ ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
346
+ ys_in_lens: input lengths of this batch (batch)
347
+ r_ys_in_pad: padded input token ids, int64 (batch, maxlen_out),
348
+ used for right to left decoder
349
+ reverse_weight: used for right to left decoder
350
+ Returns:
351
+ (tuple): tuple containing:
352
+ x: decoded token score before softmax (batch, maxlen_out,
353
+ vocab_size) if use_output_layer is True,
354
+ r_x: x: decoded token score (right to left decoder)
355
+ before softmax (batch, maxlen_out, vocab_size)
356
+ if use_output_layer is True,
357
+ olens: (batch, )
358
+ """
359
+ l_x, _, olens = self.left_decoder(memory, memory_mask, ys_in_pad,
360
+ ys_in_lens)
361
+ r_x = torch.tensor(0.0)
362
+ if reverse_weight > 0.0:
363
+ r_x, _, olens = self.right_decoder(memory, memory_mask,
364
+ r_ys_in_pad, ys_in_lens)
365
+ return l_x, r_x, olens
366
+
367
+ def forward_one_step(
368
+ self,
369
+ memory: torch.Tensor,
370
+ memory_mask: torch.Tensor,
371
+ tgt: torch.Tensor,
372
+ tgt_mask: torch.Tensor,
373
+ cache: Optional[List[torch.Tensor]] = None,
374
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
375
+ """Forward one step.
376
+ This is only used for decoding.
377
+ Args:
378
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
379
+ memory_mask: encoded memory mask, (batch, 1, maxlen_in)
380
+ tgt: input token ids, int64 (batch, maxlen_out)
381
+ tgt_mask: input token mask, (batch, maxlen_out)
382
+ dtype=torch.uint8 in PyTorch 1.2-
383
+ dtype=torch.bool in PyTorch 1.2+ (include 1.2)
384
+ cache: cached output list of (batch, max_time_out-1, size)
385
+ Returns:
386
+ y, cache: NN output value and cache per `self.decoders`.
387
+ `y.shape` is (batch, maxlen_out, token)
388
+ """
389
+ return self.left_decoder.forward_one_step(memory, memory_mask, tgt,
390
+ tgt_mask, cache)
391
+
392
+ def tie_or_clone_weights(self, jit_mode: bool = True):
393
+ """Tie or clone module weights (between word_emb and output_layer)
394
+ depending on whether we are using TorchScript or not"""
395
+ self.left_decoder.tie_or_clone_weights(jit_mode)
396
+ self.right_decoder.tie_or_clone_weights(jit_mode)
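
A toy forward pass through `TransformerDecoder`, just to make the expected tensor shapes concrete; the sizes below are assumptions, real values come from the model config:

import torch
from cosyvoice.transformer.decoder import TransformerDecoder

decoder = TransformerDecoder(vocab_size=100, encoder_output_size=256,
                             attention_heads=4, linear_units=512, num_blocks=2).eval()
memory = torch.randn(2, 20, 256)                       # (batch, maxlen_in, feat) from the encoder
memory_mask = torch.ones(2, 1, 20, dtype=torch.bool)   # (batch, 1, maxlen_in)
ys_in_pad = torch.randint(0, 100, (2, 7))              # (batch, maxlen_out) padded token ids
ys_in_lens = torch.tensor([7, 5])
with torch.no_grad():
    logits, _, _ = decoder(memory, memory_mask, ys_in_pad, ys_in_lens)
print(logits.shape)                                    # torch.Size([2, 7, 100])
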
third_party/cosyvoice/transformer/decoder_layer.py ADDED
@@ -0,0 +1,132 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Decoder self-attention layer definition."""
16
+ from typing import Optional, Tuple
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+
22
+ class DecoderLayer(nn.Module):
23
+ """Single decoder layer module.
24
+
25
+ Args:
26
+ size (int): Input dimension.
27
+ self_attn (torch.nn.Module): Self-attention module instance.
28
+ `MultiHeadedAttention` instance can be used as the argument.
29
+ src_attn (torch.nn.Module): Inter-attention module instance.
30
+ `MultiHeadedAttention` instance can be used as the argument.
31
+ If `None` is passed, Inter-attention is not used, such as
32
+ CIF, GPT, and other decoder only model.
33
+ feed_forward (torch.nn.Module): Feed-forward module instance.
34
+ `PositionwiseFeedForward` instance can be used as the argument.
35
+ dropout_rate (float): Dropout rate.
36
+ normalize_before (bool):
37
+ True: use layer_norm before each sub-block.
38
+ False: use layer_norm after each sub-block.
39
+ """
40
+
41
+ def __init__(
42
+ self,
43
+ size: int,
44
+ self_attn: nn.Module,
45
+ src_attn: Optional[nn.Module],
46
+ feed_forward: nn.Module,
47
+ dropout_rate: float,
48
+ normalize_before: bool = True,
49
+ ):
50
+ """Construct an DecoderLayer object."""
51
+ """Construct a DecoderLayer object."""
52
+ self.size = size
53
+ self.self_attn = self_attn
54
+ self.src_attn = src_attn
55
+ self.feed_forward = feed_forward
56
+ self.norm1 = nn.LayerNorm(size, eps=1e-5)
57
+ self.norm2 = nn.LayerNorm(size, eps=1e-5)
58
+ self.norm3 = nn.LayerNorm(size, eps=1e-5)
59
+ self.dropout = nn.Dropout(dropout_rate)
60
+ self.normalize_before = normalize_before
61
+
62
+ def forward(
63
+ self,
64
+ tgt: torch.Tensor,
65
+ tgt_mask: torch.Tensor,
66
+ memory: torch.Tensor,
67
+ memory_mask: torch.Tensor,
68
+ cache: Optional[torch.Tensor] = None
69
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
70
+ """Compute decoded features.
71
+
72
+ Args:
73
+ tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
74
+ tgt_mask (torch.Tensor): Mask for input tensor
75
+ (#batch, maxlen_out).
76
+ memory (torch.Tensor): Encoded memory
77
+ (#batch, maxlen_in, size).
78
+ memory_mask (torch.Tensor): Encoded memory mask
79
+ (#batch, maxlen_in).
80
+ cache (torch.Tensor): cached tensors.
81
+ (#batch, maxlen_out - 1, size).
82
+
83
+ Returns:
84
+ torch.Tensor: Output tensor (#batch, maxlen_out, size).
85
+ torch.Tensor: Mask for output tensor (#batch, maxlen_out).
86
+ torch.Tensor: Encoded memory (#batch, maxlen_in, size).
87
+ torch.Tensor: Encoded memory mask (#batch, maxlen_in).
88
+
89
+ """
90
+ residual = tgt
91
+ if self.normalize_before:
92
+ tgt = self.norm1(tgt)
93
+
94
+ if cache is None:
95
+ tgt_q = tgt
96
+ tgt_q_mask = tgt_mask
97
+ else:
98
+ # compute only the last frame query keeping dim: max_time_out -> 1
99
+ assert cache.shape == (
100
+ tgt.shape[0],
101
+ tgt.shape[1] - 1,
102
+ self.size,
103
+ ), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
104
+ tgt_q = tgt[:, -1:, :]
105
+ residual = residual[:, -1:, :]
106
+ tgt_q_mask = tgt_mask[:, -1:, :]
107
+
108
+ x = residual + self.dropout(
109
+ self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0])
110
+ if not self.normalize_before:
111
+ x = self.norm1(x)
112
+
113
+ if self.src_attn is not None:
114
+ residual = x
115
+ if self.normalize_before:
116
+ x = self.norm2(x)
117
+ x = residual + self.dropout(
118
+ self.src_attn(x, memory, memory, memory_mask)[0])
119
+ if not self.normalize_before:
120
+ x = self.norm2(x)
121
+
122
+ residual = x
123
+ if self.normalize_before:
124
+ x = self.norm3(x)
125
+ x = residual + self.dropout(self.feed_forward(x))
126
+ if not self.normalize_before:
127
+ x = self.norm3(x)
128
+
129
+ if cache is not None:
130
+ x = torch.cat([cache, x], dim=1)
131
+
132
+ return x, tgt_mask, memory, memory_mask
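A minimal sketch of the incremental-decoding cache contract implemented above, assuming the wenet-style MultiHeadedAttention and PositionwiseFeedForward modules shipped alongside this file; all shapes and hyper-parameters are illustrative, not CosyVoice defaults:

```python
import torch
from cosyvoice.transformer.attention import MultiHeadedAttention
from cosyvoice.transformer.decoder_layer import DecoderLayer
from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward

size = 256
layer = DecoderLayer(
    size,
    self_attn=MultiHeadedAttention(4, size, 0.0),
    src_attn=MultiHeadedAttention(4, size, 0.0),
    feed_forward=PositionwiseFeedForward(size, 1024, 0.0),
    dropout_rate=0.0,
)
tgt = torch.randn(1, 5, size)                       # 5 target positions decoded so far
tgt_mask = torch.tril(torch.ones(1, 5, 5)).bool()   # causal self-attention mask
memory = torch.randn(1, 12, size)                   # encoder output
memory_mask = torch.ones(1, 1, 12).bool()

# full pass over all 5 positions
x_full, _, _, _ = layer(tgt, tgt_mask, memory, memory_mask)

# incremental pass: feed the first 4 outputs back as `cache`; only the last
# query position is recomputed and concatenated onto the cache
x_inc, _, _, _ = layer(tgt, tgt_mask, memory, memory_mask, cache=x_full[:, :-1, :])
assert x_inc.shape == (1, 5, size)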
third_party/cosyvoice/transformer/embedding.py ADDED
@@ -0,0 +1,294 @@
1
+ # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
2
+ # 2024 Alibaba Inc (Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Positional Encoding Module."""
17
+
18
+ import math
19
+ from typing import Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn.functional as F
23
+ import numpy as np
24
+
25
+
26
+ class PositionalEncoding(torch.nn.Module):
27
+ """Positional encoding.
28
+
29
+ :param int d_model: embedding dim
30
+ :param float dropout_rate: dropout rate
31
+ :param int max_len: maximum input length
32
+
33
+ PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
34
+ PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
35
+ """
36
+
37
+ def __init__(self,
38
+ d_model: int,
39
+ dropout_rate: float,
40
+ max_len: int = 5000,
41
+ reverse: bool = False):
42
+ """Construct a PositionalEncoding object."""
43
+ super().__init__()
44
+ self.d_model = d_model
45
+ self.xscale = math.sqrt(self.d_model)
46
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
47
+ self.max_len = max_len
48
+
49
+ self.pe = torch.zeros(self.max_len, self.d_model)
50
+ position = torch.arange(0, self.max_len,
51
+ dtype=torch.float32).unsqueeze(1)
52
+ div_term = torch.exp(
53
+ torch.arange(0, self.d_model, 2, dtype=torch.float32) *
54
+ -(math.log(10000.0) / self.d_model))
55
+ self.pe[:, 0::2] = torch.sin(position * div_term)
56
+ self.pe[:, 1::2] = torch.cos(position * div_term)
57
+ self.pe = self.pe.unsqueeze(0)
58
+
59
+ def forward(self,
60
+ x: torch.Tensor,
61
+ offset: Union[int, torch.Tensor] = 0) \
62
+ -> Tuple[torch.Tensor, torch.Tensor]:
63
+ """Add positional encoding.
64
+
65
+ Args:
66
+ x (torch.Tensor): Input. Its shape is (batch, time, ...)
67
+ offset (int, torch.tensor): position offset
68
+
69
+ Returns:
70
+ torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
71
+ torch.Tensor: for compatibility to RelPositionalEncoding
72
+ """
73
+
74
+ self.pe = self.pe.to(x.device)
75
+ pos_emb = self.position_encoding(offset, x.size(1), False)
76
+ x = x * self.xscale + pos_emb
77
+ return self.dropout(x), self.dropout(pos_emb)
78
+
79
+ def position_encoding(self,
80
+ offset: Union[int, torch.Tensor],
81
+ size: int,
82
+ apply_dropout: bool = True) -> torch.Tensor:
83
+ """ For getting encoding in a streaming fashion
84
+
85
+ Attention!!!!!
86
+ we apply dropout only once at the whole utterance level in a
87
+ non-streaming way, but will call this function several times with
88
+ increasing input size in a streaming scenario, so the dropout will
89
+ be applied several times.
90
+
91
+ Args:
92
+ offset (int or torch.tensor): start offset
93
+ size (int): required size of position encoding
94
+
95
+ Returns:
96
+ torch.Tensor: Corresponding encoding
97
+ """
98
+ # How to subscript a Union type:
99
+ # https://github.com/pytorch/pytorch/issues/69434
100
+ if isinstance(offset, int):
101
+ assert offset + size <= self.max_len
102
+ pos_emb = self.pe[:, offset:offset + size]
103
+ elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar
104
+ assert offset + size <= self.max_len
105
+ pos_emb = self.pe[:, offset:offset + size]
106
+ else: # for batched streaming decoding on GPU
107
+ assert torch.max(offset) + size <= self.max_len
108
+ index = offset.unsqueeze(1) + \
109
+ torch.arange(0, size).to(offset.device) # B X T
110
+ flag = index > 0
111
+ # remove negative offset
112
+ index = index * flag
113
+ pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model
114
+
115
+ if apply_dropout:
116
+ pos_emb = self.dropout(pos_emb)
117
+ return pos_emb
118
+
119
+
120
+ class RelPositionalEncoding(PositionalEncoding):
121
+ """Relative positional encoding module.
122
+ See : Appendix B in https://arxiv.org/abs/1901.02860
123
+ Args:
124
+ d_model (int): Embedding dimension.
125
+ dropout_rate (float): Dropout rate.
126
+ max_len (int): Maximum input length.
127
+ """
128
+
129
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
130
+ """Initialize class."""
131
+ super().__init__(d_model, dropout_rate, max_len, reverse=True)
132
+
133
+ def forward(self,
134
+ x: torch.Tensor,
135
+ offset: Union[int, torch.Tensor] = 0) \
136
+ -> Tuple[torch.Tensor, torch.Tensor]:
137
+ """Compute positional encoding.
138
+ Args:
139
+ x (torch.Tensor): Input tensor (batch, time, `*`).
140
+ Returns:
141
+ torch.Tensor: Encoded tensor (batch, time, `*`).
142
+ torch.Tensor: Positional embedding tensor (1, time, `*`).
143
+ """
144
+ self.pe = self.pe.to(x.device)
145
+ x = x * self.xscale
146
+ pos_emb = self.position_encoding(offset, x.size(1), False)
147
+ return self.dropout(x), self.dropout(pos_emb)
148
+
149
+
150
+ class WhisperPositionalEncoding(PositionalEncoding):
151
+ """ Sinusoids position encoding used in openai-whisper.encoder
152
+ """
153
+
154
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
155
+ super().__init__(d_model, dropout_rate, max_len)
156
+ self.xscale = 1.0
157
+ log_timescale_increment = np.log(10000) / (d_model // 2 - 1)
158
+ inv_timescales = torch.exp(-log_timescale_increment *
159
+ torch.arange(d_model // 2))
160
+ scaled_time = torch.arange(max_len)[:, np.newaxis] * \
161
+ inv_timescales[np.newaxis, :]
162
+ pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
163
+ delattr(self, "pe")
164
+ self.register_buffer("pe", pe.unsqueeze(0))
165
+
166
+
167
+ class LearnablePositionalEncoding(PositionalEncoding):
168
+ """ Learnable position encoding used in openai-whisper.decoder
169
+ """
170
+
171
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
172
+ super().__init__(d_model, dropout_rate, max_len)
173
+ # NOTE(xcsong): overwrite self.pe & self.xscale
174
+ self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
175
+ self.xscale = 1.0
176
+
177
+
178
+ class NoPositionalEncoding(torch.nn.Module):
179
+ """ No position encoding
180
+ """
181
+
182
+ def __init__(self, d_model: int, dropout_rate: float):
183
+ super().__init__()
184
+ self.d_model = d_model
185
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
186
+
187
+ def forward(self,
188
+ x: torch.Tensor,
189
+ offset: Union[int, torch.Tensor] = 0) \
190
+ -> Tuple[torch.Tensor, torch.Tensor]:
191
+ """ Just return zero vector for interface compatibility
192
+ """
193
+ pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
194
+ return self.dropout(x), pos_emb
195
+
196
+ def position_encoding(self, offset: Union[int, torch.Tensor],
197
+ size: int) -> torch.Tensor:
198
+ return torch.zeros(1, size, self.d_model)
199
+
200
+
201
+ class EspnetRelPositionalEncoding(torch.nn.Module):
202
+ """Relative positional encoding module (new implementation).
203
+
204
+ Details can be found in https://github.com/espnet/espnet/pull/2816.
205
+
206
+ See : Appendix B in https://arxiv.org/abs/1901.02860
207
+
208
+ Args:
209
+ d_model (int): Embedding dimension.
210
+ dropout_rate (float): Dropout rate.
211
+ max_len (int): Maximum input length.
212
+
213
+ """
214
+
215
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
216
+ """Construct a PositionalEncoding object."""
217
+ super(EspnetRelPositionalEncoding, self).__init__()
218
+ self.d_model = d_model
219
+ self.xscale = math.sqrt(self.d_model)
220
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
221
+ self.pe = None
222
+ self.extend_pe(torch.tensor(0.0).expand(1, max_len))
223
+
224
+ def extend_pe(self, x: torch.Tensor):
225
+ """Reset the positional encodings."""
226
+ if self.pe is not None:
227
+ # self.pe contains both positive and negative parts
228
+ # the length of self.pe is 2 * input_len - 1
229
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
230
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
231
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
232
+ return
233
+ # Suppose `i` means the position of the query vector and `j` means the
234
+ # position of the key vector. We use positive relative positions when keys
235
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
236
+ pe_positive = torch.zeros(x.size(1), self.d_model)
237
+ pe_negative = torch.zeros(x.size(1), self.d_model)
238
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
239
+ div_term = torch.exp(
240
+ torch.arange(0, self.d_model, 2, dtype=torch.float32)
241
+ * -(math.log(10000.0) / self.d_model)
242
+ )
243
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
244
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
245
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
246
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
247
+
248
+ # Reverse the order of positive indices and concat both positive and
249
+ # negative indices. This is used to support the shifting trick
250
+ # as in https://arxiv.org/abs/1901.02860
251
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
252
+ pe_negative = pe_negative[1:].unsqueeze(0)
253
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
254
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
255
+
256
+ def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
257
+ -> Tuple[torch.Tensor, torch.Tensor]:
258
+ """Add positional encoding.
259
+
260
+ Args:
261
+ x (torch.Tensor): Input tensor (batch, time, `*`).
262
+
263
+ Returns:
264
+ torch.Tensor: Encoded tensor (batch, time, `*`).
265
+
266
+ """
267
+ self.extend_pe(x)
268
+ x = x * self.xscale
269
+ pos_emb = self.position_encoding(size=x.size(1), offset=offset)
270
+ return self.dropout(x), self.dropout(pos_emb)
271
+
272
+ def position_encoding(self,
273
+ offset: Union[int, torch.Tensor],
274
+ size: int) -> torch.Tensor:
275
+ """ For getting encoding in a streaming fashion
276
+
277
+ Attention!!!!!
278
+ we apply dropout only once at the whole utterance level in a
279
+ non-streaming way, but will call this function several times with
280
+ increasing input size in a streaming scenario, so the dropout will
281
+ be applied several times.
282
+
283
+ Args:
284
+ offset (int or torch.tensor): start offset
285
+ size (int): required size of position encoding
286
+
287
+ Returns:
288
+ torch.Tensor: Corresponding encoding
289
+ """
290
+ pos_emb = self.pe[
291
+ :,
292
+ self.pe.size(1) // 2 - size + 1: self.pe.size(1) // 2 + size,
293
+ ]
294
+ return pos_emb
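As a quick illustration of the streaming offset contract described in the docstrings above, the absolute PositionalEncoding can be queried chunk by chunk; a hedged sketch with made-up dimensions:

```python
import torch
from cosyvoice.transformer.embedding import PositionalEncoding

pe = PositionalEncoding(d_model=8, dropout_rate=0.0, max_len=100)

# non-streaming: encode a whole 10-frame utterance at once
x = torch.zeros(1, 10, 8)
y_full, _ = pe(x)

# streaming: encode two 5-frame chunks, continuing the position index via `offset`
y1, _ = pe(x[:, :5], offset=0)
y2, _ = pe(x[:, 5:], offset=5)
assert torch.allclose(y_full, torch.cat([y1, y2], dim=1))
```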
third_party/cosyvoice/transformer/encoder.py ADDED
@@ -0,0 +1,474 @@
1
+ # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
2
+ # 2022 Xingchen Song ([email protected])
3
+ # 2024 Alibaba Inc (Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # Modified from ESPnet(https://github.com/espnet/espnet)
17
+ """Encoder definition."""
18
+ from typing import Tuple
19
+
20
+ import torch
21
+ import torch.utils.checkpoint as ckpt
22
+
23
+ from cosyvoice.transformer.convolution import ConvolutionModule
24
+ from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer
25
+ from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
26
+ from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
27
+ from cosyvoice.utils.class_utils import (
28
+ COSYVOICE_EMB_CLASSES,
29
+ COSYVOICE_SUBSAMPLE_CLASSES,
30
+ COSYVOICE_ATTENTION_CLASSES,
31
+ COSYVOICE_ACTIVATION_CLASSES,
32
+ )
33
+ from cosyvoice.utils.mask import make_pad_mask
34
+ from cosyvoice.utils.mask import add_optional_chunk_mask
35
+
36
+
37
+ class BaseEncoder(torch.nn.Module):
38
+
39
+ def __init__(
40
+ self,
41
+ input_size: int,
42
+ output_size: int = 256,
43
+ attention_heads: int = 4,
44
+ linear_units: int = 2048,
45
+ num_blocks: int = 6,
46
+ dropout_rate: float = 0.1,
47
+ positional_dropout_rate: float = 0.1,
48
+ attention_dropout_rate: float = 0.0,
49
+ input_layer: str = "conv2d",
50
+ pos_enc_layer_type: str = "abs_pos",
51
+ normalize_before: bool = True,
52
+ static_chunk_size: int = 0,
53
+ use_dynamic_chunk: bool = False,
54
+ global_cmvn: torch.nn.Module = None,
55
+ use_dynamic_left_chunk: bool = False,
56
+ gradient_checkpointing: bool = False,
57
+ ):
58
+ """
59
+ Args:
60
+ input_size (int): input dim
61
+ output_size (int): dimension of attention
62
+ attention_heads (int): the number of heads of multi head attention
63
+ linear_units (int): the number of hidden units in the position-wise feed
64
+ forward
65
+ num_blocks (int): the number of encoder blocks
66
+ dropout_rate (float): dropout rate
67
+ attention_dropout_rate (float): dropout rate in attention
68
+ positional_dropout_rate (float): dropout rate after adding
69
+ positional encoding
70
+ input_layer (str): input layer type.
71
+ optional [linear, conv2d, conv2d6, conv2d8]
72
+ pos_enc_layer_type (str): Encoder positional encoding layer type.
73
+ optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
74
+ normalize_before (bool):
75
+ True: use layer_norm before each sub-block of a layer.
76
+ False: use layer_norm after each sub-block of a layer.
77
+ static_chunk_size (int): chunk size for static chunk training and
78
+ decoding
79
+ use_dynamic_chunk (bool): whether to use dynamic chunk size for
81
+ training or not. You can only use a fixed chunk (chunk_size > 0)
82
+ or a dynamic chunk size (use_dynamic_chunk = True)
82
+ global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
83
+ use_dynamic_left_chunk (bool): whether to use dynamic left chunk in
84
+ dynamic chunk training
85
+ key_bias: whether to use bias in attention.linear_k; False for whisper models.
86
+ gradient_checkpointing: rerunning a forward-pass segment for each
87
+ checkpointed segment during backward.
88
+ """
89
+ super().__init__()
90
+ self._output_size = output_size
91
+
92
+ self.global_cmvn = global_cmvn
93
+ self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
94
+ input_size,
95
+ output_size,
96
+ dropout_rate,
97
+ COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
98
+ positional_dropout_rate),
99
+ )
100
+
101
+ self.normalize_before = normalize_before
102
+ self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
103
+ self.static_chunk_size = static_chunk_size
104
+ self.use_dynamic_chunk = use_dynamic_chunk
105
+ self.use_dynamic_left_chunk = use_dynamic_left_chunk
106
+ self.gradient_checkpointing = gradient_checkpointing
107
+
108
+ def output_size(self) -> int:
109
+ return self._output_size
110
+
111
+ def forward(
112
+ self,
113
+ xs: torch.Tensor,
114
+ xs_lens: torch.Tensor,
115
+ decoding_chunk_size: int = 0,
116
+ num_decoding_left_chunks: int = -1,
117
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
118
+ """Embed positions in tensor.
119
+
120
+ Args:
121
+ xs: padded input tensor (B, T, D)
122
+ xs_lens: input length (B)
123
+ decoding_chunk_size: decoding chunk size for dynamic chunk
124
+ 0: default for training, use random dynamic chunk.
125
+ <0: for decoding, use full chunk.
126
+ >0: for decoding, use fixed chunk size as set.
127
+ num_decoding_left_chunks: number of left chunks, this is for decoding,
128
+ the chunk size is decoding_chunk_size.
129
+ >=0: use num_decoding_left_chunks
130
+ <0: use all left chunks
131
+ Returns:
132
+ encoder output tensor xs, and subsampled masks
133
+ xs: padded output tensor (B, T' ~= T/subsample_rate, D)
134
+ masks: torch.Tensor batch padding mask after subsample
135
+ (B, 1, T' ~= T/subsample_rate)
136
+ NOTE(xcsong):
137
+ We pass the `__call__` method of the modules instead of `forward` to the
138
+ checkpointing API because `__call__` attaches all the hooks of the module.
139
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
140
+ """
141
+ T = xs.size(1)
142
+ masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)
143
+ if self.global_cmvn is not None:
144
+ xs = self.global_cmvn(xs)
145
+ xs, pos_emb, masks = self.embed(xs, masks)
146
+ mask_pad = masks # (B, 1, T/subsample_rate)
147
+ chunk_masks = add_optional_chunk_mask(xs, masks,
148
+ self.use_dynamic_chunk,
149
+ self.use_dynamic_left_chunk,
150
+ decoding_chunk_size,
151
+ self.static_chunk_size,
152
+ num_decoding_left_chunks)
153
+ if self.gradient_checkpointing and self.training:
154
+ xs = self.forward_layers_checkpointed(xs, chunk_masks, pos_emb,
155
+ mask_pad)
156
+ else:
157
+ xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
158
+ if self.normalize_before:
159
+ xs = self.after_norm(xs)
160
+ # Here we assume the mask is not changed in encoder layers, so just
161
+ # return the masks before encoder layers, and the masks will be used
162
+ # for cross attention with decoder later
163
+ return xs, masks
164
+
165
+ def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
166
+ pos_emb: torch.Tensor,
167
+ mask_pad: torch.Tensor) -> torch.Tensor:
168
+ for layer in self.encoders:
169
+ xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
170
+ return xs
171
+
172
+ @torch.jit.unused
173
+ def forward_layers_checkpointed(self, xs: torch.Tensor,
174
+ chunk_masks: torch.Tensor,
175
+ pos_emb: torch.Tensor,
176
+ mask_pad: torch.Tensor) -> torch.Tensor:
177
+ for layer in self.encoders:
178
+ xs, chunk_masks, _, _ = ckpt.checkpoint(layer.__call__, xs,
179
+ chunk_masks, pos_emb,
180
+ mask_pad)
181
+ return xs
182
+
183
+ @torch.jit.export
184
+ def forward_chunk(
185
+ self,
186
+ xs: torch.Tensor,
187
+ offset: int,
188
+ required_cache_size: int,
189
+ att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
190
+ cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
191
+ att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
192
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
193
+ """ Forward just one chunk
194
+
195
+ Args:
196
+ xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim),
197
+ where `time == (chunk_size - 1) * subsample_rate + \
198
+ subsample.right_context + 1`
199
+ offset (int): current offset in encoder output time stamp
200
+ required_cache_size (int): cache size required for next chunk
201
+ computation
202
+ >=0: actual cache size
203
+ <0: means all history cache is required
204
+ att_cache (torch.Tensor): cache tensor for KEY & VALUE in
205
+ transformer/conformer attention, with shape
206
+ (elayers, head, cache_t1, d_k * 2), where
207
+ `head * d_k == hidden-dim` and
208
+ `cache_t1 == chunk_size * num_decoding_left_chunks`.
209
+ cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,
210
+ (elayers, b=1, hidden-dim, cache_t2), where
211
+ `cache_t2 == cnn.lorder - 1`
212
+
213
+ Returns:
214
+ torch.Tensor: output of current input xs,
215
+ with shape (b=1, chunk_size, hidden-dim).
216
+ torch.Tensor: new attention cache required for next chunk, with
217
+ dynamic shape (elayers, head, ?, d_k * 2)
218
+ depending on required_cache_size.
219
+ torch.Tensor: new conformer cnn cache required for next chunk, with
220
+ same shape as the original cnn_cache.
221
+
222
+ """
223
+ assert xs.size(0) == 1
224
+ # tmp_masks is just for interface compatibility
225
+ tmp_masks = torch.ones(1,
226
+ xs.size(1),
227
+ device=xs.device,
228
+ dtype=torch.bool)
229
+ tmp_masks = tmp_masks.unsqueeze(1)
230
+ if self.global_cmvn is not None:
231
+ xs = self.global_cmvn(xs)
232
+ # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)
233
+ xs, pos_emb, _ = self.embed(xs, tmp_masks, offset)
234
+ # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)
235
+ elayers, cache_t1 = att_cache.size(0), att_cache.size(2)
236
+ chunk_size = xs.size(1)
237
+ attention_key_size = cache_t1 + chunk_size
238
+ pos_emb = self.embed.position_encoding(offset=offset - cache_t1,
239
+ size=attention_key_size)
240
+ if required_cache_size < 0:
241
+ next_cache_start = 0
242
+ elif required_cache_size == 0:
243
+ next_cache_start = attention_key_size
244
+ else:
245
+ next_cache_start = max(attention_key_size - required_cache_size, 0)
246
+ r_att_cache = []
247
+ r_cnn_cache = []
248
+ for i, layer in enumerate(self.encoders):
249
+ # NOTE(xcsong): Before layer.forward
250
+ # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2),
251
+ # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2)
252
+ xs, _, new_att_cache, new_cnn_cache = layer(
253
+ xs,
254
+ att_mask,
255
+ pos_emb,
256
+ att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
257
+ cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache)
258
+ # NOTE(xcsong): After layer.forward
259
+ # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2),
260
+ # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2)
261
+ r_att_cache.append(new_att_cache[:, :, next_cache_start:, :])
262
+ r_cnn_cache.append(new_cnn_cache.unsqueeze(0))
263
+ if self.normalize_before:
264
+ xs = self.after_norm(xs)
265
+
266
+ # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),
267
+ # ? may be larger than cache_t1, it depends on required_cache_size
268
+ r_att_cache = torch.cat(r_att_cache, dim=0)
269
+ # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)
270
+ r_cnn_cache = torch.cat(r_cnn_cache, dim=0)
271
+
272
+ return (xs, r_att_cache, r_cnn_cache)
273
+
274
+ @torch.jit.unused
275
+ def forward_chunk_by_chunk(
276
+ self,
277
+ xs: torch.Tensor,
278
+ decoding_chunk_size: int,
279
+ num_decoding_left_chunks: int = -1,
280
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
281
+ """ Forward input chunk by chunk with chunk_size like a streaming
282
+ fashion
283
+
284
+ Here we should pay special attention to computation cache in the
285
+ streaming style forward chunk by chunk. Three things should be taken
286
+ into account for computation in the current network:
287
+ 1. transformer/conformer encoder layers output cache
288
+ 2. convolution in conformer
289
+ 3. convolution in subsampling
290
+
291
+ However, we don't implement subsampling cache for:
292
+ 1. We can make the subsampling module output the right result by
293
+ overlapping the input instead of caching left context; although this
294
+ wastes some computation, subsampling only takes a very
295
+ small fraction of the computation in the whole model.
296
+ 2. Typically, there are several convolution layers with subsampling
297
+ in the subsampling module, so it is tricky and complicated to cache
298
+ across different convolution layers with different subsampling
299
+ rates.
300
+ 3. Currently, nn.Sequential is used to stack all the convolution
301
+ layers in subsampling; we would need to rewrite it to make it work
302
+ with cache, which is not preferred.
303
+ Args:
304
+ xs (torch.Tensor): (1, max_len, dim)
305
+ chunk_size (int): decoding chunk size
306
+ """
307
+ assert decoding_chunk_size > 0
308
+ # The model is trained by static or dynamic chunk
309
+ assert self.static_chunk_size > 0 or self.use_dynamic_chunk
310
+ subsampling = self.embed.subsampling_rate
311
+ context = self.embed.right_context + 1 # Add current frame
312
+ stride = subsampling * decoding_chunk_size
313
+ decoding_window = (decoding_chunk_size - 1) * subsampling + context
314
+ num_frames = xs.size(1)
315
+ att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
316
+ cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
317
+ outputs = []
318
+ offset = 0
319
+ required_cache_size = decoding_chunk_size * num_decoding_left_chunks
320
+
321
+ # Feed forward overlap input step by step
322
+ for cur in range(0, num_frames - context + 1, stride):
323
+ end = min(cur + decoding_window, num_frames)
324
+ chunk_xs = xs[:, cur:end, :]
325
+ (y, att_cache,
326
+ cnn_cache) = self.forward_chunk(chunk_xs, offset,
327
+ required_cache_size, att_cache,
328
+ cnn_cache)
329
+ outputs.append(y)
330
+ offset += y.size(1)
331
+ ys = torch.cat(outputs, 1)
332
+ masks = torch.ones((1, 1, ys.size(1)),
333
+ device=ys.device,
334
+ dtype=torch.bool)
335
+ return ys, masks
336
+
337
+
338
+ class TransformerEncoder(BaseEncoder):
339
+ """Transformer encoder module."""
340
+
341
+ def __init__(
342
+ self,
343
+ input_size: int,
344
+ output_size: int = 256,
345
+ attention_heads: int = 4,
346
+ linear_units: int = 2048,
347
+ num_blocks: int = 6,
348
+ dropout_rate: float = 0.1,
349
+ positional_dropout_rate: float = 0.1,
350
+ attention_dropout_rate: float = 0.0,
351
+ input_layer: str = "conv2d",
352
+ pos_enc_layer_type: str = "abs_pos",
353
+ normalize_before: bool = True,
354
+ static_chunk_size: int = 0,
355
+ use_dynamic_chunk: bool = False,
356
+ global_cmvn: torch.nn.Module = None,
357
+ use_dynamic_left_chunk: bool = False,
358
+ key_bias: bool = True,
359
+ selfattention_layer_type: str = "selfattn",
360
+ activation_type: str = "relu",
361
+ gradient_checkpointing: bool = False,
362
+ ):
363
+ """ Construct TransformerEncoder
364
+
365
+ See Encoder for the meaning of each parameter.
366
+ """
367
+ super().__init__(input_size, output_size, attention_heads,
368
+ linear_units, num_blocks, dropout_rate,
369
+ positional_dropout_rate, attention_dropout_rate,
370
+ input_layer, pos_enc_layer_type, normalize_before,
371
+ static_chunk_size, use_dynamic_chunk, global_cmvn,
372
+ use_dynamic_left_chunk, gradient_checkpointing)
373
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
374
+ self.encoders = torch.nn.ModuleList([
375
+ TransformerEncoderLayer(
376
+ output_size,
377
+ COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](attention_heads,
378
+ output_size,
379
+ attention_dropout_rate,
380
+ key_bias),
381
+ PositionwiseFeedForward(output_size, linear_units,
382
+ dropout_rate, activation),
383
+ dropout_rate, normalize_before) for _ in range(num_blocks)
384
+ ])
385
+
386
+
387
+ class ConformerEncoder(BaseEncoder):
388
+ """Conformer encoder module."""
389
+
390
+ def __init__(
391
+ self,
392
+ input_size: int,
393
+ output_size: int = 256,
394
+ attention_heads: int = 4,
395
+ linear_units: int = 2048,
396
+ num_blocks: int = 6,
397
+ dropout_rate: float = 0.1,
398
+ positional_dropout_rate: float = 0.1,
399
+ attention_dropout_rate: float = 0.0,
400
+ input_layer: str = "conv2d",
401
+ pos_enc_layer_type: str = "rel_pos",
402
+ normalize_before: bool = True,
403
+ static_chunk_size: int = 0,
404
+ use_dynamic_chunk: bool = False,
405
+ global_cmvn: torch.nn.Module = None,
406
+ use_dynamic_left_chunk: bool = False,
407
+ positionwise_conv_kernel_size: int = 1,
408
+ macaron_style: bool = True,
409
+ selfattention_layer_type: str = "rel_selfattn",
410
+ activation_type: str = "swish",
411
+ use_cnn_module: bool = True,
412
+ cnn_module_kernel: int = 15,
413
+ causal: bool = False,
414
+ cnn_module_norm: str = "batch_norm",
415
+ key_bias: bool = True,
416
+ gradient_checkpointing: bool = False,
417
+ ):
418
+ """Construct ConformerEncoder
419
+
420
+ Args:
421
+ input_size to use_dynamic_chunk, see in BaseEncoder
422
+ positionwise_conv_kernel_size (int): Kernel size of positionwise
423
+ conv1d layer.
424
+ macaron_style (bool): Whether to use macaron style for
425
+ positionwise layer.
426
+ selfattention_layer_type (str): Encoder attention layer type,
427
+ the parameter has no effect now; it's just for configuration
428
+ compatibility.
429
+ activation_type (str): Encoder activation function type.
430
+ use_cnn_module (bool): Whether to use convolution module.
431
+ cnn_module_kernel (int): Kernel size of convolution module.
432
+ causal (bool): whether to use causal convolution or not.
433
+ key_bias: whether to use bias in attention.linear_k; False for whisper models.
434
+ """
435
+ super().__init__(input_size, output_size, attention_heads,
436
+ linear_units, num_blocks, dropout_rate,
437
+ positional_dropout_rate, attention_dropout_rate,
438
+ input_layer, pos_enc_layer_type, normalize_before,
439
+ static_chunk_size, use_dynamic_chunk, global_cmvn,
440
+ use_dynamic_left_chunk, gradient_checkpointing)
441
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
442
+
443
+ # self-attention module definition
444
+ encoder_selfattn_layer_args = (
445
+ attention_heads,
446
+ output_size,
447
+ attention_dropout_rate,
448
+ key_bias,
449
+ )
450
+ # feed-forward module definition
451
+ positionwise_layer_args = (
452
+ output_size,
453
+ linear_units,
454
+ dropout_rate,
455
+ activation,
456
+ )
457
+ # convolution module definition
458
+ convolution_layer_args = (output_size, cnn_module_kernel, activation,
459
+ cnn_module_norm, causal)
460
+
461
+ self.encoders = torch.nn.ModuleList([
462
+ ConformerEncoderLayer(
463
+ output_size,
464
+ COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
465
+ *encoder_selfattn_layer_args),
466
+ PositionwiseFeedForward(*positionwise_layer_args),
467
+ PositionwiseFeedForward(
468
+ *positionwise_layer_args) if macaron_style else None,
469
+ ConvolutionModule(
470
+ *convolution_layer_args) if use_cnn_module else None,
471
+ dropout_rate,
472
+ normalize_before,
473
+ ) for _ in range(num_blocks)
474
+ ])
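If the conformer sub-modules imported at the top of this file resolve normally, chunk-based streaming inference with the encoder above might look like the following sketch; every numeric value is illustrative and not taken from a CosyVoice configuration:

```python
import torch
from cosyvoice.transformer.encoder import ConformerEncoder

encoder = ConformerEncoder(input_size=80, output_size=256, attention_heads=4,
                           num_blocks=2, static_chunk_size=25)
encoder.eval()

xs = torch.randn(1, 400, 80)  # (batch=1, frames, mel-dim)
with torch.no_grad():
    # offline: one pass over the whole utterance
    ys_full, masks = encoder(xs, torch.tensor([400]))
    # streaming: chunk-by-chunk, reusing 2 chunks of left context as attention cache
    ys_stream, _ = encoder.forward_chunk_by_chunk(xs, decoding_chunk_size=25,
                                                  num_decoding_left_chunks=2)
```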
third_party/cosyvoice/utils/__init__.py ADDED
File without changes
third_party/cosyvoice/utils/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (165 Bytes).
 
third_party/cosyvoice/utils/__pycache__/class_utils.cpython-311.pyc ADDED
Binary file (3.18 kB).
 
third_party/cosyvoice/utils/__pycache__/common.cpython-311.pyc ADDED
Binary file (8.62 kB).
 
third_party/cosyvoice/utils/__pycache__/file_utils.cpython-311.pyc ADDED
Binary file (5.94 kB).
 
third_party/cosyvoice/utils/__pycache__/frontend_utils.cpython-311.pyc ADDED
Binary file (6.01 kB).
 
third_party/cosyvoice/utils/__pycache__/mask.cpython-311.pyc ADDED
Binary file (8.38 kB).
 
third_party/cosyvoice/utils/class_utils.py ADDED
@@ -0,0 +1,83 @@
1
+ # Copyright [2023-11-28] <[email protected], Xingchen Song>
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import torch
16
+
17
+ from cosyvoice.transformer.activation import Swish
18
+ from cosyvoice.transformer.subsampling import (
19
+ LinearNoSubsampling,
20
+ EmbedinigNoSubsampling,
21
+ Conv1dSubsampling2,
22
+ Conv2dSubsampling4,
23
+ Conv2dSubsampling6,
24
+ Conv2dSubsampling8,
25
+ )
26
+ from cosyvoice.transformer.embedding import (PositionalEncoding,
27
+ RelPositionalEncoding,
28
+ WhisperPositionalEncoding,
29
+ LearnablePositionalEncoding,
30
+ NoPositionalEncoding)
31
+ from cosyvoice.transformer.attention import (MultiHeadedAttention,
32
+ RelPositionMultiHeadedAttention)
33
+ from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding
34
+ from cosyvoice.transformer.subsampling import LegacyLinearNoSubsampling
35
+ from cosyvoice.llm.llm import TransformerLM, Qwen2LM
36
+ from cosyvoice.flow.flow import MaskedDiffWithXvec, CausalMaskedDiffWithXvec
37
+ from cosyvoice.hifigan.generator import HiFTGenerator
38
+ from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model
39
+
40
+
41
+ COSYVOICE_ACTIVATION_CLASSES = {
42
+ "hardtanh": torch.nn.Hardtanh,
43
+ "tanh": torch.nn.Tanh,
44
+ "relu": torch.nn.ReLU,
45
+ "selu": torch.nn.SELU,
46
+ "swish": getattr(torch.nn, "SiLU", Swish),
47
+ "gelu": torch.nn.GELU,
48
+ }
49
+
50
+ COSYVOICE_SUBSAMPLE_CLASSES = {
51
+ "linear": LinearNoSubsampling,
52
+ "linear_legacy": LegacyLinearNoSubsampling,
53
+ "embed": EmbedinigNoSubsampling,
54
+ "conv1d2": Conv1dSubsampling2,
55
+ "conv2d": Conv2dSubsampling4,
56
+ "conv2d6": Conv2dSubsampling6,
57
+ "conv2d8": Conv2dSubsampling8,
58
+ 'paraformer_dummy': torch.nn.Identity
59
+ }
60
+
61
+ COSYVOICE_EMB_CLASSES = {
62
+ "embed": PositionalEncoding,
63
+ "abs_pos": PositionalEncoding,
64
+ "rel_pos": RelPositionalEncoding,
65
+ "rel_pos_espnet": EspnetRelPositionalEncoding,
66
+ "no_pos": NoPositionalEncoding,
67
+ "abs_pos_whisper": WhisperPositionalEncoding,
68
+ "embed_learnable_pe": LearnablePositionalEncoding,
69
+ }
70
+
71
+ COSYVOICE_ATTENTION_CLASSES = {
72
+ "selfattn": MultiHeadedAttention,
73
+ "rel_selfattn": RelPositionMultiHeadedAttention,
74
+ }
75
+
76
+
77
+ def get_model_type(configs):
78
+ # NOTE CosyVoice2Model inherits CosyVoiceModel
79
+ if isinstance(configs['llm'], TransformerLM) and isinstance(configs['flow'], MaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
80
+ return CosyVoiceModel
81
+ if isinstance(configs['llm'], Qwen2LM) and isinstance(configs['flow'], CausalMaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
82
+ return CosyVoice2Model
83
+ raise TypeError('No valid model type found!')
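These registries are plain string-to-class lookup tables, so a config can reference components by key; for example (values chosen arbitrarily here):

```python
from cosyvoice.utils.class_utils import (COSYVOICE_ACTIVATION_CLASSES,
                                         COSYVOICE_EMB_CLASSES)

act = COSYVOICE_ACTIVATION_CLASSES["swish"]()                # torch.nn.SiLU (Swish fallback)
pos_enc = COSYVOICE_EMB_CLASSES["rel_pos_espnet"](256, 0.1)  # EspnetRelPositionalEncoding
```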
third_party/cosyvoice/utils/common.py ADDED
@@ -0,0 +1,166 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Utility functions for Transformer."""
17
+
18
+ import random
19
+ from typing import List
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ IGNORE_ID = -1
25
+
26
+
27
+ def pad_list(xs: List[torch.Tensor], pad_value: int):
28
+ """Perform padding for the list of tensors.
29
+
30
+ Args:
31
+ xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
32
+ pad_value (float): Value for padding.
33
+
34
+ Returns:
35
+ Tensor: Padded tensor (B, Tmax, `*`).
36
+
37
+ Examples:
38
+ >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
39
+ >>> x
40
+ [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
41
+ >>> pad_list(x, 0)
42
+ tensor([[1., 1., 1., 1.],
43
+ [1., 1., 0., 0.],
44
+ [1., 0., 0., 0.]])
45
+
46
+ """
47
+ max_len = max([len(item) for item in xs])
48
+ batchs = len(xs)
49
+ ndim = xs[0].ndim
50
+ if ndim == 1:
51
+ pad_res = torch.zeros(batchs,
52
+ max_len,
53
+ dtype=xs[0].dtype,
54
+ device=xs[0].device)
55
+ elif ndim == 2:
56
+ pad_res = torch.zeros(batchs,
57
+ max_len,
58
+ xs[0].shape[1],
59
+ dtype=xs[0].dtype,
60
+ device=xs[0].device)
61
+ elif ndim == 3:
62
+ pad_res = torch.zeros(batchs,
63
+ max_len,
64
+ xs[0].shape[1],
65
+ xs[0].shape[2],
66
+ dtype=xs[0].dtype,
67
+ device=xs[0].device)
68
+ else:
69
+ raise ValueError(f"Unsupported ndim: {ndim}")
70
+ pad_res.fill_(pad_value)
71
+ for i in range(batchs):
72
+ pad_res[i, :len(xs[i])] = xs[i]
73
+ return pad_res
74
+
75
+
76
+ def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor,
77
+ ignore_label: int) -> torch.Tensor:
78
+ """Calculate accuracy.
79
+
80
+ Args:
81
+ pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
82
+ pad_targets (LongTensor): Target label tensors (B, Lmax).
83
+ ignore_label (int): Ignore label id.
84
+
85
+ Returns:
86
+ torch.Tensor: Accuracy value (0.0 - 1.0).
87
+
88
+ """
89
+ pad_pred = pad_outputs.view(pad_targets.size(0), pad_targets.size(1),
90
+ pad_outputs.size(1)).argmax(2)
91
+ mask = pad_targets != ignore_label
92
+ numerator = torch.sum(
93
+ pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
94
+ denominator = torch.sum(mask)
95
+ return (numerator / denominator).detach()
96
+
97
+
98
+ def get_padding(kernel_size, dilation=1):
99
+ return int((kernel_size * dilation - dilation) / 2)
100
+
101
+
102
+ def init_weights(m, mean=0.0, std=0.01):
103
+ classname = m.__class__.__name__
104
+ if classname.find("Conv") != -1:
105
+ m.weight.data.normal_(mean, std)
106
+
107
+
108
+ # Repetition Aware Sampling in VALL-E 2
109
+ def ras_sampling(weighted_scores, decoded_tokens, sampling, top_p=0.8, top_k=25, win_size=10, tau_r=0.1):
110
+ top_ids = nucleus_sampling(weighted_scores, top_p=top_p, top_k=top_k)
111
+ rep_num = (torch.tensor(decoded_tokens[-win_size:]).to(weighted_scores.device) == top_ids).sum().item()
112
+ if rep_num >= win_size * tau_r:
113
+ top_ids = random_sampling(weighted_scores, decoded_tokens, sampling)
114
+ return top_ids
115
+
116
+
117
+ def nucleus_sampling(weighted_scores, top_p=0.8, top_k=25):
118
+ prob, indices = [], []
119
+ cum_prob = 0.0
120
+ sorted_value, sorted_idx = weighted_scores.softmax(dim=0).sort(descending=True, stable=True)
121
+ for i in range(len(sorted_idx)):
122
+ # keep candidates while both the top-p mass and the top-k count allow.
123
+ if cum_prob < top_p and len(prob) < top_k:
124
+ cum_prob += sorted_value[i]
125
+ prob.append(sorted_value[i])
126
+ indices.append(sorted_idx[i])
127
+ else:
128
+ break
129
+ prob = torch.tensor(prob).to(weighted_scores)
130
+ indices = torch.tensor(indices, dtype=torch.long).to(weighted_scores.device)
131
+ top_ids = indices[prob.multinomial(1, replacement=True)]
132
+ return top_ids
133
+
134
+
135
+ def random_sampling(weighted_scores, decoded_tokens, sampling):
136
+ top_ids = weighted_scores.softmax(dim=0).multinomial(1, replacement=True)
137
+ return top_ids
138
+
139
+
140
+ def fade_in_out(fade_in_mel, fade_out_mel, window):
141
+ device = fade_in_mel.device
142
+ fade_in_mel, fade_out_mel = fade_in_mel.cpu(), fade_out_mel.cpu()
143
+ mel_overlap_len = int(window.shape[0] / 2)
144
+ if fade_in_mel.device == torch.device('cpu'):
145
+ fade_in_mel = fade_in_mel.clone()
146
+ fade_in_mel[..., :mel_overlap_len] = fade_in_mel[..., :mel_overlap_len] * window[:mel_overlap_len] + \
147
+ fade_out_mel[..., -mel_overlap_len:] * window[mel_overlap_len:]
148
+ return fade_in_mel.to(device)
149
+
150
+
151
+ def set_all_random_seed(seed):
152
+ random.seed(seed)
153
+ np.random.seed(seed)
154
+ torch.manual_seed(seed)
155
+ torch.cuda.manual_seed_all(seed)
156
+
157
+
158
+ def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
159
+ assert mask.dtype == torch.bool
160
+ assert dtype in [torch.float32, torch.bfloat16, torch.float16]
161
+ mask = mask.to(dtype)
162
+ # attention mask bias
163
+ # NOTE(Mddct): torch.finfo jit issues
164
+ # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
165
+ mask = (1.0 - mask) * -1.0e+10
166
+ return mask
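A small sketch of how the repetition-aware sampling helpers above could be driven during token-by-token decoding; the vocabulary size and token history below are made up:

```python
import torch
from cosyvoice.utils.common import nucleus_sampling, ras_sampling

vocab_size = 4096
decoded = [12, 57, 57, 57, 57, 57, 57, 57, 57, 57]  # hypothetical token history
logits = torch.randn(vocab_size)                     # unnormalized scores for the next token

# nucleus sampling constrained by both the top-p mass and the top-k count
tok = nucleus_sampling(logits, top_p=0.8, top_k=25)

# RAS: if the nucleus pick repeats too often within the last win_size tokens,
# fall back to random sampling over the full distribution
# (`sampling` is forwarded to random_sampling, which does not use it)
tok = ras_sampling(logits, decoded, sampling=None, top_p=0.8, top_k=25,
                   win_size=10, tau_r=0.1)
```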
third_party/cosyvoice/utils/executor.py ADDED
@@ -0,0 +1,172 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import logging
17
+ from contextlib import nullcontext
18
+ import os
19
+
20
+ import torch
21
+ import torch.distributed as dist
22
+
23
+ from cosyvoice.utils.train_utils import update_parameter_and_lr, log_per_step, log_per_save, batch_forward, batch_backward, save_model, cosyvoice_join
24
+
25
+
26
+ class Executor:
27
+
28
+ def __init__(self, gan: bool = False):
29
+ self.gan = gan
30
+ self.step = 0
31
+ self.epoch = 0
32
+ self.rank = int(os.environ.get('RANK', 0))
33
+ self.device = torch.device('cuda:{}'.format(self.rank))
34
+
35
+ def train_one_epoc(self, model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, scaler, group_join):
36
+ ''' Train one epoch
37
+ '''
38
+
39
+ lr = optimizer.param_groups[0]['lr']
40
+ logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
41
+ logging.info('using accumulate grad, new batch size is {} times'
42
+ ' larger than before'.format(info_dict['accum_grad']))
43
+ # A context manager to be used in conjunction with an instance of
44
+ # torch.nn.parallel.DistributedDataParallel to be able to train
45
+ # with uneven inputs across participating processes.
46
+ model.train()
47
+ model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
48
+ with model_context():
49
+ for batch_idx, batch_dict in enumerate(train_data_loader):
50
+ info_dict["tag"] = "TRAIN"
51
+ info_dict["step"] = self.step
52
+ info_dict["epoch"] = self.epoch
53
+ info_dict["batch_idx"] = batch_idx
54
+ if cosyvoice_join(group_join, info_dict):
55
+ break
56
+
57
+ # Disable gradient synchronizations across DDP processes.
58
+ # Within this context, gradients will be accumulated on module
59
+ # variables, which will later be synchronized.
60
+ if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
61
+ context = model.no_sync
62
+ # Used for single gpu training and DDP gradient synchronization
63
+ # processes.
64
+ else:
65
+ context = nullcontext
66
+
67
+ with context():
68
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
69
+ info_dict = batch_backward(model, scaler, info_dict)
70
+
71
+ info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
72
+ log_per_step(writer, info_dict)
73
+ # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
74
+ if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
75
+ (batch_idx + 1) % info_dict["accum_grad"] == 0:
76
+ dist.barrier()
77
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
78
+ model.train()
79
+ if (batch_idx + 1) % info_dict["accum_grad"] == 0:
80
+ self.step += 1
81
+ dist.barrier()
82
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
83
+
84
+ def train_one_epoc_gan(self, model, optimizer, scheduler, optimizer_d, scheduler_d, train_data_loader, cv_data_loader,
85
+ writer, info_dict, scaler, group_join):
86
+ ''' Train one epoch
87
+ '''
88
+
89
+ lr = optimizer.param_groups[0]['lr']
90
+ logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
91
+ logging.info('using accumulate grad, new batch size is {} times'
92
+ ' larger than before'.format(info_dict['accum_grad']))
93
+ # A context manager to be used in conjunction with an instance of
94
+ # torch.nn.parallel.DistributedDataParallel to be able to train
95
+ # with uneven inputs across participating processes.
96
+ model.train()
97
+ model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
98
+ with model_context():
99
+ for batch_idx, batch_dict in enumerate(train_data_loader):
100
+ info_dict["tag"] = "TRAIN"
101
+ info_dict["step"] = self.step
102
+ info_dict["epoch"] = self.epoch
103
+ info_dict["batch_idx"] = batch_idx
104
+ if cosyvoice_join(group_join, info_dict):
105
+ break
106
+
107
+ # Disable gradient synchronizations across DDP processes.
108
+ # Within this context, gradients will be accumulated on module
109
+ # variables, which will later be synchronized.
110
+ if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
111
+ context = model.no_sync
112
+ # Used for single gpu training and DDP gradient synchronization
113
+ # processes.
114
+ else:
115
+ context = nullcontext
116
+
117
+ with context():
118
+ batch_dict['turn'] = 'discriminator'
119
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
120
+ info_dict = batch_backward(model, scaler, info_dict)
121
+ info_dict = update_parameter_and_lr(model, optimizer_d, scheduler_d, scaler, info_dict)
122
+ optimizer.zero_grad()
123
+ log_per_step(writer, info_dict)
124
+ with context():
125
+ batch_dict['turn'] = 'generator'
126
+ info_dict = batch_forward(model, batch_dict, scaler, info_dict)
127
+ info_dict = batch_backward(model, scaler, info_dict)
128
+ info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
129
+ optimizer_d.zero_grad()
130
+ log_per_step(writer, info_dict)
131
+ # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
132
+ if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
133
+ (batch_idx + 1) % info_dict["accum_grad"] == 0:
134
+ dist.barrier()
135
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
136
+ model.train()
137
+ if (batch_idx + 1) % info_dict["accum_grad"] == 0:
138
+ self.step += 1
139
+ dist.barrier()
140
+ self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
141
+
142
+ @torch.inference_mode()
143
+ def cv(self, model, cv_data_loader, writer, info_dict, on_batch_end=True):
144
+ ''' Cross validation
145
+ '''
146
+ logging.info('Epoch {} Step {} on_batch_end {} CV rank {}'.format(self.epoch, self.step + 1, on_batch_end, self.rank))
147
+ model.eval()
148
+ total_num_utts, total_loss_dict = 0, {} # avoid division by 0
149
+ for batch_idx, batch_dict in enumerate(cv_data_loader):
150
+ info_dict["tag"] = "CV"
151
+ info_dict["step"] = self.step
152
+ info_dict["epoch"] = self.epoch
153
+ info_dict["batch_idx"] = batch_idx
154
+
155
+ num_utts = len(batch_dict["utts"])
156
+ total_num_utts += num_utts
157
+
158
+ if self.gan is True:
159
+ batch_dict['turn'] = 'generator'
160
+ info_dict = batch_forward(model, batch_dict, None, info_dict)
161
+
162
+ for k, v in info_dict['loss_dict'].items():
163
+ if k not in total_loss_dict:
164
+ total_loss_dict[k] = []
165
+ total_loss_dict[k].append(v.item() * num_utts)
166
+ log_per_step(None, info_dict)
167
+ for k, v in total_loss_dict.items():
168
+ total_loss_dict[k] = sum(v) / total_num_utts
169
+ info_dict['loss_dict'] = total_loss_dict
170
+ log_per_save(writer, info_dict)
171
+ model_name = 'epoch_{}_whole'.format(self.epoch) if on_batch_end else 'epoch_{}_step_{}'.format(self.epoch, self.step + 1)
172
+ save_model(model, model_name, info_dict)
third_party/cosyvoice/utils/file_utils.py ADDED
@@ -0,0 +1,89 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu, Zetao Hu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import torchaudio
18
+ import logging
19
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
20
+ logging.basicConfig(level=logging.DEBUG,
21
+ format='%(asctime)s %(levelname)s %(message)s')
22
+
23
+
24
+ def read_lists(list_file):
25
+ lists = []
26
+ with open(list_file, 'r', encoding='utf8') as fin:
27
+ for line in fin:
28
+ lists.append(line.strip())
29
+ return lists
30
+
31
+
32
+ def read_json_lists(list_file):
33
+ lists = read_lists(list_file)
34
+ results = {}
35
+ for fn in lists:
36
+ with open(fn, 'r', encoding='utf8') as fin:
37
+ results.update(json.load(fin))
38
+ return results
39
+
40
+
41
+ def load_wav(wav, target_sr):
42
+ speech, sample_rate = torchaudio.load(wav, backend='soundfile')
43
+ speech = speech.mean(dim=0, keepdim=True)
44
+ if sample_rate != target_sr:
45
+ assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
46
+ speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
47
+ return speech
48
+
49
+
50
+ def convert_onnx_to_trt(trt_model, onnx_model, fp16):
51
+ import tensorrt as trt
52
+ _min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2,), (2, 80), (2, 80, 4)]
53
+ _opt_shape = [(2, 80, 193), (2, 1, 193), (2, 80, 193), (2,), (2, 80), (2, 80, 193)]
54
+ _max_shape = [(2, 80, 6800), (2, 1, 6800), (2, 80, 6800), (2,), (2, 80), (2, 80, 6800)]
55
+ input_names = ["x", "mask", "mu", "t", "spks", "cond"]
56
+
57
+ logging.info("Converting onnx to trt...")
58
+ network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
59
+ logger = trt.Logger(trt.Logger.INFO)
60
+ builder = trt.Builder(logger)
61
+ network = builder.create_network(network_flags)
62
+ parser = trt.OnnxParser(network, logger)
63
+ config = builder.create_builder_config()
64
+ config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 33) # 8GB
65
+ if fp16:
66
+ config.set_flag(trt.BuilderFlag.FP16)
67
+ profile = builder.create_optimization_profile()
68
+ # load onnx model
69
+ with open(onnx_model, "rb") as f:
70
+ if not parser.parse(f.read()):
71
+ for error in range(parser.num_errors):
72
+ print(parser.get_error(error))
73
+ raise ValueError('failed to parse {}'.format(onnx_model))
74
+ # set input shapes
75
+ for i in range(len(input_names)):
76
+ profile.set_shape(input_names[i], _min_shape[i], _opt_shape[i], _max_shape[i])
77
+ tensor_dtype = trt.DataType.HALF if fp16 else trt.DataType.FLOAT
78
+ # set input and output data type
79
+ for i in range(network.num_inputs):
80
+ input_tensor = network.get_input(i)
81
+ input_tensor.dtype = tensor_dtype
82
+ for i in range(network.num_outputs):
83
+ output_tensor = network.get_output(i)
84
+ output_tensor.dtype = tensor_dtype
85
+ config.add_optimization_profile(profile)
86
+ engine_bytes = builder.build_serialized_network(network, config)
87
+ # save trt engine
88
+ with open(trt_model, "wb") as f:
89
+ f.write(engine_bytes)
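For reference, a prompt waveform can be loaded at the tokenizer's expected sample rate with load_wav; the file name below is hypothetical:

```python
from cosyvoice.utils.file_utils import load_wav

# downmixes to mono and resamples to 16 kHz; the source must have a higher sample rate
prompt_speech = load_wav('prompt.wav', target_sr=16000)
print(prompt_speech.shape)  # (1, num_samples)
```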
third_party/cosyvoice/utils/frontend_utils.py ADDED
@@ -0,0 +1,136 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ import regex
17
+ chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]+')
18
+
19
+
20
+ # whether contain chinese character
21
+ def contains_chinese(text):
22
+ return bool(chinese_char_pattern.search(text))
23
+
24
+
25
+ # replace special symbol
26
+ def replace_corner_mark(text):
27
+ text = text.replace('²', '平方')
28
+ text = text.replace('³', '立方')
29
+ return text
30
+
31
+
32
+ # remove meaningless symbol
33
+ def remove_bracket(text):
34
+ text = text.replace('(', '').replace(')', '')
35
+ text = text.replace('【', '').replace('】', '')
36
+ text = text.replace('`', '').replace('`', '')
37
+ text = text.replace("——", " ")
38
+ return text
39
+
40
+
41
+ # spell out Arabic numerals
42
+ def spell_out_number(text: str, inflect_parser):
43
+ new_text = []
44
+ st = None
45
+ for i, c in enumerate(text):
46
+ if not c.isdigit():
47
+ if st is not None:
48
+ num_str = inflect_parser.number_to_words(text[st: i])
49
+ new_text.append(num_str)
50
+ st = None
51
+ new_text.append(c)
52
+ else:
53
+ if st is None:
54
+ st = i
55
+ if st is not None and st < len(text):
56
+ num_str = inflect_parser.number_to_words(text[st:])
57
+ new_text.append(num_str)
58
+ return ''.join(new_text)
59
+
60
+
61
+ # split paragraph logic:
62
+ # 1. each sentence has at most token_max_n and at least token_min_n tokens; merge the last sentence if it is shorter than merge_len
63
+ # 2. calculate sentence length according to lang
64
+ # 3. split sentences on punctuation
65
+ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=60, merge_len=20, comma_split=False):
66
+ def calc_utt_length(_text: str):
67
+ if lang == "zh":
68
+ return len(_text)
69
+ else:
70
+ return len(tokenize(_text))
71
+
72
+ def should_merge(_text: str):
73
+ if lang == "zh":
74
+ return len(_text) < merge_len
75
+ else:
76
+ return len(tokenize(_text)) < merge_len
77
+
78
+ if lang == "zh":
79
+ pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
80
+ else:
81
+ pounc = ['.', '?', '!', ';', ':']
82
+ if comma_split:
83
+ pounc.extend([',', ','])
84
+
85
+ if text[-1] not in pounc:
86
+ if lang == "zh":
87
+ text += "。"
88
+ else:
89
+ text += "."
90
+
91
+ st = 0
92
+ utts = []
93
+ for i, c in enumerate(text):
94
+ if c in pounc:
95
+ if len(text[st: i]) > 0:
96
+ utts.append(text[st: i] + c)
97
+ if i + 1 < len(text) and text[i + 1] in ['"', '”']:
98
+ tmp = utts.pop(-1)
99
+ utts.append(tmp + text[i + 1])
100
+ st = i + 2
101
+ else:
102
+ st = i + 1
103
+
104
+ final_utts = []
105
+ cur_utt = ""
106
+ for utt in utts:
107
+ if calc_utt_length(cur_utt + utt) > token_max_n and calc_utt_length(cur_utt) > token_min_n:
108
+ final_utts.append(cur_utt)
109
+ cur_utt = ""
110
+ cur_utt = cur_utt + utt
111
+ if len(cur_utt) > 0:
112
+ if should_merge(cur_utt) and len(final_utts) != 0:
113
+ final_utts[-1] = final_utts[-1] + cur_utt
114
+ else:
115
+ final_utts.append(cur_utt)
116
+
117
+ return final_utts
118
+
119
+
120
+ # remove blanks between Chinese characters
121
+ def replace_blank(text: str):
122
+ out_str = []
123
+ for i, c in enumerate(text):
124
+ if c == " ":
125
+ if ((text[i + 1].isascii() and text[i + 1] != " ") and
126
+ (text[i - 1].isascii() and text[i - 1] != " ")):
127
+ out_str.append(c)
128
+ else:
129
+ out_str.append(c)
130
+ return "".join(out_str)
131
+
132
+
133
+ def is_only_punctuation(text):
134
+ # Regular expression: Match strings that consist only of punctuation marks or are empty.
135
+ punctuation_pattern = r'^[\p{P}\p{S}]*$'
136
+ return bool(regex.fullmatch(punctuation_pattern, text))
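A short, hedged example of how the frontend helpers above could be combined; the whitespace tokenizer and the sample sentence are stand-ins, not CosyVoice's actual frontend configuration.

```python
# Illustrative use of the frontend helpers above (not part of this diff).
import inflect
from cosyvoice.utils.frontend_utils import contains_chinese, spell_out_number, split_paragraph

parser = inflect.engine()
text = "CosyVoice splits 2 long paragraphs into sentences before synthesis."
text = spell_out_number(text, parser)        # digits -> English words ("2" -> "two")
assert not contains_chinese(text)

# With lang="en", sentence length is measured in tokens returned by `tokenize`;
# str.split is only a stand-in for the real tokenizer.
sentences = split_paragraph(text, tokenize=str.split, lang="en",
                            token_max_n=80, token_min_n=60, merge_len=20)
print(sentences)
```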
third_party/cosyvoice/utils/losses.py ADDED
@@ -0,0 +1,20 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+
5
+ def tpr_loss(disc_real_outputs, disc_generated_outputs, tau):
6
+ loss = 0
7
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
8
+ m_DG = torch.median((dr - dg))
9
+ L_rel = torch.mean((((dr - dg) - m_DG) ** 2)[dr < dg + m_DG])
10
+ loss += tau - F.relu(tau - L_rel)
11
+ return loss
12
+
13
+
14
+ def mel_loss(real_speech, generated_speech, mel_transforms):
15
+ loss = 0
16
+ for transform in mel_transforms:
17
+ mel_r = transform(real_speech)
18
+ mel_g = transform(generated_speech)
19
+ loss += F.l1_loss(mel_g, mel_r)
20
+ return loss
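A rough sketch of how `mel_loss` might be called during adversarial training; the mel-spectrogram settings below are placeholders and not the configuration used by this repository.

```python
# Illustrative driver for mel_loss (not part of this diff); the transform
# parameters are placeholders rather than CosyVoice's training config.
import torch
import torchaudio
from cosyvoice.utils.losses import mel_loss

mel_transforms = [
    torchaudio.transforms.MelSpectrogram(sample_rate=24000, n_fft=n_fft,
                                          hop_length=n_fft // 4, n_mels=80)
    for n_fft in (512, 1024, 2048)
]
real = torch.randn(2, 24000)        # (batch, samples), 1 s at 24 kHz
generated = torch.randn(2, 24000)
loss = mel_loss(real, generated, mel_transforms)   # sum of per-scale L1 distances
```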
third_party/cosyvoice/utils/mask.py ADDED
@@ -0,0 +1,267 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import torch
18
+ from cosyvoice.utils.file_utils import logging
19
+ '''
20
+ def subsequent_mask(
21
+ size: int,
22
+ device: torch.device = torch.device("cpu"),
23
+ ) -> torch.Tensor:
24
+ """Create mask for subsequent steps (size, size).
25
+
26
+ This mask is used only in decoder which works in an auto-regressive mode.
27
+ This means the current step could only do attention with its left steps.
28
+
29
+ In the encoder, full attention is used when streaming is not necessary and
30
+ the sequence is not long. In this case, no attention mask is needed.
31
+
32
+ When streaming is needed, chunk-based attention is used in the encoder. See
33
+ subsequent_chunk_mask for the chunk-based attention mask.
34
+
35
+ Args:
36
+ size (int): size of mask
37
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
38
+ dtype (torch.device): result dtype
39
+
40
+ Returns:
41
+ torch.Tensor: mask
42
+
43
+ Examples:
44
+ >>> subsequent_mask(3)
45
+ [[1, 0, 0],
46
+ [1, 1, 0],
47
+ [1, 1, 1]]
48
+ """
49
+ ret = torch.ones(size, size, device=device, dtype=torch.bool)
50
+ return torch.tril(ret)
51
+ '''
52
+
53
+
54
+ def subsequent_mask(
55
+ size: int,
56
+ device: torch.device = torch.device("cpu"),
57
+ ) -> torch.Tensor:
58
+ """Create mask for subsequent steps (size, size).
59
+
60
+ This mask is used only in decoder which works in an auto-regressive mode.
61
+ This means the current step could only do attention with its left steps.
62
+
63
+ In the encoder, full attention is used when streaming is not necessary and
64
+ the sequence is not long. In this case, no attention mask is needed.
65
+
66
+ When streaming is needed, chunk-based attention is used in the encoder. See
67
+ subsequent_chunk_mask for the chunk-based attention mask.
68
+
69
+ Args:
70
+ size (int): size of mask
71
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
72
+ dtype (torch.device): result dtype
73
+
74
+ Returns:
75
+ torch.Tensor: mask
76
+
77
+ Examples:
78
+ >>> subsequent_mask(3)
79
+ [[1, 0, 0],
80
+ [1, 1, 0],
81
+ [1, 1, 1]]
82
+ """
83
+ arange = torch.arange(size, device=device)
84
+ mask = arange.expand(size, size)
85
+ arange = arange.unsqueeze(-1)
86
+ mask = mask <= arange
87
+ return mask
88
+
89
+
90
+ def subsequent_chunk_mask_deprecated(
91
+ size: int,
92
+ chunk_size: int,
93
+ num_left_chunks: int = -1,
94
+ device: torch.device = torch.device("cpu"),
95
+ ) -> torch.Tensor:
96
+ """Create mask for subsequent steps (size, size) with chunk size,
97
+ this is for streaming encoder
98
+
99
+ Args:
100
+ size (int): size of mask
101
+ chunk_size (int): size of chunk
102
+ num_left_chunks (int): number of left chunks
103
+ <0: use full chunk
104
+ >=0: use num_left_chunks
105
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
106
+
107
+ Returns:
108
+ torch.Tensor: mask
109
+
110
+ Examples:
111
+ >>> subsequent_chunk_mask(4, 2)
112
+ [[1, 1, 0, 0],
113
+ [1, 1, 0, 0],
114
+ [1, 1, 1, 1],
115
+ [1, 1, 1, 1]]
116
+ """
117
+ ret = torch.zeros(size, size, device=device, dtype=torch.bool)
118
+ for i in range(size):
119
+ if num_left_chunks < 0:
120
+ start = 0
121
+ else:
122
+ start = max((i // chunk_size - num_left_chunks) * chunk_size, 0)
123
+ ending = min((i // chunk_size + 1) * chunk_size, size)
124
+ ret[i, start:ending] = True
125
+ return ret
126
+
127
+
128
+ def subsequent_chunk_mask(
129
+ size: int,
130
+ chunk_size: int,
131
+ num_left_chunks: int = -1,
132
+ device: torch.device = torch.device("cpu"),
133
+ ) -> torch.Tensor:
134
+ """Create mask for subsequent steps (size, size) with chunk size,
135
+ this is for streaming encoder
136
+
137
+ Args:
138
+ size (int): size of mask
139
+ chunk_size (int): size of chunk
140
+ num_left_chunks (int): number of left chunks
141
+ <0: use full chunk
142
+ >=0: use num_left_chunks
143
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
144
+
145
+ Returns:
146
+ torch.Tensor: mask
147
+
148
+ Examples:
149
+ >>> subsequent_chunk_mask(4, 2)
150
+ [[1, 1, 0, 0],
151
+ [1, 1, 0, 0],
152
+ [1, 1, 1, 1],
153
+ [1, 1, 1, 1]]
154
+ """
155
+ # NOTE this modified implementation meets onnx export requirements, but it doesn't support num_left_chunks
156
+ # actually this is not needed after we have inference cache implemented, will remove it later
157
+ pos_idx = torch.arange(size, device=device)
158
+ block_value = (torch.div(pos_idx, chunk_size, rounding_mode='trunc') + 1) * chunk_size
159
+ ret = pos_idx.unsqueeze(0) < block_value.unsqueeze(1)
160
+ return ret
161
+
162
+
163
+ def add_optional_chunk_mask(xs: torch.Tensor,
164
+ masks: torch.Tensor,
165
+ use_dynamic_chunk: bool,
166
+ use_dynamic_left_chunk: bool,
167
+ decoding_chunk_size: int,
168
+ static_chunk_size: int,
169
+ num_decoding_left_chunks: int,
170
+ enable_full_context: bool = True):
171
+ """ Apply optional mask for encoder.
172
+
173
+ Args:
174
+ xs (torch.Tensor): padded input, (B, L, D), L for max length
175
+ mask (torch.Tensor): mask for xs, (B, 1, L)
176
+ use_dynamic_chunk (bool): whether to use dynamic chunk or not
177
+ use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
178
+ training.
179
+ decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
180
+ 0: default for training, use random dynamic chunk.
181
+ <0: for decoding, use full chunk.
182
+ >0: for decoding, use fixed chunk size as set.
183
+ static_chunk_size (int): chunk size for static chunk training/decoding
184
+ if it's greater than 0; when use_dynamic_chunk is true,
185
+ this parameter will be ignored
186
+ num_decoding_left_chunks: number of left chunks, this is for decoding,
187
+ the chunk size is decoding_chunk_size.
188
+ >=0: use num_decoding_left_chunks
189
+ <0: use all left chunks
190
+ enable_full_context (bool):
191
+ True: chunk size is either [1, 25] or full context(max_len)
192
+ False: chunk size ~ U[1, 25]
193
+
194
+ Returns:
195
+ torch.Tensor: chunk mask of the input xs.
196
+ """
197
+ # Whether to use chunk mask or not
198
+ if use_dynamic_chunk:
199
+ max_len = xs.size(1)
200
+ if decoding_chunk_size < 0:
201
+ chunk_size = max_len
202
+ num_left_chunks = -1
203
+ elif decoding_chunk_size > 0:
204
+ chunk_size = decoding_chunk_size
205
+ num_left_chunks = num_decoding_left_chunks
206
+ else:
207
+ # chunk size is either [1, 25] or full context(max_len).
208
+ # Since we use 4 times subsampling and allow up to 1s(100 frames)
209
+ # delay, the maximum frame is 100 / 4 = 25.
210
+ chunk_size = torch.randint(1, max_len, (1, )).item()
211
+ num_left_chunks = -1
212
+ if chunk_size > max_len // 2 and enable_full_context:
213
+ chunk_size = max_len
214
+ else:
215
+ chunk_size = chunk_size % 25 + 1
216
+ if use_dynamic_left_chunk:
217
+ max_left_chunks = (max_len - 1) // chunk_size
218
+ num_left_chunks = torch.randint(0, max_left_chunks,
219
+ (1, )).item()
220
+ chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
221
+ num_left_chunks,
222
+ xs.device) # (L, L)
223
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
224
+ chunk_masks = masks & chunk_masks # (B, L, L)
225
+ elif static_chunk_size > 0:
226
+ num_left_chunks = num_decoding_left_chunks
227
+ chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
228
+ num_left_chunks,
229
+ xs.device) # (L, L)
230
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
231
+ chunk_masks = masks & chunk_masks # (B, L, L)
232
+ else:
233
+ chunk_masks = masks
234
+ assert chunk_masks.dtype == torch.bool
235
+ if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
236
+ logging.warning('chunk_masks is all false at some timestep; forcing it to true, make sure these positions are masked in future computation!')
237
+ chunk_masks[chunk_masks.sum(dim=-1)==0] = True
238
+ return chunk_masks
239
+
240
+
241
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
242
+ """Make mask tensor containing indices of padded part.
243
+
244
+ See description of make_non_pad_mask.
245
+
246
+ Args:
247
+ lengths (torch.Tensor): Batch of lengths (B,).
248
+ Returns:
249
+ torch.Tensor: Mask tensor containing indices of padded part.
250
+
251
+ Examples:
252
+ >>> lengths = [5, 3, 2]
253
+ >>> make_pad_mask(lengths)
254
+ masks = [[0, 0, 0, 0 ,0],
255
+ [0, 0, 0, 1, 1],
256
+ [0, 0, 1, 1, 1]]
257
+ """
258
+ batch_size = lengths.size(0)
259
+ max_len = max_len if max_len > 0 else lengths.max().item()
260
+ seq_range = torch.arange(0,
261
+ max_len,
262
+ dtype=torch.int64,
263
+ device=lengths.device)
264
+ seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
265
+ seq_length_expand = lengths.unsqueeze(-1)
266
+ mask = seq_range_expand >= seq_length_expand
267
+ return mask
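To make the mask conventions concrete, here is a small illustrative snippet (not from the diff) exercising `make_pad_mask` and the ONNX-friendly `subsequent_chunk_mask`.

```python
# Illustrative check of the masking utilities above (not part of this diff).
import torch
from cosyvoice.utils.mask import make_pad_mask, subsequent_chunk_mask

lengths = torch.tensor([5, 3, 2])
pad_mask = make_pad_mask(lengths)      # True marks padded positions
# [[F, F, F, F, F],
#  [F, F, F, T, T],
#  [F, F, T, T, T]]

chunk_mask = subsequent_chunk_mask(4, chunk_size=2)
# Each frame attends to its own chunk and everything before it
# (left-chunk limiting is unsupported in this ONNX-friendly variant):
# [[T, T, F, F],
#  [T, T, F, F],
#  [T, T, T, T],
#  [T, T, T, T]]
print(pad_mask, chunk_mask, sep="\n")
```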
third_party/cosyvoice/utils/scheduler.py ADDED
@@ -0,0 +1,738 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2022 Ximalaya Inc (Yuguang Yang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # Modified from ESPnet(https://github.com/espnet/espnet)
17
+ # NeMo(https://github.com/NVIDIA/NeMo)
18
+
19
+ from typing import Union
20
+
21
+ import math
22
+ import warnings
23
+ import torch
24
+ from torch.optim.lr_scheduler import _LRScheduler
25
+
26
+
27
+ class WarmupLR(_LRScheduler):
28
+ """The WarmupLR scheduler
29
+
30
+ This scheduler is almost the same as the NoamLR scheduler except for the
32
+ following difference:
32
+
33
+ NoamLR:
34
+ lr = optimizer.lr * model_size ** -0.5
35
+ * min(step ** -0.5, step * warmup_step ** -1.5)
36
+ WarmupLR:
37
+ lr = optimizer.lr * warmup_step ** 0.5
38
+ * min(step ** -0.5, step * warmup_step ** -1.5)
39
+
40
+ Note that the maximum lr equals optimizer.lr in this scheduler.
41
+
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ optimizer: torch.optim.Optimizer,
47
+ warmup_steps: Union[int, float] = 25000,
48
+ last_epoch: int = -1,
49
+ ):
50
+ self.warmup_steps = warmup_steps
51
+
52
+ # __init__() must be invoked before setting field
53
+ # because step() is also invoked in __init__()
54
+ super().__init__(optimizer, last_epoch)
55
+
56
+ def __repr__(self):
57
+ return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"
58
+
59
+ def get_lr(self):
60
+ step_num = self.last_epoch + 1
61
+ if self.warmup_steps == 0:
62
+ return [lr * step_num**-0.5 for lr in self.base_lrs]
63
+ else:
64
+ return [
65
+ lr * self.warmup_steps**0.5 *
66
+ min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
67
+ for lr in self.base_lrs
68
+ ]
69
+
70
+ def set_step(self, step: int):
71
+ self.last_epoch = step
72
+
73
+
74
+ class WarmupPolicy(_LRScheduler):
75
+ """Adds warmup kwargs and warmup logic to lr policy.
76
+ All arguments should be passed as kwargs for clarity,
77
+ Args:
78
+ warmup_steps: Number of training steps in warmup stage
79
+ warmup_ratio: Ratio of warmup steps to total steps
80
+ max_steps: Total number of steps while training or `None` for
81
+ infinite training
82
+ """
83
+
84
+ def __init__(self,
85
+ optimizer,
86
+ *,
87
+ warmup_steps=None,
88
+ warmup_ratio=None,
89
+ max_steps=None,
90
+ min_lr=0.0,
91
+ last_epoch=-1):
92
+ assert not (warmup_steps is not None and warmup_ratio is not None),\
93
+ "Either use particular number of step or ratio"
94
+ assert warmup_ratio is None or max_steps is not None, \
95
+ "If there is a ratio, there should be a total steps"
96
+
97
+ # It is necessary to assign all attributes *before* __init__,
98
+ # as class is wrapped by an inner class.
99
+ self.max_steps = max_steps
100
+ if warmup_steps is not None:
101
+ self.warmup_steps = warmup_steps
102
+ elif warmup_ratio is not None:
103
+ self.warmup_steps = int(warmup_ratio * max_steps)
104
+ else:
105
+ self.warmup_steps = 0
106
+
107
+ self.min_lr = min_lr
108
+ super().__init__(optimizer, last_epoch)
109
+
110
+ def get_lr(self):
111
+ if not self._get_lr_called_within_step:
112
+ warnings.warn(
113
+ "To get the last learning rate computed "
114
+ "by the scheduler, please use `get_last_lr()`.",
115
+ UserWarning,
116
+ stacklevel=2)
117
+
118
+ step = self.last_epoch
119
+
120
+ if step <= self.warmup_steps and self.warmup_steps > 0:
121
+ return self._get_warmup_lr(step)
122
+
123
+ if step > self.max_steps:
124
+ return [self.min_lr for _ in self.base_lrs]
125
+
126
+ return self._get_lr(step)
127
+
128
+ def _get_warmup_lr(self, step):
129
+ lr_val = (step + 1) / (self.warmup_steps + 1)
130
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
131
+
132
+ def _get_lr(self, step):
133
+ """Simple const lr policy"""
134
+ return self.base_lrs
135
+
136
+
137
+ class SquareRootConstantPolicy(_LRScheduler):
138
+ """Adds warmup kwargs and warmup logic to lr policy.
139
+ All arguments should be passed as kwargs for clarity,
140
+ Args:
141
+ warmup_steps: Number of training steps in warmup stage
142
+ warmup_ratio: Ratio of warmup steps to total steps
143
+ max_steps: Total number of steps while training or `None` for
144
+ infinite training
145
+ """
146
+
147
+ def __init__(self,
148
+ optimizer,
149
+ *,
150
+ constant_steps=None,
151
+ constant_ratio=None,
152
+ max_steps=None,
153
+ min_lr=0.0,
154
+ last_epoch=-1):
155
+ assert not (constant_steps is not None
156
+ and constant_ratio is not None), \
157
+ "Either use particular number of step or ratio"
158
+ assert constant_ratio is None or max_steps is not None, \
159
+ "If there is a ratio, there should be a total steps"
160
+
161
+ # It is necessary to assign all attributes *before* __init__,
162
+ # as class is wrapped by an inner class.
163
+ self.max_steps = max_steps
164
+ if constant_steps is not None:
165
+ self.constant_steps = constant_steps
166
+ elif constant_ratio is not None:
167
+ self.constant_steps = int(constant_ratio * max_steps)
168
+ else:
169
+ self.constant_steps = 0
170
+
171
+ self.constant_lr = 1 / (constant_steps**0.5)
172
+ self.min_lr = min_lr
173
+ super().__init__(optimizer, last_epoch)
174
+
175
+ def get_lr(self):
176
+ if not self._get_lr_called_within_step:
177
+ warnings.warn(
178
+ "To get the last learning rate computed "
179
+ "by the scheduler, please use `get_last_lr()`.",
180
+ UserWarning,
181
+ stacklevel=2)
182
+
183
+ step = self.last_epoch
184
+
185
+ if step <= self.constant_steps:
186
+ return [self.constant_lr for _ in self.base_lrs]
187
+
188
+ if step > self.max_steps:
189
+ return [self.min_lr for _ in self.base_lrs]
190
+
191
+ return self._get_lr(step)
192
+
193
+ def _get_lr(self, step):
194
+ """Simple const lr policy"""
195
+ return self.base_lrs
196
+
197
+
198
+ class WarmupHoldPolicy(WarmupPolicy):
199
+ """Variant of WarmupPolicy which maintains high
200
+ learning rate for a defined number of steps.
201
+ All arguments should be passed as kwargs for clarity,
202
+ Args:
203
+ warmup_steps: Number of training steps in warmup stage
204
+ warmup_ratio: Ratio of warmup steps to total steps
205
+ hold_steps: Number of training steps to
206
+ hold the learning rate after warm up
207
+ hold_ratio: Ratio of hold steps to total steps
208
+ max_steps: Total number of steps while training or `None` for
209
+ infinite training
210
+ """
211
+
212
+ def __init__(
213
+ self,
214
+ optimizer,
215
+ *,
216
+ warmup_steps=None,
217
+ warmup_ratio=None,
218
+ hold_steps=None,
219
+ hold_ratio=None,
220
+ max_steps=None,
221
+ min_lr=0.0,
222
+ last_epoch=-1,
223
+ ):
224
+ assert not (hold_steps is not None and hold_ratio is not None), \
225
+ "Either use particular number of step or ratio"
226
+ assert hold_ratio is None or max_steps is not None, \
227
+ "If there is a ratio, there should be a total steps"
228
+
229
+ self.min_lr = min_lr
230
+ self._last_warmup_lr = 0.0
231
+
232
+ # Necessary to duplicate as class attributes are hidden in inner class
233
+ self.max_steps = max_steps
234
+ if warmup_steps is not None:
235
+ self.warmup_steps = warmup_steps
236
+ elif warmup_ratio is not None:
237
+ self.warmup_steps = int(warmup_ratio * max_steps)
238
+ else:
239
+ self.warmup_steps = 0
240
+
241
+ if hold_steps is not None:
242
+ self.hold_steps = hold_steps + self.warmup_steps
243
+ elif hold_ratio is not None:
244
+ self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
245
+ else:
246
+ self.hold_steps = 0
247
+
248
+ super().__init__(
249
+ optimizer,
250
+ warmup_steps=warmup_steps,
251
+ warmup_ratio=warmup_ratio,
252
+ max_steps=max_steps,
253
+ last_epoch=last_epoch,
254
+ min_lr=min_lr,
255
+ )
256
+
257
+ def get_lr(self):
258
+ if not self._get_lr_called_within_step:
259
+ warnings.warn(
260
+ "To get the last learning rate computed by the scheduler,"
261
+ " "
262
+ "please use `get_last_lr()`.",
263
+ UserWarning,
264
+ stacklevel=2)
265
+
266
+ step = self.last_epoch
267
+
268
+ # Warmup phase
269
+ if step <= self.warmup_steps and self.warmup_steps > 0:
270
+ return self._get_warmup_lr(step)
271
+
272
+ # Hold phase
273
+ if (step >= self.warmup_steps) and (step < self.hold_steps):
274
+ return self.base_lrs
275
+
276
+ if step > self.max_steps:
277
+ return [self.min_lr for _ in self.base_lrs]
278
+
279
+ return self._get_lr(step)
280
+
281
+
282
+ class WarmupAnnealHoldPolicy(_LRScheduler):
283
+ """Adds warmup kwargs and warmup logic to lr policy.
284
+ All arguments should be passed as kwargs for clarity,
285
+ Args:
286
+ warmup_steps: Number of training steps in warmup stage
287
+ warmup_ratio: Ratio of warmup steps to total steps
288
+ max_steps: Total number of steps while training or `None` for
289
+ infinite training
290
+ min_lr: Minimum lr to hold the learning rate after decay at.
291
+ constant_steps: Number of steps to keep lr constant at.
292
+ constant_ratio: Ratio of steps to keep lr constant.
293
+ """
294
+
295
+ def __init__(
296
+ self,
297
+ optimizer,
298
+ *,
299
+ warmup_steps=None,
300
+ warmup_ratio=None,
301
+ constant_steps=None,
302
+ constant_ratio=None,
303
+ max_steps=None,
304
+ min_lr=0.0,
305
+ last_epoch=-1,
306
+ ):
307
+ assert not (warmup_steps is not None
308
+ and warmup_ratio is not None), \
309
+ "Either use particular number of step or ratio"
310
+ assert not (constant_steps is not None
311
+ and constant_ratio is not None), \
312
+ "Either use constant_steps or constant_ratio"
313
+ assert warmup_ratio is None or max_steps is not None, \
314
+ "If there is a ratio, there should be a total steps"
315
+
316
+ # It is necessary to assign all attributes *before* __init__,
317
+ # as class is wrapped by an inner class.
318
+ self.max_steps = max_steps
319
+
320
+ if warmup_steps is not None:
321
+ self.warmup_steps = warmup_steps
322
+ elif warmup_ratio is not None:
323
+ self.warmup_steps = int(warmup_ratio * max_steps)
324
+ else:
325
+ self.warmup_steps = 0
326
+
327
+ if constant_steps is not None:
328
+ self.constant_steps = constant_steps
329
+ elif constant_ratio is not None:
330
+ self.constant_steps = int(constant_ratio * max_steps)
331
+ else:
332
+ self.constant_steps = 0
333
+
334
+ self.decay_steps = max_steps - (self.constant_steps +
335
+ self.warmup_steps)
336
+
337
+ self.min_lr = min_lr
338
+ super().__init__(optimizer, last_epoch)
339
+
340
+ def get_lr(self):
341
+ if not self._get_lr_called_within_step:
342
+ warnings.warn(
343
+ "To get the last learning rate computed "
344
+ "by the scheduler, please use `get_last_lr()`.",
345
+ UserWarning,
346
+ stacklevel=2)
347
+
348
+ step = self.last_epoch
349
+
350
+ # Warmup steps
351
+ if self.warmup_steps > 0 and step <= self.warmup_steps:
352
+ return self._get_warmup_lr(step)
353
+
354
+ # Constant steps after warmup and decay
355
+ if self.constant_steps > 0 and (
356
+ self.warmup_steps + self.decay_steps) < step <= self.max_steps:
357
+ return self._get_constant_lr(step)
358
+
359
+ # Min lr after max steps of updates
360
+ if step > self.max_steps:
361
+ return [self.min_lr for _ in self.base_lrs]
362
+
363
+ return self._get_lr(step)
364
+
365
+ def _get_warmup_lr(self, step):
366
+ lr_val = (step + 1) / (self.warmup_steps + 1)
367
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
368
+
369
+ def _get_constant_lr(self, step):
370
+ return [self.min_lr for _ in self.base_lrs]
371
+
372
+ def _get_lr(self, step):
373
+ """Simple const lr policy"""
374
+ return self.base_lrs
375
+
376
+
377
+ def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
378
+ mult = ((max_steps - step) / max_steps)**0.5
379
+ out_lr = initial_lr * mult
380
+ out_lr = max(out_lr, min_lr)
381
+ return out_lr
382
+
383
+
384
+ def _square_annealing(initial_lr, step, max_steps, min_lr):
385
+ mult = ((max_steps - step) / max_steps)**2
386
+ out_lr = initial_lr * mult
387
+ out_lr = max(out_lr, min_lr)
388
+ return out_lr
389
+
390
+
391
+ def _cosine_annealing(initial_lr, step, max_steps, min_lr):
392
+ mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
393
+ out_lr = (initial_lr - min_lr) * mult + min_lr
394
+ return out_lr
395
+
396
+
397
+ def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step,
398
+ decay_steps, min_lr):
399
+ assert max_lr > min_lr
400
+ # Use linear warmup for the initial part.
401
+ if warmup_steps > 0 and step <= warmup_steps:
402
+ return max_lr * float(step) / float(warmup_steps)
403
+
404
+ # For any steps larger than `decay_steps`, use `min_lr`.
405
+ if step > warmup_steps + decay_steps:
406
+ return min_lr
407
+
408
+ # If we are done with the warmup period, use the decay style.
409
+ num_steps_ = step - warmup_steps
410
+ decay_steps_ = decay_steps
411
+ decay_ratio = float(num_steps_) / float(decay_steps_)
412
+ assert decay_ratio >= 0.0
413
+ assert decay_ratio <= 1.0
414
+ delta_lr = max_lr - min_lr
415
+
416
+ coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
417
+
418
+ return min_lr + coeff * delta_lr
419
+
420
+
421
+ def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
422
+ if cycle:
423
+ multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
424
+ decay_steps *= multiplier
425
+ else:
426
+ step = min(step, decay_steps)
427
+ p = step / decay_steps
428
+ lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
429
+ lr += min_lr
430
+ return lr
431
+
432
+
433
+ def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps,
434
+ decay_rate, min_lr):
435
+ # hold_steps = total number of steps
436
+ # to hold the LR, not the warmup + hold steps.
437
+ T_warmup_decay = max(1, warmup_steps**decay_rate)
438
+ T_hold_decay = max(1, (step - hold_steps)**decay_rate)
439
+ lr = (initial_lr * T_warmup_decay) / T_hold_decay
440
+ lr = max(lr, min_lr)
441
+ return lr
442
+
443
+
444
+ class SquareAnnealing(WarmupPolicy):
445
+
446
+ def __init__(self,
447
+ optimizer,
448
+ *,
449
+ max_steps,
450
+ min_lr=1e-5,
451
+ last_epoch=-1,
452
+ **kwargs):
453
+ super().__init__(optimizer=optimizer,
454
+ max_steps=max_steps,
455
+ last_epoch=last_epoch,
456
+ min_lr=min_lr,
457
+ **kwargs)
458
+
459
+ def _get_lr(self, step):
460
+ new_lrs = [
461
+ _square_annealing(
462
+ initial_lr=initial_lr,
463
+ step=step - self.warmup_steps,
464
+ max_steps=self.max_steps - self.warmup_steps,
465
+ min_lr=self.min_lr,
466
+ ) for initial_lr in self.base_lrs
467
+ ]
468
+ return new_lrs
469
+
470
+
471
+ class SquareRootAnnealing(WarmupPolicy):
472
+
473
+ def __init__(self,
474
+ optimizer,
475
+ *,
476
+ max_steps,
477
+ min_lr=0,
478
+ last_epoch=-1,
479
+ **kwargs):
480
+ super().__init__(optimizer=optimizer,
481
+ max_steps=max_steps,
482
+ last_epoch=last_epoch,
483
+ min_lr=min_lr,
484
+ **kwargs)
485
+
486
+ def _get_lr(self, step):
487
+ new_lrs = [
488
+ _squareroot_annealing(initial_lr=initial_lr,
489
+ step=step,
490
+ max_steps=self.max_steps,
491
+ min_lr=self.min_lr)
492
+ for initial_lr in self.base_lrs
493
+ ]
494
+ return new_lrs
495
+
496
+
497
+ class CosineAnnealing(WarmupAnnealHoldPolicy):
498
+
499
+ def __init__(self,
500
+ optimizer,
501
+ *,
502
+ max_steps,
503
+ min_lr=0,
504
+ last_epoch=-1,
505
+ **kwargs):
506
+ super().__init__(optimizer=optimizer,
507
+ max_steps=max_steps,
508
+ last_epoch=last_epoch,
509
+ min_lr=min_lr,
510
+ **kwargs)
511
+
512
+ def _get_lr(self, step):
513
+ for initial_lr in self.base_lrs:
514
+ if initial_lr < self.min_lr:
515
+ raise ValueError(
516
+ f"{self} received an initial learning rate "
517
+ f"that was lower than the minimum learning rate.")
518
+
519
+ if self.constant_steps is None or self.constant_steps == 0:
520
+ new_lrs = [
521
+ _cosine_annealing(
522
+ initial_lr=initial_lr,
523
+ step=step - self.warmup_steps,
524
+ max_steps=self.max_steps - self.warmup_steps,
525
+ min_lr=self.min_lr,
526
+ ) for initial_lr in self.base_lrs
527
+ ]
528
+ else:
529
+ new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step)
530
+ return new_lrs
531
+
532
+ def _get_warmup_lr(self, step):
533
+ if self.constant_steps is None or self.constant_steps == 0:
534
+ return super()._get_warmup_lr(step)
535
+ else:
536
+ # Use linear warmup for the initial part.
537
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
538
+
539
+ def _get_constant_lr(self, step):
540
+ # Only called when `constant_steps` > 0.
541
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
542
+
543
+ def _get_linear_warmup_with_cosine_annealing_lr(self, step):
544
+ # Cosine Schedule for Megatron LM,
545
+ # slightly different warmup schedule + constant LR at the end.
546
+ new_lrs = [
547
+ _linear_warmup_with_cosine_annealing(
548
+ max_lr=self.base_lrs[0],
549
+ warmup_steps=self.warmup_steps,
550
+ step=step,
551
+ decay_steps=self.decay_steps,
552
+ min_lr=self.min_lr,
553
+ ) for _ in self.base_lrs
554
+ ]
555
+ return new_lrs
556
+
557
+
558
+ class NoamAnnealing(_LRScheduler):
559
+
560
+ def __init__(self,
561
+ optimizer,
562
+ *,
563
+ d_model,
564
+ warmup_steps=None,
565
+ warmup_ratio=None,
566
+ max_steps=None,
567
+ min_lr=0.0,
568
+ last_epoch=-1):
569
+ self._normalize = d_model**(-0.5)
570
+ assert not (warmup_steps is not None and warmup_ratio is not None), \
571
+ "Either use particular number of step or ratio"
572
+ assert warmup_ratio is None or max_steps is not None, \
573
+ "If there is a ratio, there should be a total steps"
574
+
575
+ # It is necessary to assign all attributes *before* __init__,
576
+ # as class is wrapped by an inner class.
577
+ self.max_steps = max_steps
578
+ if warmup_steps is not None:
579
+ self.warmup_steps = warmup_steps
580
+ elif warmup_ratio is not None:
581
+ self.warmup_steps = int(warmup_ratio * max_steps)
582
+ else:
583
+ self.warmup_steps = 0
584
+
585
+ self.min_lr = min_lr
586
+ super().__init__(optimizer, last_epoch)
587
+
588
+ def get_lr(self):
589
+ if not self._get_lr_called_within_step:
590
+ warnings.warn(
591
+ "To get the last learning rate computed "
592
+ "by the scheduler, please use `get_last_lr()`.",
593
+ UserWarning,
594
+ stacklevel=2)
595
+
596
+ step = max(1, self.last_epoch)
597
+
598
+ for initial_lr in self.base_lrs:
599
+ if initial_lr < self.min_lr:
600
+ raise ValueError(
601
+ f"{self} received an initial learning rate "
602
+ f"that was lower than the minimum learning rate.")
603
+
604
+ new_lrs = [
605
+ self._noam_annealing(initial_lr=initial_lr, step=step)
606
+ for initial_lr in self.base_lrs
607
+ ]
608
+ return new_lrs
609
+
610
+ def _noam_annealing(self, initial_lr, step):
611
+ if self.warmup_steps > 0:
612
+ mult = self._normalize * min(step**(-0.5),
613
+ step * (self.warmup_steps**(-1.5)))
614
+ else:
615
+ mult = self._normalize * step**(-0.5)
616
+
617
+ out_lr = initial_lr * mult
618
+ if step > self.warmup_steps:
619
+ out_lr = max(out_lr, self.min_lr)
620
+ return out_lr
621
+
622
+
623
+ class NoamHoldAnnealing(WarmupHoldPolicy):
624
+
625
+ def __init__(self,
626
+ optimizer,
627
+ *,
628
+ max_steps,
629
+ decay_rate=0.5,
630
+ min_lr=0.0,
631
+ last_epoch=-1,
632
+ **kwargs):
633
+ """
634
+ From Nemo:
635
+ Implementation of the Noam Hold Annealing policy
636
+ from the SqueezeFormer paper.
637
+
638
+ Unlike NoamAnnealing, the peak learning rate
639
+ can be explicitly set for this scheduler.
640
+ The schedule first performs linear warmup,
641
+ then holds the peak LR, then decays with some schedule for
642
+ the remainder of the steps.
643
+ Therefore the min-lr is still dependent
644
+ on the hyper parameters selected.
645
+
646
+ It's schedule is determined by three factors-
647
+
648
+ Warmup Steps: Initial stage, where linear warmup
649
+ occurs uptil the peak LR is reached. Unlike NoamAnnealing,
650
+ the peak LR is explicitly stated here instead of a scaling factor.
651
+
652
+ Hold Steps: Intermediate stage, where the peak LR
653
+ is maintained for some number of steps. In this region,
654
+ the high peak LR allows the model to converge faster
655
+ if training is stable. However the high LR
656
+ may also cause instability during training.
657
+ Should usually be a significant fraction of training
658
+ steps (around 30-40% of the entire training steps).
659
+
660
+ Decay Steps: Final stage, where the LR rapidly decays
661
+ with some scaling rate (set by decay rate).
662
+ To attain Noam decay, use 0.5,
663
+ for Squeezeformer recommended decay, use 1.0.
664
+ The fast decay after prolonged high LR during
665
+ hold phase allows for rapid convergence.
666
+
667
+ References:
668
+ - [Squeezeformer:
669
+ An Efficient Transformer for Automatic Speech Recognition]
670
+ (https://arxiv.org/abs/2206.00888)
671
+
672
+ Args:
673
+ optimizer: Pytorch compatible Optimizer object.
674
+ warmup_steps: Number of training steps in warmup stage
675
+ warmup_ratio: Ratio of warmup steps to total steps
676
+ hold_steps: Number of training steps to
677
+ hold the learning rate after warm up
678
+ hold_ratio: Ratio of hold steps to total steps
679
+ max_steps: Total number of steps while training or `None` for
680
+ infinite training
681
+ decay_rate: Float value describing the polynomial decay
682
+ after the hold period. Default value
683
+ of 0.5 corresponds to Noam decay.
684
+ min_lr: Minimum learning rate.
685
+ """
686
+ self.decay_rate = decay_rate
687
+ super().__init__(optimizer=optimizer,
688
+ max_steps=max_steps,
689
+ last_epoch=last_epoch,
690
+ min_lr=min_lr,
691
+ **kwargs)
692
+
693
+ def _get_lr(self, step):
694
+ if self.warmup_steps is None or self.warmup_steps == 0:
695
+ raise ValueError(
696
+ "Noam scheduler cannot be used without warmup steps")
697
+
698
+ if self.hold_steps > 0:
699
+ hold_steps = self.hold_steps - self.warmup_steps
700
+ else:
701
+ hold_steps = 0
702
+
703
+ new_lrs = [
704
+ _noam_hold_annealing(
705
+ initial_lr,
706
+ step=step,
707
+ warmup_steps=self.warmup_steps,
708
+ hold_steps=hold_steps,
709
+ decay_rate=self.decay_rate,
710
+ min_lr=self.min_lr,
711
+ ) for initial_lr in self.base_lrs
712
+ ]
713
+ return new_lrs
714
+
715
+ def set_step(self, step: int):
716
+ self.last_epoch = step
717
+
718
+
719
+ class ConstantLR(_LRScheduler):
720
+ """The ConstantLR scheduler
721
+
722
+ This scheduler keeps a constant lr
723
+
724
+ """
725
+
726
+ def __init__(
727
+ self,
728
+ optimizer: torch.optim.Optimizer,
729
+ ):
730
+ # __init__() must be invoked before setting field
731
+ # because step() is also invoked in __init__()
732
+ super().__init__(optimizer)
733
+
734
+ def get_lr(self):
735
+ return self.base_lrs
736
+
737
+ def set_step(self, step: int):
738
+ self.last_epoch = step
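Finally, a hedged sketch of attaching one of these schedulers to an optimizer; the step counts and learning rates are illustrative, not values from the CosyVoice recipes.

```python
# Illustrative wiring of NoamHoldAnnealing (not part of this diff);
# all hyperparameters below are placeholders.
import torch
from cosyvoice.utils.scheduler import NoamHoldAnnealing

model = torch.nn.Linear(16, 16)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)   # peak LR
scheduler = NoamHoldAnnealing(optimizer, max_steps=100_000,
                              warmup_steps=2_500, hold_steps=30_000,
                              decay_rate=0.5, min_lr=1e-5)

for step in range(10):        # one scheduler.step() per optimizer update
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())
```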